problem_id (stringlengths 18-22) | source (stringclasses 1 value) | task_type (stringclasses 1 value) | in_source_id (stringlengths 13-58) | prompt (stringlengths 1.1k-25.4k) | golden_diff (stringlengths 145-5.13k) | verification_info (stringlengths 582-39.1k) | num_tokens (int64, 271-4.1k) | num_tokens_diff (int64, 47-1.02k) |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_21543
|
rasdani/github-patches
|
git_diff
|
dj-stripe__dj-stripe-859
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Test webhook with 1.2.3 vs 2018.11.08 returns 500 error 'No such event: account.external_00000000000000'
Hello, I am using a Stripe account in TEST mode with API version 2018.11.08 and Django Stripe version 1.2.3. When sending a test request to test the webhook, the view returns 500 instead of 200. But when triggering real events, they are correctly handled
In django admin or in stripe logging I can see test events have event ids such as "account.external_00000000000000" or "balance.available_00000000000000" etc but the library is checking TEST_EVENT_ID = "evt_00000000000000"
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `djstripe/models/webhooks.py`
Content:
```
1 import json
2 import warnings
3 from traceback import format_exc
4
5 import stripe
6 from django.db import models
7 from django.utils.functional import cached_property
8
9 from .. import settings as djstripe_settings
10 from .. import webhooks
11 from ..context_managers import stripe_temporary_api_version
12 from ..fields import JSONField
13 from ..signals import webhook_processing_error
14 from ..utils import fix_django_headers
15 from .base import logger
16 from .core import Event
17
18
19 def _get_version():
20 from .. import __version__
21
22 return __version__
23
24
25 class WebhookEventTrigger(models.Model):
26 """
27 An instance of a request that reached the server endpoint for Stripe webhooks.
28
29 Webhook Events are initially **UNTRUSTED**, as it is possible for any web entity to
30 post any data to our webhook url. Data posted may be valid Stripe information, garbage, or even malicious.
31 The 'valid' flag in this model monitors this.
32 """
33
34 id = models.BigAutoField(primary_key=True)
35 remote_ip = models.GenericIPAddressField(help_text="IP address of the request client.")
36 headers = JSONField()
37 body = models.TextField(blank=True)
38 valid = models.BooleanField(
39 default=False, help_text="Whether or not the webhook event has passed validation"
40 )
41 processed = models.BooleanField(
42 default=False,
43 help_text="Whether or not the webhook event has been successfully processed",
44 )
45 exception = models.CharField(max_length=128, blank=True)
46 traceback = models.TextField(
47 blank=True, help_text="Traceback if an exception was thrown during processing"
48 )
49 event = models.ForeignKey(
50 "Event",
51 on_delete=models.SET_NULL,
52 null=True,
53 blank=True,
54 help_text="Event object contained in the (valid) Webhook",
55 )
56 djstripe_version = models.CharField(
57 max_length=32,
58 default=_get_version, # Needs to be a callable, otherwise it's a db default.
59 help_text="The version of dj-stripe when the webhook was received",
60 )
61 created = models.DateTimeField(auto_now_add=True)
62 updated = models.DateTimeField(auto_now=True)
63
64 @classmethod
65 def from_request(cls, request):
66 """
67 Create, validate and process a WebhookEventTrigger given a Django
68 request object.
69
70 The process is three-fold:
71 1. Create a WebhookEventTrigger object from a Django request.
72 2. Validate the WebhookEventTrigger as a Stripe event using the API.
73 3. If valid, process it into an Event object (and child resource).
74 """
75
76 headers = fix_django_headers(request.META)
77 assert headers
78 try:
79 body = request.body.decode(request.encoding or "utf-8")
80 except Exception:
81 body = "(error decoding body)"
82
83 ip = request.META.get("REMOTE_ADDR")
84 if ip is None:
85 warnings.warn(
86 "Could not determine remote IP (missing REMOTE_ADDR). "
87 "This is likely an issue with your wsgi/server setup."
88 )
89 ip = "0.0.0.0"
90 obj = cls.objects.create(headers=headers, body=body, remote_ip=ip)
91
92 try:
93 obj.valid = obj.validate()
94 if obj.valid:
95 if djstripe_settings.WEBHOOK_EVENT_CALLBACK:
96 # If WEBHOOK_EVENT_CALLBACK, pass it for processing
97 djstripe_settings.WEBHOOK_EVENT_CALLBACK(obj)
98 else:
99 # Process the item (do not save it, it'll get saved below)
100 obj.process(save=False)
101 except Exception as e:
102 max_length = WebhookEventTrigger._meta.get_field("exception").max_length
103 obj.exception = str(e)[:max_length]
104 obj.traceback = format_exc()
105
106 # Send the exception as the webhook_processing_error signal
107 webhook_processing_error.send(
108 sender=WebhookEventTrigger, exception=e, data=getattr(e, "http_body", "")
109 )
110
111 # re-raise the exception so Django sees it
112 raise e
113 finally:
114 obj.save()
115
116 return obj
117
118 @cached_property
119 def json_body(self):
120 try:
121 return json.loads(self.body)
122 except ValueError:
123 return {}
124
125 @property
126 def is_test_event(self):
127 return self.json_body.get("id") == webhooks.TEST_EVENT_ID
128
129 def validate(self, api_key=None):
130 """
131 The original contents of the Event message must be confirmed by
132 refetching it and comparing the fetched data with the original data.
133
134 This function makes an API call to Stripe to redownload the Event data
135 and returns whether or not it matches the WebhookEventTrigger data.
136 """
137
138 local_data = self.json_body
139 if "id" not in local_data or "livemode" not in local_data:
140 return False
141
142 if self.is_test_event:
143 logger.info("Test webhook received: {}".format(local_data))
144 return False
145
146 if djstripe_settings.WEBHOOK_VALIDATION is None:
147 # validation disabled
148 return True
149 elif (
150 djstripe_settings.WEBHOOK_VALIDATION == "verify_signature"
151 and djstripe_settings.WEBHOOK_SECRET
152 ):
153 try:
154 stripe.WebhookSignature.verify_header(
155 self.body,
156 self.headers.get("stripe-signature"),
157 djstripe_settings.WEBHOOK_SECRET,
158 djstripe_settings.WEBHOOK_TOLERANCE,
159 )
160 except stripe.error.SignatureVerificationError:
161 return False
162 else:
163 return True
164
165 livemode = local_data["livemode"]
166 api_key = api_key or djstripe_settings.get_default_api_key(livemode)
167
168 # Retrieve the event using the api_version specified in itself
169 with stripe_temporary_api_version(local_data["api_version"], validate=False):
170 remote_data = Event.stripe_class.retrieve(id=local_data["id"], api_key=api_key)
171
172 return local_data["data"] == remote_data["data"]
173
174 def process(self, save=True):
175 # Reset traceback and exception in case of reprocessing
176 self.exception = ""
177 self.traceback = ""
178
179 self.event = Event.process(self.json_body)
180 self.processed = True
181 if save:
182 self.save()
183
184 return self.event
185
```
Path: `djstripe/webhooks.py`
Content:
```
1 """
2 Utils related to processing or registering for webhooks
3
4 A model registers itself here if it wants to be in the list of processing
5 functions for a particular webhook. Each processor will have the ability
6 to modify the event object, access event data, and do what it needs to do
7
8 registrations are keyed by top-level event type (e.g. "invoice", "customer", etc)
9 Each registration entry is a list of processors
10 Each processor in these lists is a function to be called
11 The function signature is:
12 <Event object>
13
14 There is also a "global registry" which is just a list of processors (as defined above)
15
16 NOTE: global processors are called before other processors.
17 """
18 import functools
19 import itertools
20 from collections import defaultdict
21
22 __all__ = ["handler", "handler_all", "call_handlers"]
23
24
25 registrations = defaultdict(list)
26 registrations_global = list()
27
28 TEST_EVENT_ID = "evt_00000000000000"
29
30
31 def handler(*event_types):
32 """
33 Decorator that registers a function as a webhook handler.
34
35 Functions can be registered for event types (e.g. 'customer') or
36 fully qualified event sub-types (e.g. 'customer.subscription.deleted').
37
38 If an event type is specified, the handler will receive callbacks for
39 ALL webhook events of that type. For example, if 'customer' is specified,
40 the handler will receive events for 'customer.subscription.created',
41 'customer.subscription.updated', etc.
42
43 :param event_types: The event type(s) that should be handled.
44 :type event_types: str.
45 """
46
47 def decorator(func):
48 for event_type in event_types:
49 registrations[event_type].append(func)
50 return func
51
52 return decorator
53
54
55 def handler_all(func=None):
56 """
57 Decorator that registers a function as a webhook handler for ALL webhook events.
58
59 Handles all webhooks regardless of event type or sub-type.
60 """
61 if not func:
62 return functools.partial(handler_all)
63
64 registrations_global.append(func)
65
66 return func
67
68
69 def call_handlers(event):
70 """
71 Invoke all handlers for the provided event type/sub-type.
72
73 The handlers are invoked in the following order:
74
75 1. Global handlers
76 2. Event type handlers
77 3. Event sub-type handlers
78
79 Handlers within each group are invoked in order of registration.
80
81 :param event: The event model object.
82 :type event: ``djstripe.models.Event``
83 """
84 chain = [registrations_global]
85
86 # Build up a list of handlers with each qualified part of the event
87 # category and verb. For example, "customer.subscription.created" creates:
88 # 1. "customer"
89 # 2. "customer.subscription"
90 # 3. "customer.subscription.created"
91 for index, _ in enumerate(event.parts):
92 qualified_event_type = ".".join(event.parts[: (index + 1)])
93 chain.append(registrations[qualified_event_type])
94
95 for handler_func in itertools.chain(*chain):
96 handler_func(event=event)
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/djstripe/models/webhooks.py b/djstripe/models/webhooks.py
--- a/djstripe/models/webhooks.py
+++ b/djstripe/models/webhooks.py
@@ -7,7 +7,6 @@
from django.utils.functional import cached_property
from .. import settings as djstripe_settings
-from .. import webhooks
from ..context_managers import stripe_temporary_api_version
from ..fields import JSONField
from ..signals import webhook_processing_error
@@ -124,7 +123,8 @@
@property
def is_test_event(self):
- return self.json_body.get("id") == webhooks.TEST_EVENT_ID
+ event_id = self.json_body.get("id")
+ return event_id and event_id.endswith("_00000000000000")
def validate(self, api_key=None):
"""
diff --git a/djstripe/webhooks.py b/djstripe/webhooks.py
--- a/djstripe/webhooks.py
+++ b/djstripe/webhooks.py
@@ -25,6 +25,8 @@
registrations = defaultdict(list)
registrations_global = list()
+# Legacy. In previous versions of Stripe API, all test events used this ID.
+# Check out issue #779 for more information.
TEST_EVENT_ID = "evt_00000000000000"
|
{"golden_diff": "diff --git a/djstripe/models/webhooks.py b/djstripe/models/webhooks.py\n--- a/djstripe/models/webhooks.py\n+++ b/djstripe/models/webhooks.py\n@@ -7,7 +7,6 @@\n from django.utils.functional import cached_property\n \n from .. import settings as djstripe_settings\n-from .. import webhooks\n from ..context_managers import stripe_temporary_api_version\n from ..fields import JSONField\n from ..signals import webhook_processing_error\n@@ -124,7 +123,8 @@\n \n \t@property\n \tdef is_test_event(self):\n-\t\treturn self.json_body.get(\"id\") == webhooks.TEST_EVENT_ID\n+\t\tevent_id = self.json_body.get(\"id\")\n+\t\treturn event_id and event_id.endswith(\"_00000000000000\")\n \n \tdef validate(self, api_key=None):\n \t\t\"\"\"\ndiff --git a/djstripe/webhooks.py b/djstripe/webhooks.py\n--- a/djstripe/webhooks.py\n+++ b/djstripe/webhooks.py\n@@ -25,6 +25,8 @@\n registrations = defaultdict(list)\n registrations_global = list()\n \n+# Legacy. In previous versions of Stripe API, all test events used this ID.\n+# Check out issue #779 for more information.\n TEST_EVENT_ID = \"evt_00000000000000\"\n", "issue": "Test webhook with 1.2.3 vs 2018.11.08 returns 500 error 'No such event: account.external_00000000000000'\nHello, I am using a Stripe account in TEST mode with API version 2018.11.08 and Django Stripe version 1.2.3. When sending a test request to test the webhook, the view returns 500 instead of 200. But when triggering real events, they are correctly handled \r\n\r\nIn django admin or in stripe logging I can see test events have event ids such as \"account.external_00000000000000\" or \"balance.available_00000000000000\" etc but the library is checking TEST_EVENT_ID = \"evt_00000000000000\" \n", "before_files": [{"content": "import json\nimport warnings\nfrom traceback import format_exc\n\nimport stripe\nfrom django.db import models\nfrom django.utils.functional import cached_property\n\nfrom .. import settings as djstripe_settings\nfrom .. import webhooks\nfrom ..context_managers import stripe_temporary_api_version\nfrom ..fields import JSONField\nfrom ..signals import webhook_processing_error\nfrom ..utils import fix_django_headers\nfrom .base import logger\nfrom .core import Event\n\n\ndef _get_version():\n\tfrom .. import __version__\n\n\treturn __version__\n\n\nclass WebhookEventTrigger(models.Model):\n\t\"\"\"\n\tAn instance of a request that reached the server endpoint for Stripe webhooks.\n\n\tWebhook Events are initially **UNTRUSTED**, as it is possible for any web entity to\n\tpost any data to our webhook url. 
Data posted may be valid Stripe information, garbage, or even malicious.\n\tThe 'valid' flag in this model monitors this.\n\t\"\"\"\n\n\tid = models.BigAutoField(primary_key=True)\n\tremote_ip = models.GenericIPAddressField(help_text=\"IP address of the request client.\")\n\theaders = JSONField()\n\tbody = models.TextField(blank=True)\n\tvalid = models.BooleanField(\n\t\tdefault=False, help_text=\"Whether or not the webhook event has passed validation\"\n\t)\n\tprocessed = models.BooleanField(\n\t\tdefault=False,\n\t\thelp_text=\"Whether or not the webhook event has been successfully processed\",\n\t)\n\texception = models.CharField(max_length=128, blank=True)\n\ttraceback = models.TextField(\n\t\tblank=True, help_text=\"Traceback if an exception was thrown during processing\"\n\t)\n\tevent = models.ForeignKey(\n\t\t\"Event\",\n\t\ton_delete=models.SET_NULL,\n\t\tnull=True,\n\t\tblank=True,\n\t\thelp_text=\"Event object contained in the (valid) Webhook\",\n\t)\n\tdjstripe_version = models.CharField(\n\t\tmax_length=32,\n\t\tdefault=_get_version, # Needs to be a callable, otherwise it's a db default.\n\t\thelp_text=\"The version of dj-stripe when the webhook was received\",\n\t)\n\tcreated = models.DateTimeField(auto_now_add=True)\n\tupdated = models.DateTimeField(auto_now=True)\n\n\t@classmethod\n\tdef from_request(cls, request):\n\t\t\"\"\"\n\t\tCreate, validate and process a WebhookEventTrigger given a Django\n\t\trequest object.\n\n\t\tThe process is three-fold:\n\t\t1. Create a WebhookEventTrigger object from a Django request.\n\t\t2. Validate the WebhookEventTrigger as a Stripe event using the API.\n\t\t3. If valid, process it into an Event object (and child resource).\n\t\t\"\"\"\n\n\t\theaders = fix_django_headers(request.META)\n\t\tassert headers\n\t\ttry:\n\t\t\tbody = request.body.decode(request.encoding or \"utf-8\")\n\t\texcept Exception:\n\t\t\tbody = \"(error decoding body)\"\n\n\t\tip = request.META.get(\"REMOTE_ADDR\")\n\t\tif ip is None:\n\t\t\twarnings.warn(\n\t\t\t\t\"Could not determine remote IP (missing REMOTE_ADDR). 
\"\n\t\t\t\t\"This is likely an issue with your wsgi/server setup.\"\n\t\t\t)\n\t\t\tip = \"0.0.0.0\"\n\t\tobj = cls.objects.create(headers=headers, body=body, remote_ip=ip)\n\n\t\ttry:\n\t\t\tobj.valid = obj.validate()\n\t\t\tif obj.valid:\n\t\t\t\tif djstripe_settings.WEBHOOK_EVENT_CALLBACK:\n\t\t\t\t\t# If WEBHOOK_EVENT_CALLBACK, pass it for processing\n\t\t\t\t\tdjstripe_settings.WEBHOOK_EVENT_CALLBACK(obj)\n\t\t\t\telse:\n\t\t\t\t\t# Process the item (do not save it, it'll get saved below)\n\t\t\t\t\tobj.process(save=False)\n\t\texcept Exception as e:\n\t\t\tmax_length = WebhookEventTrigger._meta.get_field(\"exception\").max_length\n\t\t\tobj.exception = str(e)[:max_length]\n\t\t\tobj.traceback = format_exc()\n\n\t\t\t# Send the exception as the webhook_processing_error signal\n\t\t\twebhook_processing_error.send(\n\t\t\t\tsender=WebhookEventTrigger, exception=e, data=getattr(e, \"http_body\", \"\")\n\t\t\t)\n\n\t\t\t# re-raise the exception so Django sees it\n\t\t\traise e\n\t\tfinally:\n\t\t\tobj.save()\n\n\t\treturn obj\n\n\t@cached_property\n\tdef json_body(self):\n\t\ttry:\n\t\t\treturn json.loads(self.body)\n\t\texcept ValueError:\n\t\t\treturn {}\n\n\t@property\n\tdef is_test_event(self):\n\t\treturn self.json_body.get(\"id\") == webhooks.TEST_EVENT_ID\n\n\tdef validate(self, api_key=None):\n\t\t\"\"\"\n\t\tThe original contents of the Event message must be confirmed by\n\t\trefetching it and comparing the fetched data with the original data.\n\n\t\tThis function makes an API call to Stripe to redownload the Event data\n\t\tand returns whether or not it matches the WebhookEventTrigger data.\n\t\t\"\"\"\n\n\t\tlocal_data = self.json_body\n\t\tif \"id\" not in local_data or \"livemode\" not in local_data:\n\t\t\treturn False\n\n\t\tif self.is_test_event:\n\t\t\tlogger.info(\"Test webhook received: {}\".format(local_data))\n\t\t\treturn False\n\n\t\tif djstripe_settings.WEBHOOK_VALIDATION is None:\n\t\t\t# validation disabled\n\t\t\treturn True\n\t\telif (\n\t\t\tdjstripe_settings.WEBHOOK_VALIDATION == \"verify_signature\"\n\t\t\tand djstripe_settings.WEBHOOK_SECRET\n\t\t):\n\t\t\ttry:\n\t\t\t\tstripe.WebhookSignature.verify_header(\n\t\t\t\t\tself.body,\n\t\t\t\t\tself.headers.get(\"stripe-signature\"),\n\t\t\t\t\tdjstripe_settings.WEBHOOK_SECRET,\n\t\t\t\t\tdjstripe_settings.WEBHOOK_TOLERANCE,\n\t\t\t\t)\n\t\t\texcept stripe.error.SignatureVerificationError:\n\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\treturn True\n\n\t\tlivemode = local_data[\"livemode\"]\n\t\tapi_key = api_key or djstripe_settings.get_default_api_key(livemode)\n\n\t\t# Retrieve the event using the api_version specified in itself\n\t\twith stripe_temporary_api_version(local_data[\"api_version\"], validate=False):\n\t\t\tremote_data = Event.stripe_class.retrieve(id=local_data[\"id\"], api_key=api_key)\n\n\t\treturn local_data[\"data\"] == remote_data[\"data\"]\n\n\tdef process(self, save=True):\n\t\t# Reset traceback and exception in case of reprocessing\n\t\tself.exception = \"\"\n\t\tself.traceback = \"\"\n\n\t\tself.event = Event.process(self.json_body)\n\t\tself.processed = True\n\t\tif save:\n\t\t\tself.save()\n\n\t\treturn self.event\n", "path": "djstripe/models/webhooks.py"}, {"content": "\"\"\"\nUtils related to processing or registering for webhooks\n\nA model registers itself here if it wants to be in the list of processing\nfunctions for a particular webhook. 
Each processor will have the ability\nto modify the event object, access event data, and do what it needs to do\n\nregistrations are keyed by top-level event type (e.g. \"invoice\", \"customer\", etc)\nEach registration entry is a list of processors\nEach processor in these lists is a function to be called\nThe function signature is:\n\t<Event object>\n\nThere is also a \"global registry\" which is just a list of processors (as defined above)\n\nNOTE: global processors are called before other processors.\n\"\"\"\nimport functools\nimport itertools\nfrom collections import defaultdict\n\n__all__ = [\"handler\", \"handler_all\", \"call_handlers\"]\n\n\nregistrations = defaultdict(list)\nregistrations_global = list()\n\nTEST_EVENT_ID = \"evt_00000000000000\"\n\n\ndef handler(*event_types):\n\t\"\"\"\n\tDecorator that registers a function as a webhook handler.\n\n\tFunctions can be registered for event types (e.g. 'customer') or\n\tfully qualified event sub-types (e.g. 'customer.subscription.deleted').\n\n\tIf an event type is specified, the handler will receive callbacks for\n\tALL webhook events of that type. For example, if 'customer' is specified,\n\tthe handler will receive events for 'customer.subscription.created',\n\t'customer.subscription.updated', etc.\n\n\t:param event_types: The event type(s) that should be handled.\n\t:type event_types: str.\n\t\"\"\"\n\n\tdef decorator(func):\n\t\tfor event_type in event_types:\n\t\t\tregistrations[event_type].append(func)\n\t\treturn func\n\n\treturn decorator\n\n\ndef handler_all(func=None):\n\t\"\"\"\n\tDecorator that registers a function as a webhook handler for ALL webhook events.\n\n\tHandles all webhooks regardless of event type or sub-type.\n\t\"\"\"\n\tif not func:\n\t\treturn functools.partial(handler_all)\n\n\tregistrations_global.append(func)\n\n\treturn func\n\n\ndef call_handlers(event):\n\t\"\"\"\n\tInvoke all handlers for the provided event type/sub-type.\n\n\tThe handlers are invoked in the following order:\n\n\t1. Global handlers\n\t2. Event type handlers\n\t3. Event sub-type handlers\n\n\tHandlers within each group are invoked in order of registration.\n\n\t:param event: The event model object.\n\t:type event: ``djstripe.models.Event``\n\t\"\"\"\n\tchain = [registrations_global]\n\n\t# Build up a list of handlers with each qualified part of the event\n\t# category and verb. For example, \"customer.subscription.created\" creates:\n\t# 1. \"customer\"\n\t# 2. \"customer.subscription\"\n\t# 3. \"customer.subscription.created\"\n\tfor index, _ in enumerate(event.parts):\n\t\tqualified_event_type = \".\".join(event.parts[: (index + 1)])\n\t\tchain.append(registrations[qualified_event_type])\n\n\tfor handler_func in itertools.chain(*chain):\n\t\thandler_func(event=event)\n", "path": "djstripe/webhooks.py"}], "after_files": [{"content": "import json\nimport warnings\nfrom traceback import format_exc\n\nimport stripe\nfrom django.db import models\nfrom django.utils.functional import cached_property\n\nfrom .. import settings as djstripe_settings\nfrom ..context_managers import stripe_temporary_api_version\nfrom ..fields import JSONField\nfrom ..signals import webhook_processing_error\nfrom ..utils import fix_django_headers\nfrom .base import logger\nfrom .core import Event\n\n\ndef _get_version():\n\tfrom .. 
import __version__\n\n\treturn __version__\n\n\nclass WebhookEventTrigger(models.Model):\n\t\"\"\"\n\tAn instance of a request that reached the server endpoint for Stripe webhooks.\n\n\tWebhook Events are initially **UNTRUSTED**, as it is possible for any web entity to\n\tpost any data to our webhook url. Data posted may be valid Stripe information, garbage, or even malicious.\n\tThe 'valid' flag in this model monitors this.\n\t\"\"\"\n\n\tid = models.BigAutoField(primary_key=True)\n\tremote_ip = models.GenericIPAddressField(help_text=\"IP address of the request client.\")\n\theaders = JSONField()\n\tbody = models.TextField(blank=True)\n\tvalid = models.BooleanField(\n\t\tdefault=False, help_text=\"Whether or not the webhook event has passed validation\"\n\t)\n\tprocessed = models.BooleanField(\n\t\tdefault=False,\n\t\thelp_text=\"Whether or not the webhook event has been successfully processed\",\n\t)\n\texception = models.CharField(max_length=128, blank=True)\n\ttraceback = models.TextField(\n\t\tblank=True, help_text=\"Traceback if an exception was thrown during processing\"\n\t)\n\tevent = models.ForeignKey(\n\t\t\"Event\",\n\t\ton_delete=models.SET_NULL,\n\t\tnull=True,\n\t\tblank=True,\n\t\thelp_text=\"Event object contained in the (valid) Webhook\",\n\t)\n\tdjstripe_version = models.CharField(\n\t\tmax_length=32,\n\t\tdefault=_get_version, # Needs to be a callable, otherwise it's a db default.\n\t\thelp_text=\"The version of dj-stripe when the webhook was received\",\n\t)\n\tcreated = models.DateTimeField(auto_now_add=True)\n\tupdated = models.DateTimeField(auto_now=True)\n\n\t@classmethod\n\tdef from_request(cls, request):\n\t\t\"\"\"\n\t\tCreate, validate and process a WebhookEventTrigger given a Django\n\t\trequest object.\n\n\t\tThe process is three-fold:\n\t\t1. Create a WebhookEventTrigger object from a Django request.\n\t\t2. Validate the WebhookEventTrigger as a Stripe event using the API.\n\t\t3. If valid, process it into an Event object (and child resource).\n\t\t\"\"\"\n\n\t\theaders = fix_django_headers(request.META)\n\t\tassert headers\n\t\ttry:\n\t\t\tbody = request.body.decode(request.encoding or \"utf-8\")\n\t\texcept Exception:\n\t\t\tbody = \"(error decoding body)\"\n\n\t\tip = request.META.get(\"REMOTE_ADDR\")\n\t\tif ip is None:\n\t\t\twarnings.warn(\n\t\t\t\t\"Could not determine remote IP (missing REMOTE_ADDR). 
\"\n\t\t\t\t\"This is likely an issue with your wsgi/server setup.\"\n\t\t\t)\n\t\t\tip = \"0.0.0.0\"\n\t\tobj = cls.objects.create(headers=headers, body=body, remote_ip=ip)\n\n\t\ttry:\n\t\t\tobj.valid = obj.validate()\n\t\t\tif obj.valid:\n\t\t\t\tif djstripe_settings.WEBHOOK_EVENT_CALLBACK:\n\t\t\t\t\t# If WEBHOOK_EVENT_CALLBACK, pass it for processing\n\t\t\t\t\tdjstripe_settings.WEBHOOK_EVENT_CALLBACK(obj)\n\t\t\t\telse:\n\t\t\t\t\t# Process the item (do not save it, it'll get saved below)\n\t\t\t\t\tobj.process(save=False)\n\t\texcept Exception as e:\n\t\t\tmax_length = WebhookEventTrigger._meta.get_field(\"exception\").max_length\n\t\t\tobj.exception = str(e)[:max_length]\n\t\t\tobj.traceback = format_exc()\n\n\t\t\t# Send the exception as the webhook_processing_error signal\n\t\t\twebhook_processing_error.send(\n\t\t\t\tsender=WebhookEventTrigger, exception=e, data=getattr(e, \"http_body\", \"\")\n\t\t\t)\n\n\t\t\t# re-raise the exception so Django sees it\n\t\t\traise e\n\t\tfinally:\n\t\t\tobj.save()\n\n\t\treturn obj\n\n\t@cached_property\n\tdef json_body(self):\n\t\ttry:\n\t\t\treturn json.loads(self.body)\n\t\texcept ValueError:\n\t\t\treturn {}\n\n\t@property\n\tdef is_test_event(self):\n\t\tevent_id = self.json_body.get(\"id\")\n\t\treturn event_id and event_id.endswith(\"_00000000000000\")\n\n\tdef validate(self, api_key=None):\n\t\t\"\"\"\n\t\tThe original contents of the Event message must be confirmed by\n\t\trefetching it and comparing the fetched data with the original data.\n\n\t\tThis function makes an API call to Stripe to redownload the Event data\n\t\tand returns whether or not it matches the WebhookEventTrigger data.\n\t\t\"\"\"\n\n\t\tlocal_data = self.json_body\n\t\tif \"id\" not in local_data or \"livemode\" not in local_data:\n\t\t\treturn False\n\n\t\tif self.is_test_event:\n\t\t\tlogger.info(\"Test webhook received: {}\".format(local_data))\n\t\t\treturn False\n\n\t\tif djstripe_settings.WEBHOOK_VALIDATION is None:\n\t\t\t# validation disabled\n\t\t\treturn True\n\t\telif (\n\t\t\tdjstripe_settings.WEBHOOK_VALIDATION == \"verify_signature\"\n\t\t\tand djstripe_settings.WEBHOOK_SECRET\n\t\t):\n\t\t\ttry:\n\t\t\t\tstripe.WebhookSignature.verify_header(\n\t\t\t\t\tself.body,\n\t\t\t\t\tself.headers.get(\"stripe-signature\"),\n\t\t\t\t\tdjstripe_settings.WEBHOOK_SECRET,\n\t\t\t\t\tdjstripe_settings.WEBHOOK_TOLERANCE,\n\t\t\t\t)\n\t\t\texcept stripe.error.SignatureVerificationError:\n\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\treturn True\n\n\t\tlivemode = local_data[\"livemode\"]\n\t\tapi_key = api_key or djstripe_settings.get_default_api_key(livemode)\n\n\t\t# Retrieve the event using the api_version specified in itself\n\t\twith stripe_temporary_api_version(local_data[\"api_version\"], validate=False):\n\t\t\tremote_data = Event.stripe_class.retrieve(id=local_data[\"id\"], api_key=api_key)\n\n\t\treturn local_data[\"data\"] == remote_data[\"data\"]\n\n\tdef process(self, save=True):\n\t\t# Reset traceback and exception in case of reprocessing\n\t\tself.exception = \"\"\n\t\tself.traceback = \"\"\n\n\t\tself.event = Event.process(self.json_body)\n\t\tself.processed = True\n\t\tif save:\n\t\t\tself.save()\n\n\t\treturn self.event\n", "path": "djstripe/models/webhooks.py"}, {"content": "\"\"\"\nUtils related to processing or registering for webhooks\n\nA model registers itself here if it wants to be in the list of processing\nfunctions for a particular webhook. 
Each processor will have the ability\nto modify the event object, access event data, and do what it needs to do\n\nregistrations are keyed by top-level event type (e.g. \"invoice\", \"customer\", etc)\nEach registration entry is a list of processors\nEach processor in these lists is a function to be called\nThe function signature is:\n\t<Event object>\n\nThere is also a \"global registry\" which is just a list of processors (as defined above)\n\nNOTE: global processors are called before other processors.\n\"\"\"\nimport functools\nimport itertools\nfrom collections import defaultdict\n\n__all__ = [\"handler\", \"handler_all\", \"call_handlers\"]\n\n\nregistrations = defaultdict(list)\nregistrations_global = list()\n\n# Legacy. In previous versions of Stripe API, all test events used this ID.\n# Check out issue #779 for more information.\nTEST_EVENT_ID = \"evt_00000000000000\"\n\n\ndef handler(*event_types):\n\t\"\"\"\n\tDecorator that registers a function as a webhook handler.\n\n\tFunctions can be registered for event types (e.g. 'customer') or\n\tfully qualified event sub-types (e.g. 'customer.subscription.deleted').\n\n\tIf an event type is specified, the handler will receive callbacks for\n\tALL webhook events of that type. For example, if 'customer' is specified,\n\tthe handler will receive events for 'customer.subscription.created',\n\t'customer.subscription.updated', etc.\n\n\t:param event_types: The event type(s) that should be handled.\n\t:type event_types: str.\n\t\"\"\"\n\n\tdef decorator(func):\n\t\tfor event_type in event_types:\n\t\t\tregistrations[event_type].append(func)\n\t\treturn func\n\n\treturn decorator\n\n\ndef handler_all(func=None):\n\t\"\"\"\n\tDecorator that registers a function as a webhook handler for ALL webhook events.\n\n\tHandles all webhooks regardless of event type or sub-type.\n\t\"\"\"\n\tif not func:\n\t\treturn functools.partial(handler_all)\n\n\tregistrations_global.append(func)\n\n\treturn func\n\n\ndef call_handlers(event):\n\t\"\"\"\n\tInvoke all handlers for the provided event type/sub-type.\n\n\tThe handlers are invoked in the following order:\n\n\t1. Global handlers\n\t2. Event type handlers\n\t3. Event sub-type handlers\n\n\tHandlers within each group are invoked in order of registration.\n\n\t:param event: The event model object.\n\t:type event: ``djstripe.models.Event``\n\t\"\"\"\n\tchain = [registrations_global]\n\n\t# Build up a list of handlers with each qualified part of the event\n\t# category and verb. For example, \"customer.subscription.created\" creates:\n\t# 1. \"customer\"\n\t# 2. \"customer.subscription\"\n\t# 3. \"customer.subscription.created\"\n\tfor index, _ in enumerate(event.parts):\n\t\tqualified_event_type = \".\".join(event.parts[: (index + 1)])\n\t\tchain.append(registrations[qualified_event_type])\n\n\tfor handler_func in itertools.chain(*chain):\n\t\thandler_func(event=event)\n", "path": "djstripe/webhooks.py"}]}
| 3,179 | 303 |
gh_patches_debug_36734
|
rasdani/github-patches
|
git_diff
|
buildbot__buildbot-648
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow self.master to be set so it can be used before service setup.
This also addresses a few testing issues, caches the master property,
and invalidates the cache on disownServiceParent. Fixes #3329.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `master/buildbot/db/schedulers.py`
Content:
```
1 # This file is part of Buildbot. Buildbot is free software: you can
2 # redistribute it and/or modify it under the terms of the GNU General Public
3 # License as published by the Free Software Foundation, version 2.
4 #
5 # This program is distributed in the hope that it will be useful, but WITHOUT
6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
8 # details.
9 #
10 # You should have received a copy of the GNU General Public License along with
11 # this program; if not, write to the Free Software Foundation, Inc., 51
12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
13 #
14 # Copyright Buildbot Team Members
15
16 import sqlalchemy as sa
17 import sqlalchemy.exc
18 from buildbot.db import base
19
20 class SchedulersConnectorComponent(base.DBConnectorComponent):
21 # Documentation is in developer/database.rst
22
23 def classifyChanges(self, objectid, classifications):
24 def thd(conn):
25 transaction = conn.begin()
26 tbl = self.db.model.scheduler_changes
27 ins_q = tbl.insert()
28 upd_q = tbl.update(
29 ((tbl.c.objectid == objectid)
30 & (tbl.c.changeid == sa.bindparam('wc_changeid'))))
31 for changeid, important in classifications.items():
32 # convert the 'important' value into an integer, since that
33 # is the column type
34 imp_int = important and 1 or 0
35 try:
36 conn.execute(ins_q,
37 objectid=objectid,
38 changeid=changeid,
39 important=imp_int)
40 except (sqlalchemy.exc.ProgrammingError,
41 sqlalchemy.exc.IntegrityError):
42 # insert failed, so try an update
43 conn.execute(upd_q,
44 wc_changeid=changeid,
45 important=imp_int)
46
47 transaction.commit()
48 return self.db.pool.do(thd)
49
50 def flushChangeClassifications(self, objectid, less_than=None):
51 def thd(conn):
52 sch_ch_tbl = self.db.model.scheduler_changes
53 wc = (sch_ch_tbl.c.objectid == objectid)
54 if less_than is not None:
55 wc = wc & (sch_ch_tbl.c.changeid < less_than)
56 q = sch_ch_tbl.delete(whereclause=wc)
57 conn.execute(q)
58 return self.db.pool.do(thd)
59
60 class Thunk: pass
61 def getChangeClassifications(self, objectid, branch=Thunk):
62 def thd(conn):
63 sch_ch_tbl = self.db.model.scheduler_changes
64 ch_tbl = self.db.model.changes
65
66 wc = (sch_ch_tbl.c.objectid == objectid)
67 if branch is not self.Thunk:
68 wc = wc & (
69 (sch_ch_tbl.c.changeid == ch_tbl.c.changeid) &
70 (ch_tbl.c.branch == branch))
71 q = sa.select(
72 [ sch_ch_tbl.c.changeid, sch_ch_tbl.c.important ],
73 whereclause=wc)
74 return dict([ (r.changeid, [False,True][r.important])
75 for r in conn.execute(q) ])
76 return self.db.pool.do(thd)
77
```
Path: `master/buildbot/schedulers/basic.py`
Content:
```
1 # This file is part of Buildbot. Buildbot is free software: you can
2 # redistribute it and/or modify it under the terms of the GNU General Public
3 # License as published by the Free Software Foundation, version 2.
4 #
5 # This program is distributed in the hope that it will be useful, but WITHOUT
6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
8 # details.
9 #
10 # You should have received a copy of the GNU General Public License along with
11 # this program; if not, write to the Free Software Foundation, Inc., 51
12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
13 #
14 # Copyright Buildbot Team Members
15
16 from twisted.internet import defer, reactor
17 from twisted.python import log
18 from buildbot import util, config
19 from buildbot.util import NotABranch
20 from collections import defaultdict
21 from buildbot.changes import filter, changes
22 from buildbot.schedulers import base, dependent
23
24 class BaseBasicScheduler(base.BaseScheduler):
25 """
26 @param onlyImportant: If True, only important changes will be added to the
27 buildset.
28 @type onlyImportant: boolean
29
30 """
31
32 compare_attrs = (base.BaseScheduler.compare_attrs +
33 ('treeStableTimer', 'change_filter', 'fileIsImportant',
34 'onlyImportant') )
35
36 _reactor = reactor # for tests
37
38 fileIsImportant = None
39
40 class NotSet: pass
41 def __init__(self, name, shouldntBeSet=NotSet, treeStableTimer=None,
42 builderNames=None, branch=NotABranch, branches=NotABranch,
43 fileIsImportant=None, properties={}, categories=None,
44 change_filter=None, onlyImportant=False, **kwargs):
45 if shouldntBeSet is not self.NotSet:
46 config.error(
47 "pass arguments to schedulers using keyword arguments")
48 if fileIsImportant and not callable(fileIsImportant):
49 config.error(
50 "fileIsImportant must be a callable")
51
52 # initialize parent classes
53 base.BaseScheduler.__init__(self, name, builderNames, properties, **kwargs)
54
55 self.treeStableTimer = treeStableTimer
56 if fileIsImportant is not None:
57 self.fileIsImportant = fileIsImportant
58 self.onlyImportant = onlyImportant
59 self.change_filter = self.getChangeFilter(branch=branch,
60 branches=branches, change_filter=change_filter,
61 categories=categories)
62
63 # the IDelayedCall used to wake up when this scheduler's
64 # treeStableTimer expires.
65 self._stable_timers = defaultdict(lambda : None)
66 self._stable_timers_lock = defer.DeferredLock()
67
68 def getChangeFilter(self, branch, branches, change_filter, categories):
69 raise NotImplementedError
70
71 def startService(self, _returnDeferred=False):
72 base.BaseScheduler.startService(self)
73
74 d = self.startConsumingChanges(fileIsImportant=self.fileIsImportant,
75 change_filter=self.change_filter,
76 onlyImportant=self.onlyImportant)
77
78 # if treeStableTimer is False, then we don't care about classified
79 # changes, so get rid of any hanging around from previous
80 # configurations
81 if not self.treeStableTimer:
82 d.addCallback(lambda _ :
83 self.master.db.schedulers.flushChangeClassifications(
84 self.objectid))
85
86 # otherwise, if there are classified changes out there, start their
87 # treeStableTimers again
88 else:
89 d.addCallback(lambda _ :
90 self.scanExistingClassifiedChanges())
91
92 # handle Deferred errors, since startService does not return a Deferred
93 d.addErrback(log.err, "while starting SingleBranchScheduler '%s'"
94 % self.name)
95
96 if _returnDeferred:
97 return d # only used in tests
98
99 def stopService(self):
100 # the base stopService will unsubscribe from new changes
101 d = base.BaseScheduler.stopService(self)
102 @util.deferredLocked(self._stable_timers_lock)
103 def cancel_timers(_):
104 for timer in self._stable_timers.values():
105 if timer:
106 timer.cancel()
107 self._stable_timers.clear()
108 d.addCallback(cancel_timers)
109 return d
110
111 @util.deferredLocked('_stable_timers_lock')
112 def gotChange(self, change, important):
113 if not self.treeStableTimer:
114 # if there's no treeStableTimer, we can completely ignore
115 # unimportant changes
116 if not important:
117 return defer.succeed(None)
118 # otherwise, we'll build it right away
119 return self.addBuildsetForChanges(reason='scheduler',
120 changeids=[ change.number ])
121
122 timer_name = self.getTimerNameForChange(change)
123
124 # if we have a treeStableTimer, then record the change's importance
125 # and:
126 # - for an important change, start the timer
127 # - for an unimportant change, reset the timer if it is running
128 d = self.master.db.schedulers.classifyChanges(
129 self.objectid, { change.number : important })
130 def fix_timer(_):
131 if not important and not self._stable_timers[timer_name]:
132 return
133 if self._stable_timers[timer_name]:
134 self._stable_timers[timer_name].cancel()
135 def fire_timer():
136 d = self.stableTimerFired(timer_name)
137 d.addErrback(log.err, "while firing stable timer")
138 self._stable_timers[timer_name] = self._reactor.callLater(
139 self.treeStableTimer, fire_timer)
140 d.addCallback(fix_timer)
141 return d
142
143 @defer.inlineCallbacks
144 def scanExistingClassifiedChanges(self):
145 # call gotChange for each classified change. This is called at startup
146 # and is intended to re-start the treeStableTimer for any changes that
147 # had not yet been built when the scheduler was stopped.
148
149 # NOTE: this may double-call gotChange for changes that arrive just as
150 # the scheduler starts up. In practice, this doesn't hurt anything.
151 classifications = \
152 yield self.master.db.schedulers.getChangeClassifications(
153 self.objectid)
154
155 # call gotChange for each change, after first fetching it from the db
156 for changeid, important in classifications.iteritems():
157 chdict = yield self.master.db.changes.getChange(changeid)
158
159 if not chdict:
160 continue
161
162 change = yield changes.Change.fromChdict(self.master, chdict)
163 yield self.gotChange(change, important)
164
165 def getTimerNameForChange(self, change):
166 raise NotImplementedError # see subclasses
167
168 def getChangeClassificationsForTimer(self, objectid, timer_name):
169 """similar to db.schedulers.getChangeClassifications, but given timer
170 name"""
171 raise NotImplementedError # see subclasses
172
173 @util.deferredLocked('_stable_timers_lock')
174 @defer.inlineCallbacks
175 def stableTimerFired(self, timer_name):
176 # if the service has already been stoppd then just bail out
177 if not self._stable_timers[timer_name]:
178 return
179
180 # delete this now-fired timer
181 del self._stable_timers[timer_name]
182
183 classifications = \
184 yield self.getChangeClassificationsForTimer(self.objectid,
185 timer_name)
186
187 # just in case: databases do weird things sometimes!
188 if not classifications: # pragma: no cover
189 return
190
191 changeids = sorted(classifications.keys())
192 yield self.addBuildsetForChanges(reason='scheduler',
193 changeids=changeids)
194
195 max_changeid = changeids[-1] # (changeids are sorted)
196 yield self.master.db.schedulers.flushChangeClassifications(
197 self.objectid, less_than=max_changeid+1)
198
199 def getPendingBuildTimes(self):
200 # This isn't locked, since the caller expects and immediate value,
201 # and in any case, this is only an estimate.
202 return [timer.getTime() for timer in self._stable_timers.values() if timer and timer.active()]
203
204 class SingleBranchScheduler(BaseBasicScheduler):
205 def getChangeFilter(self, branch, branches, change_filter, categories):
206 if branch is NotABranch and not change_filter:
207 config.error(
208 "the 'branch' argument to SingleBranchScheduler is " +
209 "mandatory unless change_filter is provided")
210 elif branches is not NotABranch:
211 config.error(
212 "the 'branches' argument is not allowed for " +
213 "SingleBranchScheduler")
214
215
216 return filter.ChangeFilter.fromSchedulerConstructorArgs(
217 change_filter=change_filter, branch=branch,
218 categories=categories)
219
220 def getTimerNameForChange(self, change):
221 return "only" # this class only uses one timer
222
223 def getChangeClassificationsForTimer(self, objectid, timer_name):
224 return self.master.db.schedulers.getChangeClassifications(
225 self.objectid)
226
227
228 class Scheduler(SingleBranchScheduler):
229 "alias for SingleBranchScheduler"
230 def __init__(self, *args, **kwargs):
231 log.msg("WARNING: the name 'Scheduler' is deprecated; use " +
232 "buildbot.schedulers.basic.SingleBranchScheduler instead " +
233 "(note that this may require you to change your import " +
234 "statement)")
235 SingleBranchScheduler.__init__(self, *args, **kwargs)
236
237
238 class AnyBranchScheduler(BaseBasicScheduler):
239 def getChangeFilter(self, branch, branches, change_filter, categories):
240 assert branch is NotABranch
241 return filter.ChangeFilter.fromSchedulerConstructorArgs(
242 change_filter=change_filter, branch=branches,
243 categories=categories)
244
245 def getTimerNameForChange(self, change):
246 return change.branch
247
248 def getChangeClassificationsForTimer(self, objectid, timer_name):
249 branch = timer_name # set in getTimerNameForChange
250 return self.master.db.schedulers.getChangeClassifications(
251 self.objectid, branch=branch)
252
253 # now at buildbot.schedulers.dependent, but keep the old name alive
254 Dependent = dependent.Dependent
255
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/master/buildbot/db/schedulers.py b/master/buildbot/db/schedulers.py
--- a/master/buildbot/db/schedulers.py
+++ b/master/buildbot/db/schedulers.py
@@ -58,16 +58,33 @@
return self.db.pool.do(thd)
class Thunk: pass
- def getChangeClassifications(self, objectid, branch=Thunk):
+ def getChangeClassifications(self, objectid, branch=Thunk,
+ repository=Thunk, project=Thunk,
+ codebase=Thunk):
def thd(conn):
sch_ch_tbl = self.db.model.scheduler_changes
ch_tbl = self.db.model.changes
wc = (sch_ch_tbl.c.objectid == objectid)
+
+ # may need to filter further based on branch, etc
+ extra_wheres = []
if branch is not self.Thunk:
- wc = wc & (
- (sch_ch_tbl.c.changeid == ch_tbl.c.changeid) &
- (ch_tbl.c.branch == branch))
+ extra_wheres.append(ch_tbl.c.branch == branch)
+ if repository is not self.Thunk:
+ extra_wheres.append(ch_tbl.c.repository == repository)
+ if project is not self.Thunk:
+ extra_wheres.append(ch_tbl.c.project == project)
+ if codebase is not self.Thunk:
+ extra_wheres.append(ch_tbl.c.codebase == codebase)
+
+ # if we need to filter further append those, as well as a join
+ # on changeid (but just once for that one)
+ if extra_wheres:
+ wc &= (sch_ch_tbl.c.changeid == ch_tbl.c.changeid)
+ for w in extra_wheres:
+ wc &= w
+
q = sa.select(
[ sch_ch_tbl.c.changeid, sch_ch_tbl.c.important ],
whereclause=wc)
diff --git a/master/buildbot/schedulers/basic.py b/master/buildbot/schedulers/basic.py
--- a/master/buildbot/schedulers/basic.py
+++ b/master/buildbot/schedulers/basic.py
@@ -243,12 +243,14 @@
categories=categories)
def getTimerNameForChange(self, change):
- return change.branch
+ # Py2.6+: could be a namedtuple
+ return (change.codebase, change.project, change.repository, change.branch)
def getChangeClassificationsForTimer(self, objectid, timer_name):
- branch = timer_name # set in getTimerNameForChange
+ codebase, project, repository, branch = timer_name # set in getTimerNameForChange
return self.master.db.schedulers.getChangeClassifications(
- self.objectid, branch=branch)
+ self.objectid, branch=branch, repository=repository,
+ codebase=codebase, project=project)
# now at buildbot.schedulers.dependent, but keep the old name alive
Dependent = dependent.Dependent
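
A minimal, self-contained sketch of what the widened timer key in this patch changes, with a namedtuple standing in for buildbot's Change object (the attribute names are assumed to match those read in the diff):

```python
from collections import namedtuple

# stand-in for buildbot's Change; only the attributes the patched code reads
Change = namedtuple("Change", "codebase project repository branch")


def timer_key(change):
    # mirrors the patched AnyBranchScheduler.getTimerNameForChange
    return (change.codebase, change.project, change.repository, change.branch)


a = Change("", "proj-a", "git://example.org/a.git", "master")
b = Change("", "proj-b", "git://example.org/b.git", "master")

# Before the patch the key was just change.branch, so these two changes would
# have shared one treeStableTimer and one classification query.
print(a.branch == b.branch)           # True  -> old key collides
print(timer_key(a) == timer_key(b))   # False -> patched key keeps them separate
```

The matching `getChangeClassificationsForTimer` change unpacks the same tuple and forwards `repository`, `project` and `codebase` to `getChangeClassifications`, which the `db/schedulers.py` half of the diff extends to accept those filters.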
|
{"golden_diff": "diff --git a/master/buildbot/db/schedulers.py b/master/buildbot/db/schedulers.py\n--- a/master/buildbot/db/schedulers.py\n+++ b/master/buildbot/db/schedulers.py\n@@ -58,16 +58,33 @@\n return self.db.pool.do(thd)\n \n class Thunk: pass\n- def getChangeClassifications(self, objectid, branch=Thunk):\n+ def getChangeClassifications(self, objectid, branch=Thunk,\n+ repository=Thunk, project=Thunk,\n+ codebase=Thunk):\n def thd(conn):\n sch_ch_tbl = self.db.model.scheduler_changes\n ch_tbl = self.db.model.changes\n \n wc = (sch_ch_tbl.c.objectid == objectid)\n+ \n+ # may need to filter further based on branch, etc\n+ extra_wheres = [] \n if branch is not self.Thunk:\n- wc = wc & (\n- (sch_ch_tbl.c.changeid == ch_tbl.c.changeid) &\n- (ch_tbl.c.branch == branch))\n+ extra_wheres.append(ch_tbl.c.branch == branch)\n+ if repository is not self.Thunk:\n+ extra_wheres.append(ch_tbl.c.repository == repository)\n+ if project is not self.Thunk:\n+ extra_wheres.append(ch_tbl.c.project == project)\n+ if codebase is not self.Thunk:\n+ extra_wheres.append(ch_tbl.c.codebase == codebase)\n+\n+ # if we need to filter further append those, as well as a join\n+ # on changeid (but just once for that one)\n+ if extra_wheres:\n+ wc &= (sch_ch_tbl.c.changeid == ch_tbl.c.changeid)\n+ for w in extra_wheres:\n+ wc &= w\n+\n q = sa.select(\n [ sch_ch_tbl.c.changeid, sch_ch_tbl.c.important ],\n whereclause=wc)\ndiff --git a/master/buildbot/schedulers/basic.py b/master/buildbot/schedulers/basic.py\n--- a/master/buildbot/schedulers/basic.py\n+++ b/master/buildbot/schedulers/basic.py\n@@ -243,12 +243,14 @@\n categories=categories)\n \n def getTimerNameForChange(self, change):\n- return change.branch\n+ # Py2.6+: could be a namedtuple\n+ return (change.codebase, change.project, change.repository, change.branch)\n \n def getChangeClassificationsForTimer(self, objectid, timer_name):\n- branch = timer_name # set in getTimerNameForChange\n+ codebase, project, repository, branch = timer_name # set in getTimerNameForChange\n return self.master.db.schedulers.getChangeClassifications(\n- self.objectid, branch=branch)\n+ self.objectid, branch=branch, repository=repository,\n+ codebase=codebase, project=project)\n \n # now at buildbot.schedulers.dependent, but keep the old name alive\n Dependent = dependent.Dependent\n", "issue": "Allow self.master to be set so it can be used before service setup.\nThis also addresses a few testing issues, caches the master property,\nand invalidates the cache on disownServiceParent. Fixes #3329.\n\n", "before_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nimport sqlalchemy as sa\nimport sqlalchemy.exc\nfrom buildbot.db import base\n\nclass SchedulersConnectorComponent(base.DBConnectorComponent):\n # Documentation is in developer/database.rst\n\n def classifyChanges(self, objectid, classifications):\n def thd(conn):\n transaction = conn.begin()\n tbl = self.db.model.scheduler_changes\n ins_q = tbl.insert()\n upd_q = tbl.update(\n ((tbl.c.objectid == objectid)\n & (tbl.c.changeid == sa.bindparam('wc_changeid'))))\n for changeid, important in classifications.items():\n # convert the 'important' value into an integer, since that\n # is the column type\n imp_int = important and 1 or 0\n try:\n conn.execute(ins_q,\n objectid=objectid,\n changeid=changeid,\n important=imp_int)\n except (sqlalchemy.exc.ProgrammingError,\n sqlalchemy.exc.IntegrityError):\n # insert failed, so try an update\n conn.execute(upd_q,\n wc_changeid=changeid,\n important=imp_int)\n\n transaction.commit()\n return self.db.pool.do(thd)\n\n def flushChangeClassifications(self, objectid, less_than=None):\n def thd(conn):\n sch_ch_tbl = self.db.model.scheduler_changes\n wc = (sch_ch_tbl.c.objectid == objectid)\n if less_than is not None:\n wc = wc & (sch_ch_tbl.c.changeid < less_than)\n q = sch_ch_tbl.delete(whereclause=wc)\n conn.execute(q)\n return self.db.pool.do(thd)\n\n class Thunk: pass\n def getChangeClassifications(self, objectid, branch=Thunk):\n def thd(conn):\n sch_ch_tbl = self.db.model.scheduler_changes\n ch_tbl = self.db.model.changes\n\n wc = (sch_ch_tbl.c.objectid == objectid)\n if branch is not self.Thunk:\n wc = wc & (\n (sch_ch_tbl.c.changeid == ch_tbl.c.changeid) &\n (ch_tbl.c.branch == branch))\n q = sa.select(\n [ sch_ch_tbl.c.changeid, sch_ch_tbl.c.important ],\n whereclause=wc)\n return dict([ (r.changeid, [False,True][r.important])\n for r in conn.execute(q) ])\n return self.db.pool.do(thd)\n", "path": "master/buildbot/db/schedulers.py"}, {"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nfrom twisted.internet import defer, reactor\nfrom twisted.python import log\nfrom buildbot import util, config\nfrom buildbot.util import NotABranch\nfrom collections import defaultdict\nfrom buildbot.changes import filter, changes\nfrom buildbot.schedulers import base, dependent\n\nclass BaseBasicScheduler(base.BaseScheduler):\n \"\"\"\n @param onlyImportant: If True, only important changes will be added to the\n buildset.\n @type onlyImportant: boolean\n\n \"\"\"\n\n compare_attrs = (base.BaseScheduler.compare_attrs +\n ('treeStableTimer', 'change_filter', 'fileIsImportant',\n 'onlyImportant') )\n\n _reactor = reactor # for tests\n\n fileIsImportant = None\n\n class NotSet: pass\n def __init__(self, name, shouldntBeSet=NotSet, treeStableTimer=None,\n builderNames=None, branch=NotABranch, branches=NotABranch,\n fileIsImportant=None, properties={}, categories=None,\n change_filter=None, onlyImportant=False, **kwargs):\n if shouldntBeSet is not self.NotSet:\n config.error(\n \"pass arguments to schedulers using keyword arguments\")\n if fileIsImportant and not callable(fileIsImportant):\n config.error(\n \"fileIsImportant must be a callable\")\n\n # initialize parent classes\n base.BaseScheduler.__init__(self, name, builderNames, properties, **kwargs)\n\n self.treeStableTimer = treeStableTimer\n if fileIsImportant is not None:\n self.fileIsImportant = fileIsImportant\n self.onlyImportant = onlyImportant\n self.change_filter = self.getChangeFilter(branch=branch,\n branches=branches, change_filter=change_filter,\n categories=categories)\n\n # the IDelayedCall used to wake up when this scheduler's\n # treeStableTimer expires.\n self._stable_timers = defaultdict(lambda : None)\n self._stable_timers_lock = defer.DeferredLock()\n\n def getChangeFilter(self, branch, branches, change_filter, categories):\n raise NotImplementedError\n\n def startService(self, _returnDeferred=False):\n base.BaseScheduler.startService(self)\n\n d = self.startConsumingChanges(fileIsImportant=self.fileIsImportant,\n change_filter=self.change_filter,\n onlyImportant=self.onlyImportant)\n\n # if treeStableTimer is False, then we don't care about classified\n # changes, so get rid of any hanging around from previous\n # configurations\n if not self.treeStableTimer:\n d.addCallback(lambda _ :\n self.master.db.schedulers.flushChangeClassifications(\n self.objectid))\n\n # otherwise, if there are classified changes out there, start their\n # treeStableTimers again\n else:\n d.addCallback(lambda _ :\n self.scanExistingClassifiedChanges())\n\n # handle Deferred errors, since startService does not return a Deferred\n d.addErrback(log.err, \"while starting SingleBranchScheduler '%s'\"\n % self.name)\n\n if _returnDeferred:\n return d # only used in tests\n\n def stopService(self):\n # the base stopService will unsubscribe from new changes\n d = base.BaseScheduler.stopService(self)\n @util.deferredLocked(self._stable_timers_lock)\n def cancel_timers(_):\n for timer in self._stable_timers.values():\n if timer:\n timer.cancel()\n self._stable_timers.clear()\n d.addCallback(cancel_timers)\n return d\n\n @util.deferredLocked('_stable_timers_lock')\n def gotChange(self, change, important):\n if not self.treeStableTimer:\n # if 
there's no treeStableTimer, we can completely ignore\n # unimportant changes\n if not important:\n return defer.succeed(None)\n # otherwise, we'll build it right away\n return self.addBuildsetForChanges(reason='scheduler',\n changeids=[ change.number ])\n\n timer_name = self.getTimerNameForChange(change)\n\n # if we have a treeStableTimer, then record the change's importance\n # and:\n # - for an important change, start the timer\n # - for an unimportant change, reset the timer if it is running\n d = self.master.db.schedulers.classifyChanges(\n self.objectid, { change.number : important })\n def fix_timer(_):\n if not important and not self._stable_timers[timer_name]:\n return\n if self._stable_timers[timer_name]:\n self._stable_timers[timer_name].cancel()\n def fire_timer():\n d = self.stableTimerFired(timer_name)\n d.addErrback(log.err, \"while firing stable timer\")\n self._stable_timers[timer_name] = self._reactor.callLater(\n self.treeStableTimer, fire_timer)\n d.addCallback(fix_timer)\n return d\n\n @defer.inlineCallbacks\n def scanExistingClassifiedChanges(self):\n # call gotChange for each classified change. This is called at startup\n # and is intended to re-start the treeStableTimer for any changes that\n # had not yet been built when the scheduler was stopped.\n\n # NOTE: this may double-call gotChange for changes that arrive just as\n # the scheduler starts up. In practice, this doesn't hurt anything.\n classifications = \\\n yield self.master.db.schedulers.getChangeClassifications(\n self.objectid)\n\n # call gotChange for each change, after first fetching it from the db\n for changeid, important in classifications.iteritems():\n chdict = yield self.master.db.changes.getChange(changeid)\n\n if not chdict:\n continue\n\n change = yield changes.Change.fromChdict(self.master, chdict)\n yield self.gotChange(change, important)\n\n def getTimerNameForChange(self, change):\n raise NotImplementedError # see subclasses\n\n def getChangeClassificationsForTimer(self, objectid, timer_name):\n \"\"\"similar to db.schedulers.getChangeClassifications, but given timer\n name\"\"\"\n raise NotImplementedError # see subclasses\n\n @util.deferredLocked('_stable_timers_lock')\n @defer.inlineCallbacks\n def stableTimerFired(self, timer_name):\n # if the service has already been stoppd then just bail out\n if not self._stable_timers[timer_name]:\n return\n\n # delete this now-fired timer\n del self._stable_timers[timer_name]\n\n classifications = \\\n yield self.getChangeClassificationsForTimer(self.objectid,\n timer_name)\n\n # just in case: databases do weird things sometimes!\n if not classifications: # pragma: no cover\n return\n\n changeids = sorted(classifications.keys())\n yield self.addBuildsetForChanges(reason='scheduler',\n changeids=changeids)\n\n max_changeid = changeids[-1] # (changeids are sorted)\n yield self.master.db.schedulers.flushChangeClassifications(\n self.objectid, less_than=max_changeid+1)\n\n def getPendingBuildTimes(self):\n # This isn't locked, since the caller expects and immediate value,\n # and in any case, this is only an estimate.\n return [timer.getTime() for timer in self._stable_timers.values() if timer and timer.active()]\n\nclass SingleBranchScheduler(BaseBasicScheduler):\n def getChangeFilter(self, branch, branches, change_filter, categories):\n if branch is NotABranch and not change_filter:\n config.error(\n \"the 'branch' argument to SingleBranchScheduler is \" +\n \"mandatory unless change_filter is provided\")\n elif branches is not NotABranch:\n 
config.error(\n \"the 'branches' argument is not allowed for \" +\n \"SingleBranchScheduler\")\n\n\n return filter.ChangeFilter.fromSchedulerConstructorArgs(\n change_filter=change_filter, branch=branch,\n categories=categories)\n\n def getTimerNameForChange(self, change):\n return \"only\" # this class only uses one timer\n\n def getChangeClassificationsForTimer(self, objectid, timer_name):\n return self.master.db.schedulers.getChangeClassifications(\n self.objectid)\n\n\nclass Scheduler(SingleBranchScheduler):\n \"alias for SingleBranchScheduler\"\n def __init__(self, *args, **kwargs):\n log.msg(\"WARNING: the name 'Scheduler' is deprecated; use \" +\n \"buildbot.schedulers.basic.SingleBranchScheduler instead \" +\n \"(note that this may require you to change your import \" +\n \"statement)\")\n SingleBranchScheduler.__init__(self, *args, **kwargs)\n\n\nclass AnyBranchScheduler(BaseBasicScheduler):\n def getChangeFilter(self, branch, branches, change_filter, categories):\n assert branch is NotABranch\n return filter.ChangeFilter.fromSchedulerConstructorArgs(\n change_filter=change_filter, branch=branches,\n categories=categories)\n\n def getTimerNameForChange(self, change):\n return change.branch\n\n def getChangeClassificationsForTimer(self, objectid, timer_name):\n branch = timer_name # set in getTimerNameForChange\n return self.master.db.schedulers.getChangeClassifications(\n self.objectid, branch=branch)\n\n# now at buildbot.schedulers.dependent, but keep the old name alive\nDependent = dependent.Dependent\n", "path": "master/buildbot/schedulers/basic.py"}], "after_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nimport sqlalchemy as sa\nimport sqlalchemy.exc\nfrom buildbot.db import base\n\nclass SchedulersConnectorComponent(base.DBConnectorComponent):\n # Documentation is in developer/database.rst\n\n def classifyChanges(self, objectid, classifications):\n def thd(conn):\n transaction = conn.begin()\n tbl = self.db.model.scheduler_changes\n ins_q = tbl.insert()\n upd_q = tbl.update(\n ((tbl.c.objectid == objectid)\n & (tbl.c.changeid == sa.bindparam('wc_changeid'))))\n for changeid, important in classifications.items():\n # convert the 'important' value into an integer, since that\n # is the column type\n imp_int = important and 1 or 0\n try:\n conn.execute(ins_q,\n objectid=objectid,\n changeid=changeid,\n important=imp_int)\n except (sqlalchemy.exc.ProgrammingError,\n sqlalchemy.exc.IntegrityError):\n # insert failed, so try an update\n conn.execute(upd_q,\n wc_changeid=changeid,\n important=imp_int)\n\n transaction.commit()\n return self.db.pool.do(thd)\n\n def flushChangeClassifications(self, objectid, less_than=None):\n def thd(conn):\n sch_ch_tbl = self.db.model.scheduler_changes\n wc = (sch_ch_tbl.c.objectid == objectid)\n if less_than is not None:\n wc = wc & (sch_ch_tbl.c.changeid < less_than)\n q = sch_ch_tbl.delete(whereclause=wc)\n conn.execute(q)\n return self.db.pool.do(thd)\n\n class Thunk: pass\n def getChangeClassifications(self, objectid, branch=Thunk,\n repository=Thunk, project=Thunk,\n codebase=Thunk):\n def thd(conn):\n sch_ch_tbl = self.db.model.scheduler_changes\n ch_tbl = self.db.model.changes\n\n wc = (sch_ch_tbl.c.objectid == objectid)\n \n # may need to filter further based on branch, etc\n extra_wheres = [] \n if branch is not self.Thunk:\n extra_wheres.append(ch_tbl.c.branch == branch)\n if repository is not self.Thunk:\n extra_wheres.append(ch_tbl.c.repository == repository)\n if project is not self.Thunk:\n extra_wheres.append(ch_tbl.c.project == project)\n if codebase is not self.Thunk:\n extra_wheres.append(ch_tbl.c.codebase == codebase)\n\n # if we need to filter further append those, as well as a join\n # on changeid (but just once for that one)\n if extra_wheres:\n wc &= (sch_ch_tbl.c.changeid == ch_tbl.c.changeid)\n for w in extra_wheres:\n wc &= w\n\n q = sa.select(\n [ sch_ch_tbl.c.changeid, sch_ch_tbl.c.important ],\n whereclause=wc)\n return dict([ (r.changeid, [False,True][r.important])\n for r in conn.execute(q) ])\n return self.db.pool.do(thd)\n", "path": "master/buildbot/db/schedulers.py"}, {"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nfrom twisted.internet import defer, reactor\nfrom twisted.python import log\nfrom buildbot import util, config\nfrom buildbot.util import NotABranch\nfrom collections import defaultdict\nfrom buildbot.changes import filter, changes\nfrom buildbot.schedulers import base, dependent\n\nclass BaseBasicScheduler(base.BaseScheduler):\n \"\"\"\n @param onlyImportant: If True, only important changes will be added to the\n buildset.\n @type onlyImportant: boolean\n\n \"\"\"\n\n compare_attrs = (base.BaseScheduler.compare_attrs +\n ('treeStableTimer', 'change_filter', 'fileIsImportant',\n 'onlyImportant') )\n\n _reactor = reactor # for tests\n\n fileIsImportant = None\n\n class NotSet: pass\n def __init__(self, name, shouldntBeSet=NotSet, treeStableTimer=None,\n builderNames=None, branch=NotABranch, branches=NotABranch,\n fileIsImportant=None, properties={}, categories=None,\n change_filter=None, onlyImportant=False, **kwargs):\n if shouldntBeSet is not self.NotSet:\n config.error(\n \"pass arguments to schedulers using keyword arguments\")\n if fileIsImportant and not callable(fileIsImportant):\n config.error(\n \"fileIsImportant must be a callable\")\n\n # initialize parent classes\n base.BaseScheduler.__init__(self, name, builderNames, properties, **kwargs)\n\n self.treeStableTimer = treeStableTimer\n if fileIsImportant is not None:\n self.fileIsImportant = fileIsImportant\n self.onlyImportant = onlyImportant\n self.change_filter = self.getChangeFilter(branch=branch,\n branches=branches, change_filter=change_filter,\n categories=categories)\n\n # the IDelayedCall used to wake up when this scheduler's\n # treeStableTimer expires.\n self._stable_timers = defaultdict(lambda : None)\n self._stable_timers_lock = defer.DeferredLock()\n\n def getChangeFilter(self, branch, branches, change_filter, categories):\n raise NotImplementedError\n\n def startService(self, _returnDeferred=False):\n base.BaseScheduler.startService(self)\n\n d = self.startConsumingChanges(fileIsImportant=self.fileIsImportant,\n change_filter=self.change_filter,\n onlyImportant=self.onlyImportant)\n\n # if treeStableTimer is False, then we don't care about classified\n # changes, so get rid of any hanging around from previous\n # configurations\n if not self.treeStableTimer:\n d.addCallback(lambda _ :\n self.master.db.schedulers.flushChangeClassifications(\n self.objectid))\n\n # otherwise, if there are classified changes out there, start their\n # treeStableTimers again\n else:\n d.addCallback(lambda _ :\n self.scanExistingClassifiedChanges())\n\n # handle Deferred errors, since startService does not return a Deferred\n d.addErrback(log.err, \"while starting SingleBranchScheduler '%s'\"\n % self.name)\n\n if _returnDeferred:\n return d # only used in tests\n\n def stopService(self):\n # the base stopService will unsubscribe from new changes\n d = base.BaseScheduler.stopService(self)\n @util.deferredLocked(self._stable_timers_lock)\n def cancel_timers(_):\n for timer in self._stable_timers.values():\n if timer:\n timer.cancel()\n self._stable_timers.clear()\n d.addCallback(cancel_timers)\n return d\n\n @util.deferredLocked('_stable_timers_lock')\n def gotChange(self, change, important):\n if not self.treeStableTimer:\n # if 
there's no treeStableTimer, we can completely ignore\n # unimportant changes\n if not important:\n return defer.succeed(None)\n # otherwise, we'll build it right away\n return self.addBuildsetForChanges(reason='scheduler',\n changeids=[ change.number ])\n\n timer_name = self.getTimerNameForChange(change)\n\n # if we have a treeStableTimer, then record the change's importance\n # and:\n # - for an important change, start the timer\n # - for an unimportant change, reset the timer if it is running\n d = self.master.db.schedulers.classifyChanges(\n self.objectid, { change.number : important })\n def fix_timer(_):\n if not important and not self._stable_timers[timer_name]:\n return\n if self._stable_timers[timer_name]:\n self._stable_timers[timer_name].cancel()\n def fire_timer():\n d = self.stableTimerFired(timer_name)\n d.addErrback(log.err, \"while firing stable timer\")\n self._stable_timers[timer_name] = self._reactor.callLater(\n self.treeStableTimer, fire_timer)\n d.addCallback(fix_timer)\n return d\n\n @defer.inlineCallbacks\n def scanExistingClassifiedChanges(self):\n # call gotChange for each classified change. This is called at startup\n # and is intended to re-start the treeStableTimer for any changes that\n # had not yet been built when the scheduler was stopped.\n\n # NOTE: this may double-call gotChange for changes that arrive just as\n # the scheduler starts up. In practice, this doesn't hurt anything.\n classifications = \\\n yield self.master.db.schedulers.getChangeClassifications(\n self.objectid)\n\n # call gotChange for each change, after first fetching it from the db\n for changeid, important in classifications.iteritems():\n chdict = yield self.master.db.changes.getChange(changeid)\n\n if not chdict:\n continue\n\n change = yield changes.Change.fromChdict(self.master, chdict)\n yield self.gotChange(change, important)\n\n def getTimerNameForChange(self, change):\n raise NotImplementedError # see subclasses\n\n def getChangeClassificationsForTimer(self, objectid, timer_name):\n \"\"\"similar to db.schedulers.getChangeClassifications, but given timer\n name\"\"\"\n raise NotImplementedError # see subclasses\n\n @util.deferredLocked('_stable_timers_lock')\n @defer.inlineCallbacks\n def stableTimerFired(self, timer_name):\n # if the service has already been stoppd then just bail out\n if not self._stable_timers[timer_name]:\n return\n\n # delete this now-fired timer\n del self._stable_timers[timer_name]\n\n classifications = \\\n yield self.getChangeClassificationsForTimer(self.objectid,\n timer_name)\n\n # just in case: databases do weird things sometimes!\n if not classifications: # pragma: no cover\n return\n\n changeids = sorted(classifications.keys())\n yield self.addBuildsetForChanges(reason='scheduler',\n changeids=changeids)\n\n max_changeid = changeids[-1] # (changeids are sorted)\n yield self.master.db.schedulers.flushChangeClassifications(\n self.objectid, less_than=max_changeid+1)\n\n def getPendingBuildTimes(self):\n # This isn't locked, since the caller expects and immediate value,\n # and in any case, this is only an estimate.\n return [timer.getTime() for timer in self._stable_timers.values() if timer and timer.active()]\n\nclass SingleBranchScheduler(BaseBasicScheduler):\n def getChangeFilter(self, branch, branches, change_filter, categories):\n if branch is NotABranch and not change_filter:\n config.error(\n \"the 'branch' argument to SingleBranchScheduler is \" +\n \"mandatory unless change_filter is provided\")\n elif branches is not NotABranch:\n 
config.error(\n \"the 'branches' argument is not allowed for \" +\n \"SingleBranchScheduler\")\n\n\n return filter.ChangeFilter.fromSchedulerConstructorArgs(\n change_filter=change_filter, branch=branch,\n categories=categories)\n\n def getTimerNameForChange(self, change):\n return \"only\" # this class only uses one timer\n\n def getChangeClassificationsForTimer(self, objectid, timer_name):\n return self.master.db.schedulers.getChangeClassifications(\n self.objectid)\n\n\nclass Scheduler(SingleBranchScheduler):\n \"alias for SingleBranchScheduler\"\n def __init__(self, *args, **kwargs):\n log.msg(\"WARNING: the name 'Scheduler' is deprecated; use \" +\n \"buildbot.schedulers.basic.SingleBranchScheduler instead \" +\n \"(note that this may require you to change your import \" +\n \"statement)\")\n SingleBranchScheduler.__init__(self, *args, **kwargs)\n\n\nclass AnyBranchScheduler(BaseBasicScheduler):\n def getChangeFilter(self, branch, branches, change_filter, categories):\n assert branch is NotABranch\n return filter.ChangeFilter.fromSchedulerConstructorArgs(\n change_filter=change_filter, branch=branches,\n categories=categories)\n\n def getTimerNameForChange(self, change):\n # Py2.6+: could be a namedtuple\n return (change.codebase, change.project, change.repository, change.branch)\n\n def getChangeClassificationsForTimer(self, objectid, timer_name):\n codebase, project, repository, branch = timer_name # set in getTimerNameForChange\n return self.master.db.schedulers.getChangeClassifications(\n self.objectid, branch=branch, repository=repository,\n codebase=codebase, project=project)\n\n# now at buildbot.schedulers.dependent, but keep the old name alive\nDependent = dependent.Dependent\n", "path": "master/buildbot/schedulers/basic.py"}]}
| 3,975 | 650 |
gh_patches_debug_50470 | rasdani/github-patches | git_diff | cython__cython-4942 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Annotated attributes of cclass are not supporting pointers
<!--
**PLEASE READ THIS FIRST:**
- Do not use the bug and feature tracker for support requests. Use the `cython-users` mailing list instead.
- Did you search for similar issues already? Please do, it helps to save us precious time that we otherwise could not invest into development.
- Did you try the latest master branch or pre-release? It might already have what you want to report. Also see the [Changelog](https://github.com/cython/cython/blob/master/CHANGES.rst) regarding recent changes.
-->
**Describe the bug**
The compilation fails when an attribute of a cclass is declared using an annotated type that contains a pointer.
**To Reproduce**
Following code:
```python
import cython
@cython.cclass
class Foo:
a: cython.pointer(cython.int)
def bar(self):
self.a = cython.NULL
```
fails during compilation with error:
```
$ cython -3 test.py
Error compiling Cython file:
------------------------------------------------------------
...
@cython.cclass
class Foo:
a: cython.pointer(cython.int)
def bar(self):
self.a = cython.NULL
^
------------------------------------------------------------
test.py:8:23: Cannot convert 'void *' to Python object
```
**Expected behavior**
Compilation should be successful.
**Environment (please complete the following information):**
- OS: Linux
- Python version: Python 3.9.2
- Cython version: master
**Additional context**
When `declare()` statement or `cython.p_int` type is used, compilation is successful:
```python
import cython
@cython.cclass
class Foo:
a = cython.declare(cython.pointer(cython.int))
def bar(self):
self.a = cython.NULL
```
```python
import cython
@cython.cclass
class Foo:
a: cython.p_int
def bar(self):
self.a = cython.NULL
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/examples/tutorial/clibraries/queue.py`
Content:
```
1 from cython.cimports import cqueue
2
3 @cython.cclass
4 class Queue:
5 _c_queue = cython.declare(cython.pointer(cqueue.Queue))
6
7 def __cinit__(self):
8 self._c_queue = cqueue.queue_new()
9
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/examples/tutorial/clibraries/queue.py b/docs/examples/tutorial/clibraries/queue.py
--- a/docs/examples/tutorial/clibraries/queue.py
+++ b/docs/examples/tutorial/clibraries/queue.py
@@ -2,7 +2,7 @@
@cython.cclass
class Queue:
- _c_queue = cython.declare(cython.pointer(cqueue.Queue))
+ _c_queue: cython.pointer(cqueue.Queue)
def __cinit__(self):
self._c_queue = cqueue.queue_new()
|
{"golden_diff": "diff --git a/docs/examples/tutorial/clibraries/queue.py b/docs/examples/tutorial/clibraries/queue.py\n--- a/docs/examples/tutorial/clibraries/queue.py\n+++ b/docs/examples/tutorial/clibraries/queue.py\n@@ -2,7 +2,7 @@\n \n @cython.cclass\n class Queue:\n- _c_queue = cython.declare(cython.pointer(cqueue.Queue))\n+ _c_queue: cython.pointer(cqueue.Queue)\n \n def __cinit__(self):\n self._c_queue = cqueue.queue_new()\n", "issue": "[BUG] Annotated attributes of cclass are not supporting pointers\n<!--\r\n**PLEASE READ THIS FIRST:**\r\n- Do not use the bug and feature tracker for support requests. Use the `cython-users` mailing list instead.\r\n- Did you search for similar issues already? Please do, it helps to save us precious time that we otherwise could not invest into development.\r\n- Did you try the latest master branch or pre-release? It might already have what you want to report. Also see the [Changelog](https://github.com/cython/cython/blob/master/CHANGES.rst) regarding recent changes.\r\n-->\r\n\r\n**Describe the bug**\r\nThe compilation is failing, when attribute of cclass is declared using annotated type containing pointer.\r\n\r\n**To Reproduce**\r\nFollowing code:\r\n```python\r\nimport cython\r\n\r\[email protected]\r\nclass Foo:\r\n a: cython.pointer(cython.int)\r\n\r\n def bar(self):\r\n self.a = cython.NULL\r\n```\r\nfails during compilation with error:\r\n```\r\n$ cython -3 test.py\r\n\r\nError compiling Cython file:\r\n------------------------------------------------------------\r\n...\r\[email protected]\r\nclass Foo:\r\n a: cython.pointer(cython.int)\r\n\r\n def bar(self):\r\n self.a = cython.NULL\r\n ^\r\n------------------------------------------------------------\r\n\r\ntest.py:8:23: Cannot convert 'void *' to Python object\r\n```\r\n\r\n**Expected behavior**\r\nCompilation should be successfull.\r\n\r\n**Environment (please complete the following information):**\r\n - OS: Linux\r\n - Python version: Python 3.9.2\r\n - Cython version: master\r\n\r\n**Additional context**\r\nWhen `declare()` statement or `cython.p_int` type is used, compilation is successful:\r\n\r\n```python\r\nimport cython\r\n\r\[email protected]\r\nclass Foo:\r\n a = cython.declare(cython.pointer(cython.int))\r\n\r\n def bar(self):\r\n self.a = cython.NULL\r\n```\r\n\r\n```python\r\nimport cython\r\n\r\[email protected]\r\nclass Foo:\r\n a: cython.p_int\r\n\r\n def bar(self):\r\n self.a = cython.NULL\r\n```\r\n\n", "before_files": [{"content": "from cython.cimports import cqueue\n\[email protected]\nclass Queue:\n _c_queue = cython.declare(cython.pointer(cqueue.Queue))\n\n def __cinit__(self):\n self._c_queue = cqueue.queue_new()\n", "path": "docs/examples/tutorial/clibraries/queue.py"}], "after_files": [{"content": "from cython.cimports import cqueue\n\[email protected]\nclass Queue:\n _c_queue: cython.pointer(cqueue.Queue)\n\n def __cinit__(self):\n self._c_queue = cqueue.queue_new()\n", "path": "docs/examples/tutorial/clibraries/queue.py"}]}
| 761 | 114 |
gh_patches_debug_40379 | rasdani/github-patches | git_diff | wemake-services__wemake-python-styleguide-2409 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Knowing the violation code it is quite hard to find the violation description
**Use Case.** I am testing wemake-python-styleguide on my project.
```
...
22:5 WPS337 Found multiline conditions
if (
^
Full list of violations and explanations:
https://wemake-python-stylegui.de/en/0.13.4/pages/usage/violations/
make: *** [infra/make/lint.mk:3: lint] Error 1
```
To get more information on what `WPS337` is for, I have to do several steps:
* Open https://wemake-python-stylegui.de/en/0.13.4/pages/usage/violations/
* Realize that WPS337 is probably behind the link titled `WPS300 - WPS399`
* And now I am stuck; I can `Ctrl + F` → `337`, but that won't help; so I try to find the word `multiline`
* and finally I reach the description I wanted: https://wemake-python-stylegui.de/en/0.13.4/pages/usage/violations/consistency.html#wemake_python_styleguide.violations.consistency.MultilineConditionsViolation
This takes time, especially when the title of the violation on this page does not exactly correspond to the name the tool prints on command line.
**Ideally,** I would see a link to the violation description right in the console, like this:
```
...
22:5 WPS337 Found multiline conditions https://wps.it/0.13.4/WPS337/
if (
^
...
```
Regrettably, wps.to domain is registered already, but wps.it is there.
**If that is not possible,** it would help if the docs pages at least included the violation codes, to make the page searchable via `Ctrl + F`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wemake_python_styleguide/violations/base.py`
Content:
```
1 """
2 Contains detailed technical information about :term:`violation` internals.
3
4 .. _violations:
5
6 Violations API
7 --------------
8
9 .. currentmodule:: wemake_python_styleguide.violations.base
10
11 .. autoclasstree:: wemake_python_styleguide.violations.base
12
13 .. autosummary::
14 :nosignatures:
15
16 ASTViolation
17 MaybeASTViolation
18 TokenizeViolation
19 SimpleViolation
20
21 Violation cannot have more than one base class.
22 See :ref:`tutorial` for more information about choosing a correct base class.
23
24 Conventions
25 ~~~~~~~~~~~
26
27 - Each violation class name should end with "Violation"
28 - Each violation must have a long docstring with full description
29 - Each violation must have "Reasoning" and "Solution" sections
30 - Each violation must have "versionadded" policy
31 - Each violation should have an example with correct and wrong usages
32 - If violation error template should have a parameter
33 it should be the last part of the text: ``: {0}``
34
35 Deprecating a violation
36 ~~~~~~~~~~~~~~~~~~~~~~~
37
38 When you want to mark some violation as deprecated,
39 then assign ``deprecated`` boolean flag to it:
40
41 .. code:: python
42
43 @final
44 class SomeViolation(ASTViolation):
45 deprecated = True
46
47 Reference
48 ~~~~~~~~~
49
50 """
51
52 import abc
53 import ast
54 import enum
55 import tokenize
56 from typing import Callable, ClassVar, Optional, Set, Tuple, Union
57
58 from typing_extensions import final
59
60 #: General type for all possible nodes where error happens.
61 ErrorNode = Union[
62 ast.AST,
63 tokenize.TokenInfo,
64 None,
65 ]
66
67 #: We use this type to define helper classes with callbacks to add violations.
68 ErrorCallback = Callable[['BaseViolation'], None]
69
70
71 @enum.unique
72 class ViolationPostfixes(enum.Enum):
73 """String values of postfixes used for violation baselines."""
74
75 bigger_than = ' > {0}'
76 less_than = ' < {0}'
77
78
79 class BaseViolation(object, metaclass=abc.ABCMeta):
80 """
81 Abstract base class for all style violations.
82
83 It basically just defines how to create any error and how to format
84 this error later on.
85
86 Each subclass must define ``error_template`` and ``code`` fields.
87
88 Attributes:
89 error_template: message that will be shown to user after formatting.
90 code: unique violation number. Used to identify the violation.
91 previous_codes: just a documentation thing to track changes in time.
92 deprecated: indicates that this violation will be removed soon.
93 postfix_template: indicates message that we show at the very end.
94
95 """
96
97 error_template: ClassVar[str]
98 code: ClassVar[int]
99 previous_codes: ClassVar[Set[int]]
100 deprecated: ClassVar[bool] = False
101
102 # We use this code to show base metrics and thresholds mostly:
103 postfix_template: ClassVar[ViolationPostfixes] = (
104 ViolationPostfixes.bigger_than
105 )
106
107 def __init__(
108 self,
109 node: ErrorNode,
110 text: Optional[str] = None,
111 baseline: Optional[int] = None,
112 ) -> None:
113 """
114 Creates a new instance of an abstract violation.
115
116 Arguments:
117 node: violation was raised by this node. If applicable.
118 text: extra text to format the final message. If applicable.
119 baseline: some complexity violations show the logic threshold here.
120
121 """
122 self._node = node
123 self._text = text
124 self._baseline = baseline
125
126 @final
127 def message(self) -> str:
128 """
129 Returns error's formatted message with code and reason.
130
131 Conditionally formats the ``error_template`` if it is required.
132 """
133 return '{0} {1}{2}'.format(
134 self._full_code(),
135 self.error_template.format(self._text),
136 self._postfix_information(),
137 )
138
139 @final
140 def node_items(self) -> Tuple[int, int, str]:
141 """Returns tuple to match ``flake8`` API format."""
142 return (*self._location(), self.message())
143
144 @final
145 def _full_code(self) -> str:
146 """
147 Returns fully formatted code.
148
149 Adds violation letter to the numbers.
150 Also ensures that codes like ``3`` will be represented as ``WPS003``.
151 """
152 return 'WPS{0}'.format(str(self.code).zfill(3))
153
154 @final
155 def _postfix_information(self) -> str:
156 """
157 Adds useful information to the end of the violation message.
158
159 Useful for complexity baselines and other thresholds.
160 """
161 if self._baseline is None:
162 return ''
163 return self.postfix_template.value.format(self._baseline)
164
165 @abc.abstractmethod
166 def _location(self) -> Tuple[int, int]:
167 """Base method for showing error location."""
168
169
170 class _BaseASTViolation(BaseViolation, metaclass=abc.ABCMeta):
171 """Used as a based type for all ``ast`` violations."""
172
173 _node: Optional[ast.AST]
174
175 @final
176 def _location(self) -> Tuple[int, int]:
177 line_number = getattr(self._node, 'lineno', 0)
178 column_offset = getattr(self._node, 'col_offset', 0)
179 return line_number, column_offset
180
181
182 class ASTViolation(_BaseASTViolation, metaclass=abc.ABCMeta):
183 """Violation for ``ast`` based style visitors."""
184
185 _node: ast.AST
186
187
188 class MaybeASTViolation(_BaseASTViolation, metaclass=abc.ABCMeta):
189 """
190 Violation for ``ast`` and modules visitors.
191
192 Is used for violations that share the same rule for nodes and module names.
193 Is wildly used for naming rules.
194 """
195
196 def __init__(
197 self,
198 node: Optional[ast.AST] = None,
199 text: Optional[str] = None,
200 baseline: Optional[int] = None,
201 ) -> None:
202 """Creates new instance of module violation without explicit node."""
203 super().__init__(node, text=text, baseline=baseline)
204
205
206 class TokenizeViolation(BaseViolation, metaclass=abc.ABCMeta):
207 """Violation for ``tokenize`` based visitors."""
208
209 _node: tokenize.TokenInfo
210
211 @final
212 def _location(self) -> Tuple[int, int]:
213 return self._node.start
214
215
216 class SimpleViolation(BaseViolation, metaclass=abc.ABCMeta):
217 """Violation for cases where there's no associated nodes."""
218
219 _node: None
220
221 def __init__(
222 self,
223 node=None,
224 text: Optional[str] = None,
225 baseline: Optional[int] = None,
226 ) -> None:
227 """Creates new instance of simple style violation."""
228 super().__init__(node, text=text, baseline=baseline)
229
230 @final
231 def _location(self) -> Tuple[int, int]:
232 """
233 Return violation location inside the file.
234
235 Default location is in the so-called "file beginning".
236 Cannot be ignored by inline ``noqa`` comments.
237 """
238 return 0, 0
239
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/wemake_python_styleguide/violations/base.py b/wemake_python_styleguide/violations/base.py
--- a/wemake_python_styleguide/violations/base.py
+++ b/wemake_python_styleguide/violations/base.py
@@ -76,7 +76,8 @@
less_than = ' < {0}'
-class BaseViolation(object, metaclass=abc.ABCMeta):
+# TODO: remove `noqa` after a new release:
+class BaseViolation(object, metaclass=abc.ABCMeta): # noqa: WPS338
"""
Abstract base class for all style violations.
@@ -99,11 +100,34 @@
previous_codes: ClassVar[Set[int]]
deprecated: ClassVar[bool] = False
+ # assigned in __init_subclass__
+ full_code: ClassVar[str]
+ summary: ClassVar[str]
+
# We use this code to show base metrics and thresholds mostly:
postfix_template: ClassVar[ViolationPostfixes] = (
ViolationPostfixes.bigger_than
)
+ def __init_subclass__(cls, **kwargs) -> None:
+ """Sets additional values for subclasses."""
+ super().__init_subclass__(**kwargs)
+ violation_code = getattr(cls, 'code', None)
+ if violation_code is None:
+ return
+ if cls.__doc__ is None:
+ raise TypeError(
+ 'Please include a docstring documenting {0}'.format(cls),
+ )
+ # this is mostly done for docs to display the full code,
+ # allowing its indexing in search engines and better discoverability
+ cls.full_code = cls._full_code()
+ cls.summary = cls.__doc__.lstrip().split('\n', maxsplit=1)[0]
+ # this hack adds full code to summary table in the docs
+ cls.__doc__ = _prepend_skipping_whitespaces(
+ '{0} — '.format(cls.full_code), cls.__doc__,
+ )
+
def __init__(
self,
node: ErrorNode,
@@ -131,7 +155,7 @@
Conditionally formats the ``error_template`` if it is required.
"""
return '{0} {1}{2}'.format(
- self._full_code(),
+ self.full_code,
self.error_template.format(self._text),
self._postfix_information(),
)
@@ -142,14 +166,15 @@
return (*self._location(), self.message())
@final
- def _full_code(self) -> str:
+ @classmethod
+ def _full_code(cls) -> str:
"""
Returns fully formatted code.
Adds violation letter to the numbers.
Also ensures that codes like ``3`` will be represented as ``WPS003``.
"""
- return 'WPS{0}'.format(str(self.code).zfill(3))
+ return 'WPS{0}'.format(str(cls.code).zfill(3))
@final
def _postfix_information(self) -> str:
@@ -236,3 +261,9 @@
Cannot be ignored by inline ``noqa`` comments.
"""
return 0, 0
+
+
+def _prepend_skipping_whitespaces(prefix: str, text: str) -> str:
+ lstripped_text = text.lstrip()
+ leading_whitespaces = text[:len(text) - len(lstripped_text)]
+ return leading_whitespaces + prefix + lstripped_text
|
{"golden_diff": "diff --git a/wemake_python_styleguide/violations/base.py b/wemake_python_styleguide/violations/base.py\n--- a/wemake_python_styleguide/violations/base.py\n+++ b/wemake_python_styleguide/violations/base.py\n@@ -76,7 +76,8 @@\n less_than = ' < {0}'\n \n \n-class BaseViolation(object, metaclass=abc.ABCMeta):\n+# TODO: remove `noqa` after a new release:\n+class BaseViolation(object, metaclass=abc.ABCMeta): # noqa: WPS338\n \"\"\"\n Abstract base class for all style violations.\n \n@@ -99,11 +100,34 @@\n previous_codes: ClassVar[Set[int]]\n deprecated: ClassVar[bool] = False\n \n+ # assigned in __init_subclass__\n+ full_code: ClassVar[str]\n+ summary: ClassVar[str]\n+\n # We use this code to show base metrics and thresholds mostly:\n postfix_template: ClassVar[ViolationPostfixes] = (\n ViolationPostfixes.bigger_than\n )\n \n+ def __init_subclass__(cls, **kwargs) -> None:\n+ \"\"\"Sets additional values for subclasses.\"\"\"\n+ super().__init_subclass__(**kwargs)\n+ violation_code = getattr(cls, 'code', None)\n+ if violation_code is None:\n+ return\n+ if cls.__doc__ is None:\n+ raise TypeError(\n+ 'Please include a docstring documenting {0}'.format(cls),\n+ )\n+ # this is mostly done for docs to display the full code,\n+ # allowing its indexing in search engines and better discoverability\n+ cls.full_code = cls._full_code()\n+ cls.summary = cls.__doc__.lstrip().split('\\n', maxsplit=1)[0]\n+ # this hack adds full code to summary table in the docs\n+ cls.__doc__ = _prepend_skipping_whitespaces(\n+ '{0} \u2014 '.format(cls.full_code), cls.__doc__,\n+ )\n+\n def __init__(\n self,\n node: ErrorNode,\n@@ -131,7 +155,7 @@\n Conditionally formats the ``error_template`` if it is required.\n \"\"\"\n return '{0} {1}{2}'.format(\n- self._full_code(),\n+ self.full_code,\n self.error_template.format(self._text),\n self._postfix_information(),\n )\n@@ -142,14 +166,15 @@\n return (*self._location(), self.message())\n \n @final\n- def _full_code(self) -> str:\n+ @classmethod\n+ def _full_code(cls) -> str:\n \"\"\"\n Returns fully formatted code.\n \n Adds violation letter to the numbers.\n Also ensures that codes like ``3`` will be represented as ``WPS003``.\n \"\"\"\n- return 'WPS{0}'.format(str(self.code).zfill(3))\n+ return 'WPS{0}'.format(str(cls.code).zfill(3))\n \n @final\n def _postfix_information(self) -> str:\n@@ -236,3 +261,9 @@\n Cannot be ignored by inline ``noqa`` comments.\n \"\"\"\n return 0, 0\n+\n+\n+def _prepend_skipping_whitespaces(prefix: str, text: str) -> str:\n+ lstripped_text = text.lstrip()\n+ leading_whitespaces = text[:len(text) - len(lstripped_text)]\n+ return leading_whitespaces + prefix + lstripped_text\n", "issue": "Knowing the violation code it is quite hard to find the violation description\n**Use Case.** I am testing wemake-python-styleguide on my project.\r\n\r\n```\r\n...\r\n 22:5 WPS337 Found multiline conditions\r\n if (\r\n ^\r\n\r\nFull list of violations and explanations:\r\nhttps://wemake-python-stylegui.de/en/0.13.4/pages/usage/violations/\r\nmake: *** [infra/make/lint.mk:3: lint] Error 1\r\n```\r\n\r\nTo get more information on what `WPS337` is for, I have to do several steps:\r\n\r\n* Open https://wemake-python-stylegui.de/en/0.13.4/pages/usage/violations/\r\n* Realize that WPS337 is probably behind the link titled `WPS300 - WPS399`\r\n* And now I am stuck; I can `Ctrl + F` \u2192 `337`, but that won't help; so I try to find the word `multiline`\r\n* and finally I reach the description I wanted: 
https://wemake-python-stylegui.de/en/0.13.4/pages/usage/violations/consistency.html#wemake_python_styleguide.violations.consistency.MultilineConditionsViolation\r\n\r\nThis takes time, especially when the title of the violation on this page does not exactly correspond to the name the tool prints on command line.\r\n\r\n**Ideally,** I would see a link to the violation description right in the console, like this:\r\n\r\n```\r\n...\r\n 22:5 WPS337 Found multiline conditions https://wps.it/0.13.4/WPS337/\r\n if (\r\n ^\r\n...\r\n```\r\n\r\nRegrettably, wps.to domain is registered already, but wps.it is there.\r\n\r\n**If that is not possible,** it would help if the docs pages at least included the violation codes, to make the page searchable via `Ctrl + F`.\n", "before_files": [{"content": "\"\"\"\nContains detailed technical information about :term:`violation` internals.\n\n.. _violations:\n\nViolations API\n--------------\n\n.. currentmodule:: wemake_python_styleguide.violations.base\n\n.. autoclasstree:: wemake_python_styleguide.violations.base\n\n.. autosummary::\n :nosignatures:\n\n ASTViolation\n MaybeASTViolation\n TokenizeViolation\n SimpleViolation\n\nViolation cannot have more than one base class.\nSee :ref:`tutorial` for more information about choosing a correct base class.\n\nConventions\n~~~~~~~~~~~\n\n- Each violation class name should end with \"Violation\"\n- Each violation must have a long docstring with full description\n- Each violation must have \"Reasoning\" and \"Solution\" sections\n- Each violation must have \"versionadded\" policy\n- Each violation should have an example with correct and wrong usages\n- If violation error template should have a parameter\n it should be the last part of the text: ``: {0}``\n\nDeprecating a violation\n~~~~~~~~~~~~~~~~~~~~~~~\n\nWhen you want to mark some violation as deprecated,\nthen assign ``deprecated`` boolean flag to it:\n\n.. code:: python\n\n @final\n class SomeViolation(ASTViolation):\n deprecated = True\n\nReference\n~~~~~~~~~\n\n\"\"\"\n\nimport abc\nimport ast\nimport enum\nimport tokenize\nfrom typing import Callable, ClassVar, Optional, Set, Tuple, Union\n\nfrom typing_extensions import final\n\n#: General type for all possible nodes where error happens.\nErrorNode = Union[\n ast.AST,\n tokenize.TokenInfo,\n None,\n]\n\n#: We use this type to define helper classes with callbacks to add violations.\nErrorCallback = Callable[['BaseViolation'], None]\n\n\[email protected]\nclass ViolationPostfixes(enum.Enum):\n \"\"\"String values of postfixes used for violation baselines.\"\"\"\n\n bigger_than = ' > {0}'\n less_than = ' < {0}'\n\n\nclass BaseViolation(object, metaclass=abc.ABCMeta):\n \"\"\"\n Abstract base class for all style violations.\n\n It basically just defines how to create any error and how to format\n this error later on.\n\n Each subclass must define ``error_template`` and ``code`` fields.\n\n Attributes:\n error_template: message that will be shown to user after formatting.\n code: unique violation number. 
Used to identify the violation.\n previous_codes: just a documentation thing to track changes in time.\n deprecated: indicates that this violation will be removed soon.\n postfix_template: indicates message that we show at the very end.\n\n \"\"\"\n\n error_template: ClassVar[str]\n code: ClassVar[int]\n previous_codes: ClassVar[Set[int]]\n deprecated: ClassVar[bool] = False\n\n # We use this code to show base metrics and thresholds mostly:\n postfix_template: ClassVar[ViolationPostfixes] = (\n ViolationPostfixes.bigger_than\n )\n\n def __init__(\n self,\n node: ErrorNode,\n text: Optional[str] = None,\n baseline: Optional[int] = None,\n ) -> None:\n \"\"\"\n Creates a new instance of an abstract violation.\n\n Arguments:\n node: violation was raised by this node. If applicable.\n text: extra text to format the final message. If applicable.\n baseline: some complexity violations show the logic threshold here.\n\n \"\"\"\n self._node = node\n self._text = text\n self._baseline = baseline\n\n @final\n def message(self) -> str:\n \"\"\"\n Returns error's formatted message with code and reason.\n\n Conditionally formats the ``error_template`` if it is required.\n \"\"\"\n return '{0} {1}{2}'.format(\n self._full_code(),\n self.error_template.format(self._text),\n self._postfix_information(),\n )\n\n @final\n def node_items(self) -> Tuple[int, int, str]:\n \"\"\"Returns tuple to match ``flake8`` API format.\"\"\"\n return (*self._location(), self.message())\n\n @final\n def _full_code(self) -> str:\n \"\"\"\n Returns fully formatted code.\n\n Adds violation letter to the numbers.\n Also ensures that codes like ``3`` will be represented as ``WPS003``.\n \"\"\"\n return 'WPS{0}'.format(str(self.code).zfill(3))\n\n @final\n def _postfix_information(self) -> str:\n \"\"\"\n Adds useful information to the end of the violation message.\n\n Useful for complexity baselines and other thresholds.\n \"\"\"\n if self._baseline is None:\n return ''\n return self.postfix_template.value.format(self._baseline)\n\n @abc.abstractmethod\n def _location(self) -> Tuple[int, int]:\n \"\"\"Base method for showing error location.\"\"\"\n\n\nclass _BaseASTViolation(BaseViolation, metaclass=abc.ABCMeta):\n \"\"\"Used as a based type for all ``ast`` violations.\"\"\"\n\n _node: Optional[ast.AST]\n\n @final\n def _location(self) -> Tuple[int, int]:\n line_number = getattr(self._node, 'lineno', 0)\n column_offset = getattr(self._node, 'col_offset', 0)\n return line_number, column_offset\n\n\nclass ASTViolation(_BaseASTViolation, metaclass=abc.ABCMeta):\n \"\"\"Violation for ``ast`` based style visitors.\"\"\"\n\n _node: ast.AST\n\n\nclass MaybeASTViolation(_BaseASTViolation, metaclass=abc.ABCMeta):\n \"\"\"\n Violation for ``ast`` and modules visitors.\n\n Is used for violations that share the same rule for nodes and module names.\n Is wildly used for naming rules.\n \"\"\"\n\n def __init__(\n self,\n node: Optional[ast.AST] = None,\n text: Optional[str] = None,\n baseline: Optional[int] = None,\n ) -> None:\n \"\"\"Creates new instance of module violation without explicit node.\"\"\"\n super().__init__(node, text=text, baseline=baseline)\n\n\nclass TokenizeViolation(BaseViolation, metaclass=abc.ABCMeta):\n \"\"\"Violation for ``tokenize`` based visitors.\"\"\"\n\n _node: tokenize.TokenInfo\n\n @final\n def _location(self) -> Tuple[int, int]:\n return self._node.start\n\n\nclass SimpleViolation(BaseViolation, metaclass=abc.ABCMeta):\n \"\"\"Violation for cases where there's no associated nodes.\"\"\"\n\n _node: None\n\n 
def __init__(\n self,\n node=None,\n text: Optional[str] = None,\n baseline: Optional[int] = None,\n ) -> None:\n \"\"\"Creates new instance of simple style violation.\"\"\"\n super().__init__(node, text=text, baseline=baseline)\n\n @final\n def _location(self) -> Tuple[int, int]:\n \"\"\"\n Return violation location inside the file.\n\n Default location is in the so-called \"file beginning\".\n Cannot be ignored by inline ``noqa`` comments.\n \"\"\"\n return 0, 0\n", "path": "wemake_python_styleguide/violations/base.py"}], "after_files": [{"content": "\"\"\"\nContains detailed technical information about :term:`violation` internals.\n\n.. _violations:\n\nViolations API\n--------------\n\n.. currentmodule:: wemake_python_styleguide.violations.base\n\n.. autoclasstree:: wemake_python_styleguide.violations.base\n\n.. autosummary::\n :nosignatures:\n\n ASTViolation\n MaybeASTViolation\n TokenizeViolation\n SimpleViolation\n\nViolation cannot have more than one base class.\nSee :ref:`tutorial` for more information about choosing a correct base class.\n\nConventions\n~~~~~~~~~~~\n\n- Each violation class name should end with \"Violation\"\n- Each violation must have a long docstring with full description\n- Each violation must have \"Reasoning\" and \"Solution\" sections\n- Each violation must have \"versionadded\" policy\n- Each violation should have an example with correct and wrong usages\n- If violation error template should have a parameter\n it should be the last part of the text: ``: {0}``\n\nDeprecating a violation\n~~~~~~~~~~~~~~~~~~~~~~~\n\nWhen you want to mark some violation as deprecated,\nthen assign ``deprecated`` boolean flag to it:\n\n.. code:: python\n\n @final\n class SomeViolation(ASTViolation):\n deprecated = True\n\nReference\n~~~~~~~~~\n\n\"\"\"\n\nimport abc\nimport ast\nimport enum\nimport tokenize\nfrom typing import Callable, ClassVar, Optional, Set, Tuple, Union\n\nfrom typing_extensions import final\n\n#: General type for all possible nodes where error happens.\nErrorNode = Union[\n ast.AST,\n tokenize.TokenInfo,\n None,\n]\n\n#: We use this type to define helper classes with callbacks to add violations.\nErrorCallback = Callable[['BaseViolation'], None]\n\n\[email protected]\nclass ViolationPostfixes(enum.Enum):\n \"\"\"String values of postfixes used for violation baselines.\"\"\"\n\n bigger_than = ' > {0}'\n less_than = ' < {0}'\n\n\n# TODO: remove `noqa` after a new release:\nclass BaseViolation(object, metaclass=abc.ABCMeta): # noqa: WPS338\n \"\"\"\n Abstract base class for all style violations.\n\n It basically just defines how to create any error and how to format\n this error later on.\n\n Each subclass must define ``error_template`` and ``code`` fields.\n\n Attributes:\n error_template: message that will be shown to user after formatting.\n code: unique violation number. 
Used to identify the violation.\n previous_codes: just a documentation thing to track changes in time.\n deprecated: indicates that this violation will be removed soon.\n postfix_template: indicates message that we show at the very end.\n\n \"\"\"\n\n error_template: ClassVar[str]\n code: ClassVar[int]\n previous_codes: ClassVar[Set[int]]\n deprecated: ClassVar[bool] = False\n\n # assigned in __init_subclass__\n full_code: ClassVar[str]\n summary: ClassVar[str]\n\n # We use this code to show base metrics and thresholds mostly:\n postfix_template: ClassVar[ViolationPostfixes] = (\n ViolationPostfixes.bigger_than\n )\n\n def __init_subclass__(cls, **kwargs) -> None:\n \"\"\"Sets additional values for subclasses.\"\"\"\n super().__init_subclass__(**kwargs)\n violation_code = getattr(cls, 'code', None)\n if violation_code is None:\n return\n if cls.__doc__ is None:\n raise TypeError(\n 'Please include a docstring documenting {0}'.format(cls),\n )\n # this is mostly done for docs to display the full code,\n # allowing its indexing in search engines and better discoverability\n cls.full_code = cls._full_code()\n cls.summary = cls.__doc__.lstrip().split('\\n', maxsplit=1)[0]\n # this hack adds full code to summary table in the docs\n cls.__doc__ = _prepend_skipping_whitespaces(\n '{0} \u2014 '.format(cls.full_code), cls.__doc__,\n )\n\n def __init__(\n self,\n node: ErrorNode,\n text: Optional[str] = None,\n baseline: Optional[int] = None,\n ) -> None:\n \"\"\"\n Creates a new instance of an abstract violation.\n\n Arguments:\n node: violation was raised by this node. If applicable.\n text: extra text to format the final message. If applicable.\n baseline: some complexity violations show the logic threshold here.\n\n \"\"\"\n self._node = node\n self._text = text\n self._baseline = baseline\n\n @final\n def message(self) -> str:\n \"\"\"\n Returns error's formatted message with code and reason.\n\n Conditionally formats the ``error_template`` if it is required.\n \"\"\"\n return '{0} {1}{2}'.format(\n self.full_code,\n self.error_template.format(self._text),\n self._postfix_information(),\n )\n\n @final\n def node_items(self) -> Tuple[int, int, str]:\n \"\"\"Returns tuple to match ``flake8`` API format.\"\"\"\n return (*self._location(), self.message())\n\n @final\n @classmethod\n def _full_code(cls) -> str:\n \"\"\"\n Returns fully formatted code.\n\n Adds violation letter to the numbers.\n Also ensures that codes like ``3`` will be represented as ``WPS003``.\n \"\"\"\n return 'WPS{0}'.format(str(cls.code).zfill(3))\n\n @final\n def _postfix_information(self) -> str:\n \"\"\"\n Adds useful information to the end of the violation message.\n\n Useful for complexity baselines and other thresholds.\n \"\"\"\n if self._baseline is None:\n return ''\n return self.postfix_template.value.format(self._baseline)\n\n @abc.abstractmethod\n def _location(self) -> Tuple[int, int]:\n \"\"\"Base method for showing error location.\"\"\"\n\n\nclass _BaseASTViolation(BaseViolation, metaclass=abc.ABCMeta):\n \"\"\"Used as a based type for all ``ast`` violations.\"\"\"\n\n _node: Optional[ast.AST]\n\n @final\n def _location(self) -> Tuple[int, int]:\n line_number = getattr(self._node, 'lineno', 0)\n column_offset = getattr(self._node, 'col_offset', 0)\n return line_number, column_offset\n\n\nclass ASTViolation(_BaseASTViolation, metaclass=abc.ABCMeta):\n \"\"\"Violation for ``ast`` based style visitors.\"\"\"\n\n _node: ast.AST\n\n\nclass MaybeASTViolation(_BaseASTViolation, metaclass=abc.ABCMeta):\n \"\"\"\n 
Violation for ``ast`` and modules visitors.\n\n Is used for violations that share the same rule for nodes and module names.\n Is wildly used for naming rules.\n \"\"\"\n\n def __init__(\n self,\n node: Optional[ast.AST] = None,\n text: Optional[str] = None,\n baseline: Optional[int] = None,\n ) -> None:\n \"\"\"Creates new instance of module violation without explicit node.\"\"\"\n super().__init__(node, text=text, baseline=baseline)\n\n\nclass TokenizeViolation(BaseViolation, metaclass=abc.ABCMeta):\n \"\"\"Violation for ``tokenize`` based visitors.\"\"\"\n\n _node: tokenize.TokenInfo\n\n @final\n def _location(self) -> Tuple[int, int]:\n return self._node.start\n\n\nclass SimpleViolation(BaseViolation, metaclass=abc.ABCMeta):\n \"\"\"Violation for cases where there's no associated nodes.\"\"\"\n\n _node: None\n\n def __init__(\n self,\n node=None,\n text: Optional[str] = None,\n baseline: Optional[int] = None,\n ) -> None:\n \"\"\"Creates new instance of simple style violation.\"\"\"\n super().__init__(node, text=text, baseline=baseline)\n\n @final\n def _location(self) -> Tuple[int, int]:\n \"\"\"\n Return violation location inside the file.\n\n Default location is in the so-called \"file beginning\".\n Cannot be ignored by inline ``noqa`` comments.\n \"\"\"\n return 0, 0\n\n\ndef _prepend_skipping_whitespaces(prefix: str, text: str) -> str:\n lstripped_text = text.lstrip()\n leading_whitespaces = text[:len(text) - len(lstripped_text)]\n return leading_whitespaces + prefix + lstripped_text\n", "path": "wemake_python_styleguide/violations/base.py"}]}
| 2,822 | 794 |
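For readers less familiar with `__init_subclass__`, the mechanism the golden diff above leans on can be reduced to a small self-contained sketch: the base class computes and attaches the zero-padded `WPSxxx` code at the moment each concrete violation subclass is defined. The sketch strips away the docstring rewriting and summary extraction from the real patch and uses `WPS337` purely as an illustration; it is not the package's actual class hierarchy.

```python
from typing import ClassVar


class BaseViolation(object):
    # minimal stand-in for wemake_python_styleguide.violations.base.BaseViolation
    code: ClassVar[int]
    full_code: ClassVar[str]

    def __init_subclass__(cls, **kwargs) -> None:
        super().__init_subclass__(**kwargs)
        violation_code = getattr(cls, 'code', None)
        if violation_code is None:
            return  # abstract intermediate bases define no code
        # zero-pad so that code 3 would render as WPS003
        cls.full_code = 'WPS{0}'.format(str(violation_code).zfill(3))


class MultilineConditionsViolation(BaseViolation):
    """Found multiline conditions."""

    code = 337


print(MultilineConditionsViolation.full_code)  # prints: WPS337
```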
gh_patches_debug_6338 | rasdani/github-patches | git_diff | dbt-labs__dbt-core-1164 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
dbt trying to parse tags in analyses even when wrapped in `raw` tags
## Issue
### Steps to Reproduce
1. Put an unknown tag within `raw` tags in a sql file, in your _analysis_ directory (note that the bug doesn't occur in a model).
```
{% raw %}
{% form %}
date_part:
type: select
default: day
options: [hour, day, week, month]
{% endform %}
{% endraw %}
```
2. run `dbt compile`
### Expected Result
The following should be compiled:
```
{% form %}
date_part:
type: select
default: day
options: [hour, day, week, month]
{% endform %}
```
### Actual Result
Error on compilation:
```bash
Found 0 models, 0 tests, 0 archives, 1 analyses, 122 macros, 2 operations, 0 seed files
10:56:47 | Concurrency: 1 threads (target='dev')
10:56:47 |
Encountered an error:
Runtime Error
Compilation Error in analysis form_tag (analysis/analysis/form_tag.sql)
Encountered unknown tag 'form'.
line 2
{% form %}
```
### System information
OS: MacOS Mojave
Python 3.7.1
dbt 0.12.1
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dbt/compilation.py`
Content:
```
1 import itertools
2 import os
3 import json
4 from collections import OrderedDict, defaultdict
5 import sqlparse
6
7 import dbt.utils
8 import dbt.include
9 import dbt.tracking
10
11 from dbt.utils import get_materialization, NodeType, is_type
12
13 from dbt.linker import Linker
14
15 import dbt.compat
16 import dbt.context.runtime
17 import dbt.contracts.project
18 import dbt.exceptions
19 import dbt.flags
20 import dbt.loader
21 import dbt.config
22 from dbt.contracts.graph.compiled import CompiledNode, CompiledGraph
23
24 from dbt.clients.system import write_json
25 from dbt.logger import GLOBAL_LOGGER as logger
26
27 graph_file_name = 'graph.gpickle'
28 manifest_file_name = 'manifest.json'
29
30
31 def print_compile_stats(stats):
32 names = {
33 NodeType.Model: 'models',
34 NodeType.Test: 'tests',
35 NodeType.Archive: 'archives',
36 NodeType.Analysis: 'analyses',
37 NodeType.Macro: 'macros',
38 NodeType.Operation: 'operations',
39 NodeType.Seed: 'seed files',
40 }
41
42 results = {k: 0 for k in names.keys()}
43 results.update(stats)
44
45 stat_line = ", ".join(
46 ["{} {}".format(ct, names.get(t)) for t, ct in results.items()])
47
48 logger.info("Found {}".format(stat_line))
49
50
51 def _add_prepended_cte(prepended_ctes, new_cte):
52 for dct in prepended_ctes:
53 if dct['id'] == new_cte['id']:
54 dct['sql'] = new_cte['sql']
55 return
56 prepended_ctes.append(new_cte)
57
58
59 def _extend_prepended_ctes(prepended_ctes, new_prepended_ctes):
60 for new_cte in new_prepended_ctes:
61 _add_prepended_cte(prepended_ctes, new_cte)
62
63
64 def prepend_ctes(model, manifest):
65 model, _, manifest = recursively_prepend_ctes(model, manifest)
66
67 return (model, manifest)
68
69
70 def recursively_prepend_ctes(model, manifest):
71 if model.extra_ctes_injected:
72 return (model, model.extra_ctes, manifest)
73
74 if dbt.flags.STRICT_MODE:
75 # ensure that all the nodes in this manifest are compiled
76 CompiledGraph(**manifest.to_flat_graph())
77
78 prepended_ctes = []
79
80 for cte in model.extra_ctes:
81 cte_id = cte['id']
82 cte_to_add = manifest.nodes.get(cte_id)
83 cte_to_add, new_prepended_ctes, manifest = recursively_prepend_ctes(
84 cte_to_add, manifest)
85
86 _extend_prepended_ctes(prepended_ctes, new_prepended_ctes)
87 new_cte_name = '__dbt__CTE__{}'.format(cte_to_add.get('name'))
88 sql = ' {} as (\n{}\n)'.format(new_cte_name, cte_to_add.compiled_sql)
89 _add_prepended_cte(prepended_ctes, {'id': cte_id, 'sql': sql})
90
91 model.prepend_ctes(prepended_ctes)
92
93 manifest.nodes[model.unique_id] = model
94
95 return (model, prepended_ctes, manifest)
96
97
98 class Compiler(object):
99 def __init__(self, config):
100 self.config = config
101
102 def initialize(self):
103 dbt.clients.system.make_directory(self.config.target_path)
104 dbt.clients.system.make_directory(self.config.modules_path)
105
106 def compile_node(self, node, manifest, extra_context=None):
107 if extra_context is None:
108 extra_context = {}
109
110 logger.debug("Compiling {}".format(node.get('unique_id')))
111
112 data = node.to_dict()
113 data.update({
114 'compiled': False,
115 'compiled_sql': None,
116 'extra_ctes_injected': False,
117 'extra_ctes': [],
118 'injected_sql': None,
119 })
120 compiled_node = CompiledNode(**data)
121
122 context = dbt.context.runtime.generate(
123 compiled_node, self.config, manifest)
124 context.update(extra_context)
125
126 compiled_node.compiled_sql = dbt.clients.jinja.get_rendered(
127 node.get('raw_sql'),
128 context,
129 node)
130
131 compiled_node.compiled = True
132
133 injected_node, _ = prepend_ctes(compiled_node, manifest)
134
135 should_wrap = {NodeType.Test, NodeType.Analysis, NodeType.Operation}
136 if injected_node.resource_type in should_wrap:
137 # data tests get wrapped in count(*)
138 # TODO : move this somewhere more reasonable
139 if 'data' in injected_node.tags and \
140 is_type(injected_node, NodeType.Test):
141 injected_node.wrapped_sql = (
142 "select count(*) from (\n{test_sql}\n) sbq").format(
143 test_sql=injected_node.injected_sql)
144 else:
145 # don't wrap schema tests or analyses.
146 injected_node.wrapped_sql = injected_node.injected_sql
147
148 elif is_type(injected_node, NodeType.Archive):
149 # unfortunately we do everything automagically for
150 # archives. in the future it'd be nice to generate
151 # the SQL at the parser level.
152 pass
153
154 elif(is_type(injected_node, NodeType.Model) and
155 get_materialization(injected_node) == 'ephemeral'):
156 pass
157
158 else:
159 injected_node.wrapped_sql = None
160
161 return injected_node
162
163 def write_manifest_file(self, manifest):
164 """Write the manifest file to disk.
165
166 manifest should be a Manifest.
167 """
168 filename = manifest_file_name
169 manifest_path = os.path.join(self.config.target_path, filename)
170 write_json(manifest_path, manifest.serialize())
171
172 def write_graph_file(self, linker):
173 filename = graph_file_name
174 graph_path = os.path.join(self.config.target_path, filename)
175 linker.write_graph(graph_path)
176
177 def link_node(self, linker, node, manifest):
178 linker.add_node(node.unique_id)
179
180 linker.update_node_data(
181 node.unique_id,
182 node.to_dict())
183
184 for dependency in node.depends_on_nodes:
185 if manifest.nodes.get(dependency):
186 linker.dependency(
187 node.unique_id,
188 (manifest.nodes.get(dependency).unique_id))
189
190 else:
191 dbt.exceptions.dependency_not_found(node, dependency)
192
193 def link_graph(self, linker, manifest):
194 for node in manifest.nodes.values():
195 self.link_node(linker, node, manifest)
196
197 cycle = linker.find_cycles()
198
199 if cycle:
200 raise RuntimeError("Found a cycle: {}".format(cycle))
201
202 def get_all_projects(self):
203 all_projects = {self.config.project_name: self.config}
204 dependency_projects = dbt.utils.dependency_projects(self.config)
205
206 for project_cfg in dependency_projects:
207 name = project_cfg.project_name
208 all_projects[name] = project_cfg
209
210 if dbt.flags.STRICT_MODE:
211 dbt.contracts.project.ProjectList(**all_projects)
212
213 return all_projects
214
215 def _check_resource_uniqueness(cls, manifest):
216 names_resources = {}
217 alias_resources = {}
218
219 for resource, node in manifest.nodes.items():
220 if node.resource_type not in NodeType.refable():
221 continue
222
223 name = node.name
224 alias = "{}.{}".format(node.schema, node.alias)
225
226 existing_node = names_resources.get(name)
227 if existing_node is not None:
228 dbt.exceptions.raise_duplicate_resource_name(
229 existing_node, node)
230
231 existing_alias = alias_resources.get(alias)
232 if existing_alias is not None:
233 dbt.exceptions.raise_ambiguous_alias(
234 existing_alias, node)
235
236 names_resources[name] = node
237 alias_resources[alias] = node
238
239 def compile(self):
240 linker = Linker()
241
242 all_projects = self.get_all_projects()
243
244 manifest = dbt.loader.GraphLoader.load_all(self.config, all_projects)
245
246 self.write_manifest_file(manifest)
247
248 self._check_resource_uniqueness(manifest)
249
250 resource_fqns = manifest.get_resource_fqns()
251 self.config.warn_for_unused_resource_config_paths(resource_fqns,
252 manifest.disabled)
253
254 self.link_graph(linker, manifest)
255
256 stats = defaultdict(int)
257
258 for node_name, node in itertools.chain(
259 manifest.nodes.items(),
260 manifest.macros.items()):
261 stats[node.resource_type] += 1
262
263 self.write_graph_file(linker)
264 print_compile_stats(stats)
265
266 return manifest, linker
267
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
golden_diff:
diff --git a/dbt/compilation.py b/dbt/compilation.py
--- a/dbt/compilation.py
+++ b/dbt/compilation.py
@@ -132,7 +132,7 @@
injected_node, _ = prepend_ctes(compiled_node, manifest)
- should_wrap = {NodeType.Test, NodeType.Analysis, NodeType.Operation}
+ should_wrap = {NodeType.Test, NodeType.Operation}
if injected_node.resource_type in should_wrap:
# data tests get wrapped in count(*)
# TODO : move this somewhere more reasonable
verification_info:
{"golden_diff": "diff --git a/dbt/compilation.py b/dbt/compilation.py\n--- a/dbt/compilation.py\n+++ b/dbt/compilation.py\n@@ -132,7 +132,7 @@\n \n injected_node, _ = prepend_ctes(compiled_node, manifest)\n \n- should_wrap = {NodeType.Test, NodeType.Analysis, NodeType.Operation}\n+ should_wrap = {NodeType.Test, NodeType.Operation}\n if injected_node.resource_type in should_wrap:\n # data tests get wrapped in count(*)\n # TODO : move this somewhere more reasonable\n", "issue": "dbt trying to parse tags in analyses even when wrapped in `raw` tags\n## Issue\r\n\r\n### Steps to Reproduce\r\n1. Put an unknown tag within `raw` tags in a sql file, in your _analysis_ directory (note that the bug doesn't occur in a model).\r\n```\r\n{% raw %}\r\n{% form %}\r\ndate_part:\r\n type: select\r\n default: day\r\n options: [hour, day, week, month]\r\n{% endform %}\r\n{% endraw %}\r\n```\r\n2. run `dbt compile`\r\n\r\n### Expected Result\r\nThe following should be compiled:\r\n```\r\n{% form %}\r\ndate_part:\r\n type: select\r\n default: day\r\n options: [hour, day, week, month]\r\n{% endform %}\r\n```\r\n\r\n\r\n### Actual Result\r\nError on compilation:\r\n```bash\r\nFound 0 models, 0 tests, 0 archives, 1 analyses, 122 macros, 2 operations, 0 seed files\r\n\r\n10:56:47 | Concurrency: 1 threads (target='dev')\r\n10:56:47 |\r\nEncountered an error:\r\nRuntime Error\r\n Compilation Error in analysis form_tag (analysis/analysis/form_tag.sql)\r\n Encountered unknown tag 'form'.\r\n line 2\r\n {% form %}\r\n```\r\n\r\n### System information\r\nOS: MacOS Mojave\r\nPython 3.7.1\r\ndbt 0.12.1\r\n\r\n\r\n\n", "before_files": [{"content": "import itertools\nimport os\nimport json\nfrom collections import OrderedDict, defaultdict\nimport sqlparse\n\nimport dbt.utils\nimport dbt.include\nimport dbt.tracking\n\nfrom dbt.utils import get_materialization, NodeType, is_type\n\nfrom dbt.linker import Linker\n\nimport dbt.compat\nimport dbt.context.runtime\nimport dbt.contracts.project\nimport dbt.exceptions\nimport dbt.flags\nimport dbt.loader\nimport dbt.config\nfrom dbt.contracts.graph.compiled import CompiledNode, CompiledGraph\n\nfrom dbt.clients.system import write_json\nfrom dbt.logger import GLOBAL_LOGGER as logger\n\ngraph_file_name = 'graph.gpickle'\nmanifest_file_name = 'manifest.json'\n\n\ndef print_compile_stats(stats):\n names = {\n NodeType.Model: 'models',\n NodeType.Test: 'tests',\n NodeType.Archive: 'archives',\n NodeType.Analysis: 'analyses',\n NodeType.Macro: 'macros',\n NodeType.Operation: 'operations',\n NodeType.Seed: 'seed files',\n }\n\n results = {k: 0 for k in names.keys()}\n results.update(stats)\n\n stat_line = \", \".join(\n [\"{} {}\".format(ct, names.get(t)) for t, ct in results.items()])\n\n logger.info(\"Found {}\".format(stat_line))\n\n\ndef _add_prepended_cte(prepended_ctes, new_cte):\n for dct in prepended_ctes:\n if dct['id'] == new_cte['id']:\n dct['sql'] = new_cte['sql']\n return\n prepended_ctes.append(new_cte)\n\n\ndef _extend_prepended_ctes(prepended_ctes, new_prepended_ctes):\n for new_cte in new_prepended_ctes:\n _add_prepended_cte(prepended_ctes, new_cte)\n\n\ndef prepend_ctes(model, manifest):\n model, _, manifest = recursively_prepend_ctes(model, manifest)\n\n return (model, manifest)\n\n\ndef recursively_prepend_ctes(model, manifest):\n if model.extra_ctes_injected:\n return (model, model.extra_ctes, manifest)\n\n if dbt.flags.STRICT_MODE:\n # ensure that all the nodes in this manifest are compiled\n CompiledGraph(**manifest.to_flat_graph())\n\n prepended_ctes = []\n\n for 
cte in model.extra_ctes:\n cte_id = cte['id']\n cte_to_add = manifest.nodes.get(cte_id)\n cte_to_add, new_prepended_ctes, manifest = recursively_prepend_ctes(\n cte_to_add, manifest)\n\n _extend_prepended_ctes(prepended_ctes, new_prepended_ctes)\n new_cte_name = '__dbt__CTE__{}'.format(cte_to_add.get('name'))\n sql = ' {} as (\\n{}\\n)'.format(new_cte_name, cte_to_add.compiled_sql)\n _add_prepended_cte(prepended_ctes, {'id': cte_id, 'sql': sql})\n\n model.prepend_ctes(prepended_ctes)\n\n manifest.nodes[model.unique_id] = model\n\n return (model, prepended_ctes, manifest)\n\n\nclass Compiler(object):\n def __init__(self, config):\n self.config = config\n\n def initialize(self):\n dbt.clients.system.make_directory(self.config.target_path)\n dbt.clients.system.make_directory(self.config.modules_path)\n\n def compile_node(self, node, manifest, extra_context=None):\n if extra_context is None:\n extra_context = {}\n\n logger.debug(\"Compiling {}\".format(node.get('unique_id')))\n\n data = node.to_dict()\n data.update({\n 'compiled': False,\n 'compiled_sql': None,\n 'extra_ctes_injected': False,\n 'extra_ctes': [],\n 'injected_sql': None,\n })\n compiled_node = CompiledNode(**data)\n\n context = dbt.context.runtime.generate(\n compiled_node, self.config, manifest)\n context.update(extra_context)\n\n compiled_node.compiled_sql = dbt.clients.jinja.get_rendered(\n node.get('raw_sql'),\n context,\n node)\n\n compiled_node.compiled = True\n\n injected_node, _ = prepend_ctes(compiled_node, manifest)\n\n should_wrap = {NodeType.Test, NodeType.Analysis, NodeType.Operation}\n if injected_node.resource_type in should_wrap:\n # data tests get wrapped in count(*)\n # TODO : move this somewhere more reasonable\n if 'data' in injected_node.tags and \\\n is_type(injected_node, NodeType.Test):\n injected_node.wrapped_sql = (\n \"select count(*) from (\\n{test_sql}\\n) sbq\").format(\n test_sql=injected_node.injected_sql)\n else:\n # don't wrap schema tests or analyses.\n injected_node.wrapped_sql = injected_node.injected_sql\n\n elif is_type(injected_node, NodeType.Archive):\n # unfortunately we do everything automagically for\n # archives. 
in the future it'd be nice to generate\n # the SQL at the parser level.\n pass\n\n elif(is_type(injected_node, NodeType.Model) and\n get_materialization(injected_node) == 'ephemeral'):\n pass\n\n else:\n injected_node.wrapped_sql = None\n\n return injected_node\n\n def write_manifest_file(self, manifest):\n \"\"\"Write the manifest file to disk.\n\n manifest should be a Manifest.\n \"\"\"\n filename = manifest_file_name\n manifest_path = os.path.join(self.config.target_path, filename)\n write_json(manifest_path, manifest.serialize())\n\n def write_graph_file(self, linker):\n filename = graph_file_name\n graph_path = os.path.join(self.config.target_path, filename)\n linker.write_graph(graph_path)\n\n def link_node(self, linker, node, manifest):\n linker.add_node(node.unique_id)\n\n linker.update_node_data(\n node.unique_id,\n node.to_dict())\n\n for dependency in node.depends_on_nodes:\n if manifest.nodes.get(dependency):\n linker.dependency(\n node.unique_id,\n (manifest.nodes.get(dependency).unique_id))\n\n else:\n dbt.exceptions.dependency_not_found(node, dependency)\n\n def link_graph(self, linker, manifest):\n for node in manifest.nodes.values():\n self.link_node(linker, node, manifest)\n\n cycle = linker.find_cycles()\n\n if cycle:\n raise RuntimeError(\"Found a cycle: {}\".format(cycle))\n\n def get_all_projects(self):\n all_projects = {self.config.project_name: self.config}\n dependency_projects = dbt.utils.dependency_projects(self.config)\n\n for project_cfg in dependency_projects:\n name = project_cfg.project_name\n all_projects[name] = project_cfg\n\n if dbt.flags.STRICT_MODE:\n dbt.contracts.project.ProjectList(**all_projects)\n\n return all_projects\n\n def _check_resource_uniqueness(cls, manifest):\n names_resources = {}\n alias_resources = {}\n\n for resource, node in manifest.nodes.items():\n if node.resource_type not in NodeType.refable():\n continue\n\n name = node.name\n alias = \"{}.{}\".format(node.schema, node.alias)\n\n existing_node = names_resources.get(name)\n if existing_node is not None:\n dbt.exceptions.raise_duplicate_resource_name(\n existing_node, node)\n\n existing_alias = alias_resources.get(alias)\n if existing_alias is not None:\n dbt.exceptions.raise_ambiguous_alias(\n existing_alias, node)\n\n names_resources[name] = node\n alias_resources[alias] = node\n\n def compile(self):\n linker = Linker()\n\n all_projects = self.get_all_projects()\n\n manifest = dbt.loader.GraphLoader.load_all(self.config, all_projects)\n\n self.write_manifest_file(manifest)\n\n self._check_resource_uniqueness(manifest)\n\n resource_fqns = manifest.get_resource_fqns()\n self.config.warn_for_unused_resource_config_paths(resource_fqns,\n manifest.disabled)\n\n self.link_graph(linker, manifest)\n\n stats = defaultdict(int)\n\n for node_name, node in itertools.chain(\n manifest.nodes.items(),\n manifest.macros.items()):\n stats[node.resource_type] += 1\n\n self.write_graph_file(linker)\n print_compile_stats(stats)\n\n return manifest, linker\n", "path": "dbt/compilation.py"}], "after_files": [{"content": "import itertools\nimport os\nimport json\nfrom collections import OrderedDict, defaultdict\nimport sqlparse\n\nimport dbt.utils\nimport dbt.include\nimport dbt.tracking\n\nfrom dbt.utils import get_materialization, NodeType, is_type\n\nfrom dbt.linker import Linker\n\nimport dbt.compat\nimport dbt.context.runtime\nimport dbt.contracts.project\nimport dbt.exceptions\nimport dbt.flags\nimport dbt.loader\nimport dbt.config\nfrom dbt.contracts.graph.compiled import CompiledNode, 
CompiledGraph\n\nfrom dbt.clients.system import write_json\nfrom dbt.logger import GLOBAL_LOGGER as logger\n\ngraph_file_name = 'graph.gpickle'\nmanifest_file_name = 'manifest.json'\n\n\ndef print_compile_stats(stats):\n names = {\n NodeType.Model: 'models',\n NodeType.Test: 'tests',\n NodeType.Archive: 'archives',\n NodeType.Analysis: 'analyses',\n NodeType.Macro: 'macros',\n NodeType.Operation: 'operations',\n NodeType.Seed: 'seed files',\n }\n\n results = {k: 0 for k in names.keys()}\n results.update(stats)\n\n stat_line = \", \".join(\n [\"{} {}\".format(ct, names.get(t)) for t, ct in results.items()])\n\n logger.info(\"Found {}\".format(stat_line))\n\n\ndef _add_prepended_cte(prepended_ctes, new_cte):\n for dct in prepended_ctes:\n if dct['id'] == new_cte['id']:\n dct['sql'] = new_cte['sql']\n return\n prepended_ctes.append(new_cte)\n\n\ndef _extend_prepended_ctes(prepended_ctes, new_prepended_ctes):\n for new_cte in new_prepended_ctes:\n _add_prepended_cte(prepended_ctes, new_cte)\n\n\ndef prepend_ctes(model, manifest):\n model, _, manifest = recursively_prepend_ctes(model, manifest)\n\n return (model, manifest)\n\n\ndef recursively_prepend_ctes(model, manifest):\n if model.extra_ctes_injected:\n return (model, model.extra_ctes, manifest)\n\n if dbt.flags.STRICT_MODE:\n # ensure that all the nodes in this manifest are compiled\n CompiledGraph(**manifest.to_flat_graph())\n\n prepended_ctes = []\n\n for cte in model.extra_ctes:\n cte_id = cte['id']\n cte_to_add = manifest.nodes.get(cte_id)\n cte_to_add, new_prepended_ctes, manifest = recursively_prepend_ctes(\n cte_to_add, manifest)\n\n _extend_prepended_ctes(prepended_ctes, new_prepended_ctes)\n new_cte_name = '__dbt__CTE__{}'.format(cte_to_add.get('name'))\n sql = ' {} as (\\n{}\\n)'.format(new_cte_name, cte_to_add.compiled_sql)\n _add_prepended_cte(prepended_ctes, {'id': cte_id, 'sql': sql})\n\n model.prepend_ctes(prepended_ctes)\n\n manifest.nodes[model.unique_id] = model\n\n return (model, prepended_ctes, manifest)\n\n\nclass Compiler(object):\n def __init__(self, config):\n self.config = config\n\n def initialize(self):\n dbt.clients.system.make_directory(self.config.target_path)\n dbt.clients.system.make_directory(self.config.modules_path)\n\n def compile_node(self, node, manifest, extra_context=None):\n if extra_context is None:\n extra_context = {}\n\n logger.debug(\"Compiling {}\".format(node.get('unique_id')))\n\n data = node.to_dict()\n data.update({\n 'compiled': False,\n 'compiled_sql': None,\n 'extra_ctes_injected': False,\n 'extra_ctes': [],\n 'injected_sql': None,\n })\n compiled_node = CompiledNode(**data)\n\n context = dbt.context.runtime.generate(\n compiled_node, self.config, manifest)\n context.update(extra_context)\n\n compiled_node.compiled_sql = dbt.clients.jinja.get_rendered(\n node.get('raw_sql'),\n context,\n node)\n\n compiled_node.compiled = True\n\n injected_node, _ = prepend_ctes(compiled_node, manifest)\n\n should_wrap = {NodeType.Test, NodeType.Operation}\n if injected_node.resource_type in should_wrap:\n # data tests get wrapped in count(*)\n # TODO : move this somewhere more reasonable\n if 'data' in injected_node.tags and \\\n is_type(injected_node, NodeType.Test):\n injected_node.wrapped_sql = (\n \"select count(*) from (\\n{test_sql}\\n) sbq\").format(\n test_sql=injected_node.injected_sql)\n else:\n # don't wrap schema tests or analyses.\n injected_node.wrapped_sql = injected_node.injected_sql\n\n elif is_type(injected_node, NodeType.Archive):\n # unfortunately we do everything automagically 
for\n # archives. in the future it'd be nice to generate\n # the SQL at the parser level.\n pass\n\n elif(is_type(injected_node, NodeType.Model) and\n get_materialization(injected_node) == 'ephemeral'):\n pass\n\n else:\n injected_node.wrapped_sql = None\n\n return injected_node\n\n def write_manifest_file(self, manifest):\n \"\"\"Write the manifest file to disk.\n\n manifest should be a Manifest.\n \"\"\"\n filename = manifest_file_name\n manifest_path = os.path.join(self.config.target_path, filename)\n write_json(manifest_path, manifest.serialize())\n\n def write_graph_file(self, linker):\n filename = graph_file_name\n graph_path = os.path.join(self.config.target_path, filename)\n linker.write_graph(graph_path)\n\n def link_node(self, linker, node, manifest):\n linker.add_node(node.unique_id)\n\n linker.update_node_data(\n node.unique_id,\n node.to_dict())\n\n for dependency in node.depends_on_nodes:\n if manifest.nodes.get(dependency):\n linker.dependency(\n node.unique_id,\n (manifest.nodes.get(dependency).unique_id))\n\n else:\n dbt.exceptions.dependency_not_found(node, dependency)\n\n def link_graph(self, linker, manifest):\n for node in manifest.nodes.values():\n self.link_node(linker, node, manifest)\n\n cycle = linker.find_cycles()\n\n if cycle:\n raise RuntimeError(\"Found a cycle: {}\".format(cycle))\n\n def get_all_projects(self):\n all_projects = {self.config.project_name: self.config}\n dependency_projects = dbt.utils.dependency_projects(self.config)\n\n for project_cfg in dependency_projects:\n name = project_cfg.project_name\n all_projects[name] = project_cfg\n\n if dbt.flags.STRICT_MODE:\n dbt.contracts.project.ProjectList(**all_projects)\n\n return all_projects\n\n def _check_resource_uniqueness(cls, manifest):\n names_resources = {}\n alias_resources = {}\n\n for resource, node in manifest.nodes.items():\n if node.resource_type not in NodeType.refable():\n continue\n\n name = node.name\n alias = \"{}.{}\".format(node.schema, node.alias)\n\n existing_node = names_resources.get(name)\n if existing_node is not None:\n dbt.exceptions.raise_duplicate_resource_name(\n existing_node, node)\n\n existing_alias = alias_resources.get(alias)\n if existing_alias is not None:\n dbt.exceptions.raise_ambiguous_alias(\n existing_alias, node)\n\n names_resources[name] = node\n alias_resources[alias] = node\n\n def compile(self):\n linker = Linker()\n\n all_projects = self.get_all_projects()\n\n manifest = dbt.loader.GraphLoader.load_all(self.config, all_projects)\n\n self.write_manifest_file(manifest)\n\n self._check_resource_uniqueness(manifest)\n\n resource_fqns = manifest.get_resource_fqns()\n self.config.warn_for_unused_resource_config_paths(resource_fqns,\n manifest.disabled)\n\n self.link_graph(linker, manifest)\n\n stats = defaultdict(int)\n\n for node_name, node in itertools.chain(\n manifest.nodes.items(),\n manifest.macros.items()):\n stats[node.resource_type] += 1\n\n self.write_graph_file(linker)\n print_compile_stats(stats)\n\n return manifest, linker\n", "path": "dbt/compilation.py"}]}
num_tokens: 3,076
num_tokens_diff: 125
problem_id: gh_patches_debug_41296
source: rasdani/github-patches
task_type: git_diff
in_source_id: getnikola__nikola-768
prompt:
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Nikola needs a -q (quiet) switch for testing and other automated tasks
Just needs to hide NOTICEs
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nikola/main.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2012-2013 Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 from __future__ import print_function, unicode_literals
28 from operator import attrgetter
29 import os
30 import shutil
31 import sys
32 import traceback
33
34 from doit.loader import generate_tasks
35 from doit.cmd_base import TaskLoader
36 from doit.reporter import ExecutedOnlyReporter
37 from doit.doit_cmd import DoitMain
38 from doit.cmd_help import Help as DoitHelp
39 from doit.cmd_run import Run as DoitRun
40 from doit.cmd_clean import Clean as DoitClean
41
42 from . import __version__
43 from .nikola import Nikola
44 from .utils import _reload, sys_decode, LOGGER, STRICT_HANDLER
45
46
47 config = {}
48
49
50 def main(args):
51 if len(args) > 0 and args[0] == 'build' and '--strict' in args:
52 LOGGER.notice('Running in strict mode')
53 STRICT_HANDLER.push_application()
54 global config
55 sys.path.append('')
56 try:
57 import conf
58 _reload(conf)
59 config = conf.__dict__
60 except Exception:
61 if os.path.exists('conf.py'):
62 msg = traceback.format_exc(0).splitlines()[1]
63 LOGGER.error('In conf.py line {0}: {1}'.format(sys.exc_info()[2].tb_lineno, msg))
64 sys.exit(1)
65 config = {}
66
67 site = Nikola(**config)
68 return DoitNikola(site).run(args)
69
70
71 class Help(DoitHelp):
72 """show Nikola usage instead of doit """
73
74 @staticmethod
75 def print_usage(cmds):
76 """print nikola "usage" (basic help) instructions"""
77 print("Nikola is a tool to create static websites and blogs. For full documentation and more information, please visit http://getnikola.com\n\n")
78 print("Available commands:")
79 for cmd in sorted(cmds.values(), key=attrgetter('name')):
80 print(" nikola %-*s %s" % (20, cmd.name, cmd.doc_purpose))
81 print("")
82 print(" nikola help show help / reference")
83 print(" nikola help <command> show command usage")
84 print(" nikola help <task-name> show task usage")
85
86
87 class Build(DoitRun):
88 """expose "run" command as "build" for backward compatibility"""
89 def __init__(self, *args, **kw):
90 opts = list(self.cmd_options)
91 opts.append(
92 {
93 'name': 'strict',
94 'long': 'strict',
95 'default': False,
96 'type': bool,
97 'help': "Fail on things that would normally be warnings.",
98 }
99 )
100 self.cmd_options = tuple(opts)
101 super(Build, self).__init__(*args, **kw)
102
103
104 class Clean(DoitClean):
105 """A clean that removes cache/"""
106
107 def clean_tasks(self, tasks, dryrun):
108 if not dryrun and config:
109 cache_folder = config.get('CACHE_FOLDER', 'cache')
110 if os.path.exists(cache_folder):
111 shutil.rmtree(cache_folder)
112 return super(Clean, self).clean_tasks(tasks, dryrun)
113
114
115 class NikolaTaskLoader(TaskLoader):
116 """custom task loader to get tasks from Nikola instead of dodo.py file"""
117 def __init__(self, nikola):
118 self.nikola = nikola
119
120 def load_tasks(self, cmd, opt_values, pos_args):
121 DOIT_CONFIG = {
122 'reporter': ExecutedOnlyReporter,
123 'default_tasks': ['render_site', 'post_render'],
124 }
125 tasks = generate_tasks('render_site', self.nikola.gen_tasks('render_site', "Task"))
126 latetasks = generate_tasks('post_render', self.nikola.gen_tasks('post_render', "LateTask"))
127 return tasks + latetasks, DOIT_CONFIG
128
129
130 class DoitNikola(DoitMain):
131 # overwite help command
132 DOIT_CMDS = list(DoitMain.DOIT_CMDS) + [Help, Build, Clean]
133 TASK_LOADER = NikolaTaskLoader
134
135 def __init__(self, nikola):
136 self.nikola = nikola
137 self.task_loader = self.TASK_LOADER(nikola)
138
139 def get_commands(self):
140 # core doit commands
141 cmds = DoitMain.get_commands(self)
142 # load nikola commands
143 for name, cmd in self.nikola.commands.items():
144 cmds[name] = cmd
145 return cmds
146
147 def run(self, cmd_args):
148 sub_cmds = self.get_commands()
149 args = self.process_args(cmd_args)
150 args = [sys_decode(arg) for arg in args]
151
152 if len(args) == 0 or any(arg in ["--help", '-h'] for arg in args):
153 cmd_args = ['help']
154 args = ['help']
155 # Hide run because Nikola uses build
156 sub_cmds.pop('run')
157
158 if len(args) == 0 or args[0] not in sub_cmds.keys() or \
159 args[0] == 'build':
160 # Check for conf.py before launching run
161 if not self.nikola.configured:
162 LOGGER.error("This command needs to run inside an "
163 "existing Nikola site.")
164 return False
165 return super(DoitNikola, self).run(cmd_args)
166
167 @staticmethod
168 def print_version():
169 print("Nikola version " + __version__)
170
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
golden_diff:
diff --git a/nikola/main.py b/nikola/main.py
--- a/nikola/main.py
+++ b/nikola/main.py
@@ -38,6 +38,7 @@
from doit.cmd_help import Help as DoitHelp
from doit.cmd_run import Run as DoitRun
from doit.cmd_clean import Clean as DoitClean
+from logbook import NullHandler
from . import __version__
from .nikola import Nikola
@@ -48,9 +49,14 @@
def main(args):
+ quiet = False
if len(args) > 0 and args[0] == 'build' and '--strict' in args:
LOGGER.notice('Running in strict mode')
STRICT_HANDLER.push_application()
+ if len(args) > 0 and args[0] == 'build' and '-q' in args or '--quiet' in args:
+ nullhandler = NullHandler()
+ nullhandler.push_application()
+ quiet = True
global config
sys.path.append('')
try:
@@ -65,7 +71,7 @@
config = {}
site = Nikola(**config)
- return DoitNikola(site).run(args)
+ return DoitNikola(site, quiet).run(args)
class Help(DoitHelp):
@@ -97,6 +103,16 @@
'help': "Fail on things that would normally be warnings.",
}
)
+ opts.append(
+ {
+ 'name': 'quiet',
+ 'long': 'quiet',
+ 'short': 'q',
+ 'default': False,
+ 'type': bool,
+ 'help': "Run quietly.",
+ }
+ )
self.cmd_options = tuple(opts)
super(Build, self).__init__(*args, **kw)
@@ -114,14 +130,21 @@
class NikolaTaskLoader(TaskLoader):
"""custom task loader to get tasks from Nikola instead of dodo.py file"""
- def __init__(self, nikola):
+ def __init__(self, nikola, quiet=False):
self.nikola = nikola
+ self.quiet = quiet
def load_tasks(self, cmd, opt_values, pos_args):
- DOIT_CONFIG = {
- 'reporter': ExecutedOnlyReporter,
- 'default_tasks': ['render_site', 'post_render'],
- }
+ if self.quiet:
+ DOIT_CONFIG = {
+ 'verbosity': 0,
+ 'reporter': 'zero',
+ }
+ else:
+ DOIT_CONFIG = {
+ 'reporter': ExecutedOnlyReporter,
+ }
+ DOIT_CONFIG['default_tasks'] = ['render_site', 'post_render']
tasks = generate_tasks('render_site', self.nikola.gen_tasks('render_site', "Task"))
latetasks = generate_tasks('post_render', self.nikola.gen_tasks('post_render', "LateTask"))
return tasks + latetasks, DOIT_CONFIG
@@ -132,9 +155,9 @@
DOIT_CMDS = list(DoitMain.DOIT_CMDS) + [Help, Build, Clean]
TASK_LOADER = NikolaTaskLoader
- def __init__(self, nikola):
+ def __init__(self, nikola, quiet=False):
self.nikola = nikola
- self.task_loader = self.TASK_LOADER(nikola)
+ self.task_loader = self.TASK_LOADER(nikola, quiet)
def get_commands(self):
# core doit commands
verification_info:
{"golden_diff": "diff --git a/nikola/main.py b/nikola/main.py\n--- a/nikola/main.py\n+++ b/nikola/main.py\n@@ -38,6 +38,7 @@\n from doit.cmd_help import Help as DoitHelp\n from doit.cmd_run import Run as DoitRun\n from doit.cmd_clean import Clean as DoitClean\n+from logbook import NullHandler\n \n from . import __version__\n from .nikola import Nikola\n@@ -48,9 +49,14 @@\n \n \n def main(args):\n+ quiet = False\n if len(args) > 0 and args[0] == 'build' and '--strict' in args:\n LOGGER.notice('Running in strict mode')\n STRICT_HANDLER.push_application()\n+ if len(args) > 0 and args[0] == 'build' and '-q' in args or '--quiet' in args:\n+ nullhandler = NullHandler()\n+ nullhandler.push_application()\n+ quiet = True\n global config\n sys.path.append('')\n try:\n@@ -65,7 +71,7 @@\n config = {}\n \n site = Nikola(**config)\n- return DoitNikola(site).run(args)\n+ return DoitNikola(site, quiet).run(args)\n \n \n class Help(DoitHelp):\n@@ -97,6 +103,16 @@\n 'help': \"Fail on things that would normally be warnings.\",\n }\n )\n+ opts.append(\n+ {\n+ 'name': 'quiet',\n+ 'long': 'quiet',\n+ 'short': 'q',\n+ 'default': False,\n+ 'type': bool,\n+ 'help': \"Run quietly.\",\n+ }\n+ )\n self.cmd_options = tuple(opts)\n super(Build, self).__init__(*args, **kw)\n \n@@ -114,14 +130,21 @@\n \n class NikolaTaskLoader(TaskLoader):\n \"\"\"custom task loader to get tasks from Nikola instead of dodo.py file\"\"\"\n- def __init__(self, nikola):\n+ def __init__(self, nikola, quiet=False):\n self.nikola = nikola\n+ self.quiet = quiet\n \n def load_tasks(self, cmd, opt_values, pos_args):\n- DOIT_CONFIG = {\n- 'reporter': ExecutedOnlyReporter,\n- 'default_tasks': ['render_site', 'post_render'],\n- }\n+ if self.quiet:\n+ DOIT_CONFIG = {\n+ 'verbosity': 0,\n+ 'reporter': 'zero',\n+ }\n+ else:\n+ DOIT_CONFIG = {\n+ 'reporter': ExecutedOnlyReporter,\n+ }\n+ DOIT_CONFIG['default_tasks'] = ['render_site', 'post_render']\n tasks = generate_tasks('render_site', self.nikola.gen_tasks('render_site', \"Task\"))\n latetasks = generate_tasks('post_render', self.nikola.gen_tasks('post_render', \"LateTask\"))\n return tasks + latetasks, DOIT_CONFIG\n@@ -132,9 +155,9 @@\n DOIT_CMDS = list(DoitMain.DOIT_CMDS) + [Help, Build, Clean]\n TASK_LOADER = NikolaTaskLoader\n \n- def __init__(self, nikola):\n+ def __init__(self, nikola, quiet=False):\n self.nikola = nikola\n- self.task_loader = self.TASK_LOADER(nikola)\n+ self.task_loader = self.TASK_LOADER(nikola, quiet)\n \n def get_commands(self):\n # core doit commands\n", "issue": "Nikola needs a -q (quiet) switch for testing and other automated tasks\nJust needs to hide NOTICEs\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2013 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND 
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nfrom __future__ import print_function, unicode_literals\nfrom operator import attrgetter\nimport os\nimport shutil\nimport sys\nimport traceback\n\nfrom doit.loader import generate_tasks\nfrom doit.cmd_base import TaskLoader\nfrom doit.reporter import ExecutedOnlyReporter\nfrom doit.doit_cmd import DoitMain\nfrom doit.cmd_help import Help as DoitHelp\nfrom doit.cmd_run import Run as DoitRun\nfrom doit.cmd_clean import Clean as DoitClean\n\nfrom . import __version__\nfrom .nikola import Nikola\nfrom .utils import _reload, sys_decode, LOGGER, STRICT_HANDLER\n\n\nconfig = {}\n\n\ndef main(args):\n if len(args) > 0 and args[0] == 'build' and '--strict' in args:\n LOGGER.notice('Running in strict mode')\n STRICT_HANDLER.push_application()\n global config\n sys.path.append('')\n try:\n import conf\n _reload(conf)\n config = conf.__dict__\n except Exception:\n if os.path.exists('conf.py'):\n msg = traceback.format_exc(0).splitlines()[1]\n LOGGER.error('In conf.py line {0}: {1}'.format(sys.exc_info()[2].tb_lineno, msg))\n sys.exit(1)\n config = {}\n\n site = Nikola(**config)\n return DoitNikola(site).run(args)\n\n\nclass Help(DoitHelp):\n \"\"\"show Nikola usage instead of doit \"\"\"\n\n @staticmethod\n def print_usage(cmds):\n \"\"\"print nikola \"usage\" (basic help) instructions\"\"\"\n print(\"Nikola is a tool to create static websites and blogs. For full documentation and more information, please visit http://getnikola.com\\n\\n\")\n print(\"Available commands:\")\n for cmd in sorted(cmds.values(), key=attrgetter('name')):\n print(\" nikola %-*s %s\" % (20, cmd.name, cmd.doc_purpose))\n print(\"\")\n print(\" nikola help show help / reference\")\n print(\" nikola help <command> show command usage\")\n print(\" nikola help <task-name> show task usage\")\n\n\nclass Build(DoitRun):\n \"\"\"expose \"run\" command as \"build\" for backward compatibility\"\"\"\n def __init__(self, *args, **kw):\n opts = list(self.cmd_options)\n opts.append(\n {\n 'name': 'strict',\n 'long': 'strict',\n 'default': False,\n 'type': bool,\n 'help': \"Fail on things that would normally be warnings.\",\n }\n )\n self.cmd_options = tuple(opts)\n super(Build, self).__init__(*args, **kw)\n\n\nclass Clean(DoitClean):\n \"\"\"A clean that removes cache/\"\"\"\n\n def clean_tasks(self, tasks, dryrun):\n if not dryrun and config:\n cache_folder = config.get('CACHE_FOLDER', 'cache')\n if os.path.exists(cache_folder):\n shutil.rmtree(cache_folder)\n return super(Clean, self).clean_tasks(tasks, dryrun)\n\n\nclass NikolaTaskLoader(TaskLoader):\n \"\"\"custom task loader to get tasks from Nikola instead of dodo.py file\"\"\"\n def __init__(self, nikola):\n self.nikola = nikola\n\n def load_tasks(self, cmd, opt_values, pos_args):\n DOIT_CONFIG = {\n 'reporter': ExecutedOnlyReporter,\n 'default_tasks': ['render_site', 'post_render'],\n }\n tasks = generate_tasks('render_site', self.nikola.gen_tasks('render_site', \"Task\"))\n latetasks = generate_tasks('post_render', self.nikola.gen_tasks('post_render', \"LateTask\"))\n return tasks + latetasks, DOIT_CONFIG\n\n\nclass DoitNikola(DoitMain):\n # overwite help command\n DOIT_CMDS = list(DoitMain.DOIT_CMDS) + [Help, Build, Clean]\n TASK_LOADER = NikolaTaskLoader\n\n def __init__(self, nikola):\n 
self.nikola = nikola\n self.task_loader = self.TASK_LOADER(nikola)\n\n def get_commands(self):\n # core doit commands\n cmds = DoitMain.get_commands(self)\n # load nikola commands\n for name, cmd in self.nikola.commands.items():\n cmds[name] = cmd\n return cmds\n\n def run(self, cmd_args):\n sub_cmds = self.get_commands()\n args = self.process_args(cmd_args)\n args = [sys_decode(arg) for arg in args]\n\n if len(args) == 0 or any(arg in [\"--help\", '-h'] for arg in args):\n cmd_args = ['help']\n args = ['help']\n # Hide run because Nikola uses build\n sub_cmds.pop('run')\n\n if len(args) == 0 or args[0] not in sub_cmds.keys() or \\\n args[0] == 'build':\n # Check for conf.py before launching run\n if not self.nikola.configured:\n LOGGER.error(\"This command needs to run inside an \"\n \"existing Nikola site.\")\n return False\n return super(DoitNikola, self).run(cmd_args)\n\n @staticmethod\n def print_version():\n print(\"Nikola version \" + __version__)\n", "path": "nikola/main.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2013 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nfrom __future__ import print_function, unicode_literals\nfrom operator import attrgetter\nimport os\nimport shutil\nimport sys\nimport traceback\n\nfrom doit.loader import generate_tasks\nfrom doit.cmd_base import TaskLoader\nfrom doit.reporter import ExecutedOnlyReporter\nfrom doit.doit_cmd import DoitMain\nfrom doit.cmd_help import Help as DoitHelp\nfrom doit.cmd_run import Run as DoitRun\nfrom doit.cmd_clean import Clean as DoitClean\nfrom logbook import NullHandler\n\nfrom . 
import __version__\nfrom .nikola import Nikola\nfrom .utils import _reload, sys_decode, LOGGER, STRICT_HANDLER\n\n\nconfig = {}\n\n\ndef main(args):\n quiet = False\n if len(args) > 0 and args[0] == 'build' and '--strict' in args:\n LOGGER.notice('Running in strict mode')\n STRICT_HANDLER.push_application()\n if len(args) > 0 and args[0] == 'build' and '-q' in args or '--quiet' in args:\n nullhandler = NullHandler()\n nullhandler.push_application()\n quiet = True\n global config\n sys.path.append('')\n try:\n import conf\n _reload(conf)\n config = conf.__dict__\n except Exception:\n if os.path.exists('conf.py'):\n msg = traceback.format_exc(0).splitlines()[1]\n LOGGER.error('In conf.py line {0}: {1}'.format(sys.exc_info()[2].tb_lineno, msg))\n sys.exit(1)\n config = {}\n\n site = Nikola(**config)\n return DoitNikola(site, quiet).run(args)\n\n\nclass Help(DoitHelp):\n \"\"\"show Nikola usage instead of doit \"\"\"\n\n @staticmethod\n def print_usage(cmds):\n \"\"\"print nikola \"usage\" (basic help) instructions\"\"\"\n print(\"Nikola is a tool to create static websites and blogs. For full documentation and more information, please visit http://getnikola.com\\n\\n\")\n print(\"Available commands:\")\n for cmd in sorted(cmds.values(), key=attrgetter('name')):\n print(\" nikola %-*s %s\" % (20, cmd.name, cmd.doc_purpose))\n print(\"\")\n print(\" nikola help show help / reference\")\n print(\" nikola help <command> show command usage\")\n print(\" nikola help <task-name> show task usage\")\n\n\nclass Build(DoitRun):\n \"\"\"expose \"run\" command as \"build\" for backward compatibility\"\"\"\n def __init__(self, *args, **kw):\n opts = list(self.cmd_options)\n opts.append(\n {\n 'name': 'strict',\n 'long': 'strict',\n 'default': False,\n 'type': bool,\n 'help': \"Fail on things that would normally be warnings.\",\n }\n )\n opts.append(\n {\n 'name': 'quiet',\n 'long': 'quiet',\n 'short': 'q',\n 'default': False,\n 'type': bool,\n 'help': \"Run quietly.\",\n }\n )\n self.cmd_options = tuple(opts)\n super(Build, self).__init__(*args, **kw)\n\n\nclass Clean(DoitClean):\n \"\"\"A clean that removes cache/\"\"\"\n\n def clean_tasks(self, tasks, dryrun):\n if not dryrun and config:\n cache_folder = config.get('CACHE_FOLDER', 'cache')\n if os.path.exists(cache_folder):\n shutil.rmtree(cache_folder)\n return super(Clean, self).clean_tasks(tasks, dryrun)\n\n\nclass NikolaTaskLoader(TaskLoader):\n \"\"\"custom task loader to get tasks from Nikola instead of dodo.py file\"\"\"\n def __init__(self, nikola, quiet=False):\n self.nikola = nikola\n self.quiet = quiet\n\n def load_tasks(self, cmd, opt_values, pos_args):\n if self.quiet:\n DOIT_CONFIG = {\n 'verbosity': 0,\n 'reporter': 'zero',\n }\n else:\n DOIT_CONFIG = {\n 'reporter': ExecutedOnlyReporter,\n }\n DOIT_CONFIG['default_tasks'] = ['render_site', 'post_render']\n tasks = generate_tasks('render_site', self.nikola.gen_tasks('render_site', \"Task\"))\n latetasks = generate_tasks('post_render', self.nikola.gen_tasks('post_render', \"LateTask\"))\n return tasks + latetasks, DOIT_CONFIG\n\n\nclass DoitNikola(DoitMain):\n # overwite help command\n DOIT_CMDS = list(DoitMain.DOIT_CMDS) + [Help, Build, Clean]\n TASK_LOADER = NikolaTaskLoader\n\n def __init__(self, nikola, quiet=False):\n self.nikola = nikola\n self.task_loader = self.TASK_LOADER(nikola, quiet)\n\n def get_commands(self):\n # core doit commands\n cmds = DoitMain.get_commands(self)\n # load nikola commands\n for name, cmd in self.nikola.commands.items():\n cmds[name] = cmd\n return cmds\n\n 
def run(self, cmd_args):\n sub_cmds = self.get_commands()\n args = self.process_args(cmd_args)\n args = [sys_decode(arg) for arg in args]\n\n if len(args) == 0 or any(arg in [\"--help\", '-h'] for arg in args):\n cmd_args = ['help']\n args = ['help']\n # Hide run because Nikola uses build\n sub_cmds.pop('run')\n\n if len(args) == 0 or args[0] not in sub_cmds.keys() or \\\n args[0] == 'build':\n # Check for conf.py before launching run\n if not self.nikola.configured:\n LOGGER.error(\"This command needs to run inside an \"\n \"existing Nikola site.\")\n return False\n return super(DoitNikola, self).run(cmd_args)\n\n @staticmethod\n def print_version():\n print(\"Nikola version \" + __version__)\n", "path": "nikola/main.py"}]}
num_tokens: 2,096
num_tokens_diff: 794
problem_id: gh_patches_debug_13268
source: rasdani/github-patches
task_type: git_diff
in_source_id: fossasia__open-event-server-987
prompt:
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Scheduler: Cannot drag and drop items
I tried out event: https://open-event.herokuapp.com/events/59/scheduler/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `open_event/api/sessions.py`
Content:
```
1 from flask.ext.restplus import Resource, Namespace
2 from sqlalchemy.orm.collections import InstrumentedList
3
4 from open_event.models.session import Session as SessionModel
5 from open_event.models.track import Track as TrackModel
6 from open_event.models.microlocation import Microlocation as MicrolocationModel
7 from open_event.models.speaker import Speaker as SpeakerModel
8
9 from .helpers.helpers import get_paginated_list, requires_auth, \
10 save_db_model, get_object_in_event
11 from .helpers.utils import PAGINATED_MODEL, PaginatedResourceBase, ServiceDAO, \
12 PAGE_PARAMS, POST_RESPONSES, PUT_RESPONSES
13 from .helpers import custom_fields as fields
14 from .helpers.special_fields import SessionLanguageField, SessionStateField
15
16 api = Namespace('sessions', description='Sessions', path='/')
17
18 # Create models
19 SESSION_TRACK = api.model('SessionTrack', {
20 'id': fields.Integer(required=True),
21 'name': fields.String(),
22 })
23
24 SESSION_SPEAKER = api.model('SessionSpeaker', {
25 'id': fields.Integer(required=True),
26 'name': fields.String(),
27 'organisation': fields.String()
28 })
29
30 SESSION_MICROLOCATION = api.model('SessionMicrolocation', {
31 'id': fields.Integer(required=True),
32 'name': fields.String(),
33 })
34
35 SESSION = api.model('Session', {
36 'id': fields.Integer(required=True),
37 'title': fields.String(required=True),
38 'subtitle': fields.String(),
39 'short_abstract': fields.String(),
40 'long_abstract': fields.String(required=True),
41 'comments': fields.String(),
42 'start_time': fields.DateTime(required=True),
43 'end_time': fields.DateTime(required=True),
44 'track': fields.Nested(SESSION_TRACK, allow_null=True),
45 'speakers': fields.List(fields.Nested(SESSION_SPEAKER)),
46 'language': SessionLanguageField(),
47 'microlocation': fields.Nested(SESSION_MICROLOCATION, allow_null=True),
48 'slides': fields.String(),
49 'video': fields.String(),
50 'audio': fields.String(),
51 'signup_url': fields.Uri(),
52 'state': SessionStateField()
53 })
54
55 SESSION_PAGINATED = api.clone('SessionPaginated', PAGINATED_MODEL, {
56 'results': fields.List(fields.Nested(SESSION))
57 })
58
59 SESSION_POST = api.clone('SessionPost', SESSION, {
60 'track_id': fields.Integer(),
61 'speaker_ids': fields.List(fields.Integer()),
62 'microlocation_id': fields.Integer()
63 })
64 del SESSION_POST['id']
65 del SESSION_POST['track']
66 del SESSION_POST['speakers']
67 del SESSION_POST['microlocation']
68
69
70 # Create DAO
71 class SessionDAO(ServiceDAO):
72 def _delete_fields(self, data):
73 del data['speaker_ids']
74 del data['track_id']
75 del data['microlocation_id']
76 data['start_time'] = SESSION_POST['start_time'].from_str(
77 data['start_time'])
78 data['end_time'] = SESSION_POST['end_time'].from_str(data['end_time'])
79 return data
80
81 def get_object(self, model, sid, event_id):
82 """
83 returns object (model). Checks if object is in same event
84 """
85 if sid is None:
86 return None
87 return get_object_in_event(model, sid, event_id)
88
89 def fix_payload_post(self, event_id, data):
90 """
91 Fixes payload of POST request
92 """
93 data['track'] = self.get_object(TrackModel, data['track_id'], event_id)
94 data['microlocation'] = self.get_object(MicrolocationModel, data['microlocation_id'], event_id)
95 data['event_id'] = event_id
96 data['speakers'] = InstrumentedList(
97 SpeakerModel.query.get(_) for _ in data['speaker_ids']
98 if self.get_object(SpeakerModel, _, event_id) is not None
99 )
100 data = self._delete_fields(data)
101 return data
102
103 def update(self, event_id, service_id, data):
104 data = self.validate(data)
105 data_copy = data.copy()
106 data_copy = self.fix_payload_post(event_id, data_copy)
107 data = self._delete_fields(data)
108 obj = ServiceDAO.update(self, event_id, service_id, data)
109 obj.track = data_copy['track']
110 obj.microlocation = data_copy['microlocation']
111 obj.speakers = data_copy['speakers']
112 obj = save_db_model(obj, SessionModel.__name__, event_id)
113 return obj
114
115 def create(self, event_id, data, url):
116 data = self.validate(data)
117 payload = self.fix_payload_post(event_id, data)
118 return ServiceDAO.create(self, event_id, payload, url, validate=False)
119
120
121 DAO = SessionDAO(SessionModel, SESSION_POST)
122
123
124 @api.route('/events/<int:event_id>/sessions/<int:session_id>')
125 @api.response(404, 'Session not found')
126 @api.response(400, 'Session does not belong to event')
127 class Session(Resource):
128 @api.doc('get_session')
129 @api.marshal_with(SESSION)
130 def get(self, event_id, session_id):
131 """Fetch a session given its id"""
132 return DAO.get(event_id, session_id)
133
134 @requires_auth
135 @api.doc('delete_session')
136 @api.marshal_with(SESSION)
137 def delete(self, event_id, session_id):
138 """Delete a session given its id"""
139 return DAO.delete(event_id, session_id)
140
141 @requires_auth
142 @api.doc('update_session', responses=PUT_RESPONSES)
143 @api.marshal_with(SESSION)
144 @api.expect(SESSION_POST)
145 def put(self, event_id, session_id):
146 """Update a session given its id"""
147 return DAO.update(event_id, session_id, self.api.payload)
148
149
150 @api.route('/events/<int:event_id>/sessions')
151 class SessionList(Resource):
152 @api.doc('list_sessions')
153 @api.marshal_list_with(SESSION)
154 def get(self, event_id):
155 """List all sessions"""
156 return DAO.list(event_id)
157
158 @requires_auth
159 @api.doc('create_session', responses=POST_RESPONSES)
160 @api.marshal_with(SESSION)
161 @api.expect(SESSION_POST)
162 def post(self, event_id):
163 """Create a session"""
164 return DAO.create(
165 event_id,
166 self.api.payload,
167 self.api.url_for(self, event_id=event_id)
168 )
169
170
171 @api.route('/events/<int:event_id>/sessions/page')
172 class SessionListPaginated(Resource, PaginatedResourceBase):
173 @api.doc('list_sessions_paginated', params=PAGE_PARAMS)
174 @api.marshal_with(SESSION_PAGINATED)
175 def get(self, event_id):
176 """List sessions in a paginated manner"""
177 return get_paginated_list(
178 SessionModel,
179 self.api.url_for(self, event_id=event_id),
180 args=self.parser.parse_args(),
181 event_id=event_id
182 )
183
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
golden_diff:
diff --git a/open_event/api/sessions.py b/open_event/api/sessions.py
--- a/open_event/api/sessions.py
+++ b/open_event/api/sessions.py
@@ -37,10 +37,10 @@
'title': fields.String(required=True),
'subtitle': fields.String(),
'short_abstract': fields.String(),
- 'long_abstract': fields.String(required=True),
+ 'long_abstract': fields.String(),
'comments': fields.String(),
- 'start_time': fields.DateTime(required=True),
- 'end_time': fields.DateTime(required=True),
+ 'start_time': fields.DateTime(),
+ 'end_time': fields.DateTime(),
'track': fields.Nested(SESSION_TRACK, allow_null=True),
'speakers': fields.List(fields.Nested(SESSION_SPEAKER)),
'language': SessionLanguageField(),
verification_info:
{"golden_diff": "diff --git a/open_event/api/sessions.py b/open_event/api/sessions.py\n--- a/open_event/api/sessions.py\n+++ b/open_event/api/sessions.py\n@@ -37,10 +37,10 @@\n 'title': fields.String(required=True),\n 'subtitle': fields.String(),\n 'short_abstract': fields.String(),\n- 'long_abstract': fields.String(required=True),\n+ 'long_abstract': fields.String(),\n 'comments': fields.String(),\n- 'start_time': fields.DateTime(required=True),\n- 'end_time': fields.DateTime(required=True),\n+ 'start_time': fields.DateTime(),\n+ 'end_time': fields.DateTime(),\n 'track': fields.Nested(SESSION_TRACK, allow_null=True),\n 'speakers': fields.List(fields.Nested(SESSION_SPEAKER)),\n 'language': SessionLanguageField(),\n", "issue": "Scheduler: Cannot drag and drop items\nI tried out event: https://open-event.herokuapp.com/events/59/scheduler/\n\n", "before_files": [{"content": "from flask.ext.restplus import Resource, Namespace\nfrom sqlalchemy.orm.collections import InstrumentedList\n\nfrom open_event.models.session import Session as SessionModel\nfrom open_event.models.track import Track as TrackModel\nfrom open_event.models.microlocation import Microlocation as MicrolocationModel\nfrom open_event.models.speaker import Speaker as SpeakerModel\n\nfrom .helpers.helpers import get_paginated_list, requires_auth, \\\n save_db_model, get_object_in_event\nfrom .helpers.utils import PAGINATED_MODEL, PaginatedResourceBase, ServiceDAO, \\\n PAGE_PARAMS, POST_RESPONSES, PUT_RESPONSES\nfrom .helpers import custom_fields as fields\nfrom .helpers.special_fields import SessionLanguageField, SessionStateField\n\napi = Namespace('sessions', description='Sessions', path='/')\n\n# Create models\nSESSION_TRACK = api.model('SessionTrack', {\n 'id': fields.Integer(required=True),\n 'name': fields.String(),\n})\n\nSESSION_SPEAKER = api.model('SessionSpeaker', {\n 'id': fields.Integer(required=True),\n 'name': fields.String(),\n 'organisation': fields.String()\n})\n\nSESSION_MICROLOCATION = api.model('SessionMicrolocation', {\n 'id': fields.Integer(required=True),\n 'name': fields.String(),\n})\n\nSESSION = api.model('Session', {\n 'id': fields.Integer(required=True),\n 'title': fields.String(required=True),\n 'subtitle': fields.String(),\n 'short_abstract': fields.String(),\n 'long_abstract': fields.String(required=True),\n 'comments': fields.String(),\n 'start_time': fields.DateTime(required=True),\n 'end_time': fields.DateTime(required=True),\n 'track': fields.Nested(SESSION_TRACK, allow_null=True),\n 'speakers': fields.List(fields.Nested(SESSION_SPEAKER)),\n 'language': SessionLanguageField(),\n 'microlocation': fields.Nested(SESSION_MICROLOCATION, allow_null=True),\n 'slides': fields.String(),\n 'video': fields.String(),\n 'audio': fields.String(),\n 'signup_url': fields.Uri(),\n 'state': SessionStateField()\n})\n\nSESSION_PAGINATED = api.clone('SessionPaginated', PAGINATED_MODEL, {\n 'results': fields.List(fields.Nested(SESSION))\n})\n\nSESSION_POST = api.clone('SessionPost', SESSION, {\n 'track_id': fields.Integer(),\n 'speaker_ids': fields.List(fields.Integer()),\n 'microlocation_id': fields.Integer()\n})\ndel SESSION_POST['id']\ndel SESSION_POST['track']\ndel SESSION_POST['speakers']\ndel SESSION_POST['microlocation']\n\n\n# Create DAO\nclass SessionDAO(ServiceDAO):\n def _delete_fields(self, data):\n del data['speaker_ids']\n del data['track_id']\n del data['microlocation_id']\n data['start_time'] = SESSION_POST['start_time'].from_str(\n data['start_time'])\n data['end_time'] = 
SESSION_POST['end_time'].from_str(data['end_time'])\n return data\n\n def get_object(self, model, sid, event_id):\n \"\"\"\n returns object (model). Checks if object is in same event\n \"\"\"\n if sid is None:\n return None\n return get_object_in_event(model, sid, event_id)\n\n def fix_payload_post(self, event_id, data):\n \"\"\"\n Fixes payload of POST request\n \"\"\"\n data['track'] = self.get_object(TrackModel, data['track_id'], event_id)\n data['microlocation'] = self.get_object(MicrolocationModel, data['microlocation_id'], event_id)\n data['event_id'] = event_id\n data['speakers'] = InstrumentedList(\n SpeakerModel.query.get(_) for _ in data['speaker_ids']\n if self.get_object(SpeakerModel, _, event_id) is not None\n )\n data = self._delete_fields(data)\n return data\n\n def update(self, event_id, service_id, data):\n data = self.validate(data)\n data_copy = data.copy()\n data_copy = self.fix_payload_post(event_id, data_copy)\n data = self._delete_fields(data)\n obj = ServiceDAO.update(self, event_id, service_id, data)\n obj.track = data_copy['track']\n obj.microlocation = data_copy['microlocation']\n obj.speakers = data_copy['speakers']\n obj = save_db_model(obj, SessionModel.__name__, event_id)\n return obj\n\n def create(self, event_id, data, url):\n data = self.validate(data)\n payload = self.fix_payload_post(event_id, data)\n return ServiceDAO.create(self, event_id, payload, url, validate=False)\n\n\nDAO = SessionDAO(SessionModel, SESSION_POST)\n\n\[email protected]('/events/<int:event_id>/sessions/<int:session_id>')\[email protected](404, 'Session not found')\[email protected](400, 'Session does not belong to event')\nclass Session(Resource):\n @api.doc('get_session')\n @api.marshal_with(SESSION)\n def get(self, event_id, session_id):\n \"\"\"Fetch a session given its id\"\"\"\n return DAO.get(event_id, session_id)\n\n @requires_auth\n @api.doc('delete_session')\n @api.marshal_with(SESSION)\n def delete(self, event_id, session_id):\n \"\"\"Delete a session given its id\"\"\"\n return DAO.delete(event_id, session_id)\n\n @requires_auth\n @api.doc('update_session', responses=PUT_RESPONSES)\n @api.marshal_with(SESSION)\n @api.expect(SESSION_POST)\n def put(self, event_id, session_id):\n \"\"\"Update a session given its id\"\"\"\n return DAO.update(event_id, session_id, self.api.payload)\n\n\[email protected]('/events/<int:event_id>/sessions')\nclass SessionList(Resource):\n @api.doc('list_sessions')\n @api.marshal_list_with(SESSION)\n def get(self, event_id):\n \"\"\"List all sessions\"\"\"\n return DAO.list(event_id)\n\n @requires_auth\n @api.doc('create_session', responses=POST_RESPONSES)\n @api.marshal_with(SESSION)\n @api.expect(SESSION_POST)\n def post(self, event_id):\n \"\"\"Create a session\"\"\"\n return DAO.create(\n event_id,\n self.api.payload,\n self.api.url_for(self, event_id=event_id)\n )\n\n\[email protected]('/events/<int:event_id>/sessions/page')\nclass SessionListPaginated(Resource, PaginatedResourceBase):\n @api.doc('list_sessions_paginated', params=PAGE_PARAMS)\n @api.marshal_with(SESSION_PAGINATED)\n def get(self, event_id):\n \"\"\"List sessions in a paginated manner\"\"\"\n return get_paginated_list(\n SessionModel,\n self.api.url_for(self, event_id=event_id),\n args=self.parser.parse_args(),\n event_id=event_id\n )\n", "path": "open_event/api/sessions.py"}], "after_files": [{"content": "from flask.ext.restplus import Resource, Namespace\nfrom sqlalchemy.orm.collections import InstrumentedList\n\nfrom open_event.models.session import Session as 
SessionModel\nfrom open_event.models.track import Track as TrackModel\nfrom open_event.models.microlocation import Microlocation as MicrolocationModel\nfrom open_event.models.speaker import Speaker as SpeakerModel\n\nfrom .helpers.helpers import get_paginated_list, requires_auth, \\\n save_db_model, get_object_in_event\nfrom .helpers.utils import PAGINATED_MODEL, PaginatedResourceBase, ServiceDAO, \\\n PAGE_PARAMS, POST_RESPONSES, PUT_RESPONSES\nfrom .helpers import custom_fields as fields\nfrom .helpers.special_fields import SessionLanguageField, SessionStateField\n\napi = Namespace('sessions', description='Sessions', path='/')\n\n# Create models\nSESSION_TRACK = api.model('SessionTrack', {\n 'id': fields.Integer(required=True),\n 'name': fields.String(),\n})\n\nSESSION_SPEAKER = api.model('SessionSpeaker', {\n 'id': fields.Integer(required=True),\n 'name': fields.String(),\n 'organisation': fields.String()\n})\n\nSESSION_MICROLOCATION = api.model('SessionMicrolocation', {\n 'id': fields.Integer(required=True),\n 'name': fields.String(),\n})\n\nSESSION = api.model('Session', {\n 'id': fields.Integer(required=True),\n 'title': fields.String(required=True),\n 'subtitle': fields.String(),\n 'short_abstract': fields.String(),\n 'long_abstract': fields.String(),\n 'comments': fields.String(),\n 'start_time': fields.DateTime(),\n 'end_time': fields.DateTime(),\n 'track': fields.Nested(SESSION_TRACK, allow_null=True),\n 'speakers': fields.List(fields.Nested(SESSION_SPEAKER)),\n 'language': SessionLanguageField(),\n 'microlocation': fields.Nested(SESSION_MICROLOCATION, allow_null=True),\n 'slides': fields.String(),\n 'video': fields.String(),\n 'audio': fields.String(),\n 'signup_url': fields.Uri(),\n 'state': SessionStateField()\n})\n\nSESSION_PAGINATED = api.clone('SessionPaginated', PAGINATED_MODEL, {\n 'results': fields.List(fields.Nested(SESSION))\n})\n\nSESSION_POST = api.clone('SessionPost', SESSION, {\n 'track_id': fields.Integer(),\n 'speaker_ids': fields.List(fields.Integer()),\n 'microlocation_id': fields.Integer()\n})\ndel SESSION_POST['id']\ndel SESSION_POST['track']\ndel SESSION_POST['speakers']\ndel SESSION_POST['microlocation']\n\n\n# Create DAO\nclass SessionDAO(ServiceDAO):\n def _delete_fields(self, data):\n del data['speaker_ids']\n del data['track_id']\n del data['microlocation_id']\n data['start_time'] = SESSION_POST['start_time'].from_str(\n data['start_time'])\n data['end_time'] = SESSION_POST['end_time'].from_str(data['end_time'])\n return data\n\n def get_object(self, model, sid, event_id):\n \"\"\"\n returns object (model). 
Checks if object is in same event\n \"\"\"\n if sid is None:\n return None\n return get_object_in_event(model, sid, event_id)\n\n def fix_payload_post(self, event_id, data):\n \"\"\"\n Fixes payload of POST request\n \"\"\"\n data['track'] = self.get_object(TrackModel, data['track_id'], event_id)\n data['microlocation'] = self.get_object(MicrolocationModel, data['microlocation_id'], event_id)\n data['event_id'] = event_id\n data['speakers'] = InstrumentedList(\n SpeakerModel.query.get(_) for _ in data['speaker_ids']\n if self.get_object(SpeakerModel, _, event_id) is not None\n )\n data = self._delete_fields(data)\n return data\n\n def update(self, event_id, service_id, data):\n data = self.validate(data)\n data_copy = data.copy()\n data_copy = self.fix_payload_post(event_id, data_copy)\n data = self._delete_fields(data)\n obj = ServiceDAO.update(self, event_id, service_id, data)\n obj.track = data_copy['track']\n obj.microlocation = data_copy['microlocation']\n obj.speakers = data_copy['speakers']\n obj = save_db_model(obj, SessionModel.__name__, event_id)\n return obj\n\n def create(self, event_id, data, url):\n data = self.validate(data)\n payload = self.fix_payload_post(event_id, data)\n return ServiceDAO.create(self, event_id, payload, url, validate=False)\n\n\nDAO = SessionDAO(SessionModel, SESSION_POST)\n\n\[email protected]('/events/<int:event_id>/sessions/<int:session_id>')\[email protected](404, 'Session not found')\[email protected](400, 'Session does not belong to event')\nclass Session(Resource):\n @api.doc('get_session')\n @api.marshal_with(SESSION)\n def get(self, event_id, session_id):\n \"\"\"Fetch a session given its id\"\"\"\n return DAO.get(event_id, session_id)\n\n @requires_auth\n @api.doc('delete_session')\n @api.marshal_with(SESSION)\n def delete(self, event_id, session_id):\n \"\"\"Delete a session given its id\"\"\"\n return DAO.delete(event_id, session_id)\n\n @requires_auth\n @api.doc('update_session', responses=PUT_RESPONSES)\n @api.marshal_with(SESSION)\n @api.expect(SESSION_POST)\n def put(self, event_id, session_id):\n \"\"\"Update a session given its id\"\"\"\n return DAO.update(event_id, session_id, self.api.payload)\n\n\[email protected]('/events/<int:event_id>/sessions')\nclass SessionList(Resource):\n @api.doc('list_sessions')\n @api.marshal_list_with(SESSION)\n def get(self, event_id):\n \"\"\"List all sessions\"\"\"\n return DAO.list(event_id)\n\n @requires_auth\n @api.doc('create_session', responses=POST_RESPONSES)\n @api.marshal_with(SESSION)\n @api.expect(SESSION_POST)\n def post(self, event_id):\n \"\"\"Create a session\"\"\"\n return DAO.create(\n event_id,\n self.api.payload,\n self.api.url_for(self, event_id=event_id)\n )\n\n\[email protected]('/events/<int:event_id>/sessions/page')\nclass SessionListPaginated(Resource, PaginatedResourceBase):\n @api.doc('list_sessions_paginated', params=PAGE_PARAMS)\n @api.marshal_with(SESSION_PAGINATED)\n def get(self, event_id):\n \"\"\"List sessions in a paginated manner\"\"\"\n return get_paginated_list(\n SessionModel,\n self.api.url_for(self, event_id=event_id),\n args=self.parser.parse_args(),\n event_id=event_id\n )\n", "path": "open_event/api/sessions.py"}]}
| 2,191 | 179 |
gh_patches_debug_9937
|
rasdani/github-patches
|
git_diff
|
liberapay__liberapay.com-530
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SMTP exceptions aren't caught
- https://sentry.io/share/issue/3132343535362e313939323633313931/
- https://sentry.io/share/issue/3132343535362e313939323533393935/
- https://sentry.io/share/issue/3132343535362e313939323533383839/
```
SMTPServerDisconnected: please run connect() first
File "site-packages/algorithm.py", line 321, in loop
new_state = function(**deps.as_kwargs)
File "liberapay/security/authentication.py", line 136, in authenticate_user_if_possible
p = sign_in_with_form_data(body, state)
File "liberapay/security/authentication.py", line 88, in sign_in_with_form_data
p.add_email(email, cursor=c)
File "liberapay/models/participant.py", line 618, in add_email
r = self.send_email('verification', email=email, link=link.format(**locals()))
File "liberapay/models/participant.py", line 733, in send_email
n = website.mailer.send(**message)
File "mailshake/mailers/base.py", line 41, in send
return self.send_messages(EmailMessage(*args, **kwargs))
File "mailshake/mailers/smtp.py", line 105, in send_messages
sent = self._send(message)
File "mailshake/mailers/smtp.py", line 126, in _send
from_email, recipients, email_message.as_bytes()
File "python2.7/smtplib.py", line 733, in sendmail
(code, resp) = self.mail(from_addr, esmtp_opts)
File "python2.7/smtplib.py", line 480, in mail
self.putcmd("mail", "FROM:%s%s" % (quoteaddr(sender), optionlist))
File "python2.7/smtplib.py", line 341, in putcmd
self.send(str)
File "python2.7/smtplib.py", line 333, in send
raise SMTPServerDisconnected('please run connect() first')
```
```
SMTPRecipientsRefused: {u"~3921 <[email protected]|ping-n21127.0.0.1||`ping-c21127.0.0.1`#'|ping-n21127.0.0.1||`ping-c21127.0.0.1`#\\>": (501, 'Syntax error')}
File "site-packages/algorithm.py", line 321, in loop
new_state = function(**deps.as_kwargs)
File "liberapay/security/authentication.py", line 136, in authenticate_user_if_possible
p = sign_in_with_form_data(body, state)
File "liberapay/security/authentication.py", line 88, in sign_in_with_form_data
p.add_email(email, cursor=c)
File "liberapay/models/participant.py", line 618, in add_email
r = self.send_email('verification', email=email, link=link.format(**locals()))
File "liberapay/models/participant.py", line 733, in send_email
n = website.mailer.send(**message)
File "mailshake/mailers/base.py", line 41, in send
return self.send_messages(EmailMessage(*args, **kwargs))
File "mailshake/mailers/smtp.py", line 105, in send_messages
sent = self._send(message)
File "mailshake/mailers/smtp.py", line 126, in _send
from_email, recipients, email_message.as_bytes()
File "python2.7/smtplib.py", line 747, in sendmail
raise SMTPRecipientsRefused(senderrs)
```
```
SMTPDataError: (554, 'Recipient format is invalid: \'["[email protected]\'and3405=3406--"]\'')
File "site-packages/algorithm.py", line 321, in loop
new_state = function(**deps.as_kwargs)
File "liberapay/security/authentication.py", line 136, in authenticate_user_if_possible
p = sign_in_with_form_data(body, state)
File "liberapay/security/authentication.py", line 88, in sign_in_with_form_data
p.add_email(email, cursor=c)
File "liberapay/models/participant.py", line 618, in add_email
r = self.send_email('verification', email=email, link=link.format(**locals()))
File "liberapay/models/participant.py", line 733, in send_email
n = website.mailer.send(**message)
File "mailshake/mailers/base.py", line 41, in send
return self.send_messages(EmailMessage(*args, **kwargs))
File "mailshake/mailers/smtp.py", line 105, in send_messages
sent = self._send(message)
File "mailshake/mailers/smtp.py", line 126, in _send
from_email, recipients, email_message.as_bytes()
File "python2.7/smtplib.py", line 751, in sendmail
raise SMTPDataError(code, resp)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `liberapay/constants.py`
Content:
```
1 # coding: utf8
2 from __future__ import print_function, unicode_literals
3
4 from collections import namedtuple, OrderedDict
5 from datetime import date, datetime, timedelta
6 from decimal import Decimal, ROUND_UP
7 import re
8
9 from jinja2 import StrictUndefined
10 from pando.utils import utc
11
12
13 class CustomUndefined(StrictUndefined):
14 __bool__ = __nonzero__ = lambda self: False
15
16 def __str__(self):
17 try:
18 self._fail_with_undefined_error()
19 except Exception as e:
20 self._tell_sentry(e, {})
21 return ''
22
23 __unicode__ = __str__
24
25
26 def check_bits(bits):
27 assert len(set(bits)) == len(bits) # no duplicates
28 assert not [b for b in bits if '{0:b}'.format(b).count('1') != 1] # single bit
29
30
31 Event = namedtuple('Event', 'name bit title')
32
33 Fees = namedtuple('Fees', ('var', 'fix'))
34
35
36 _ = lambda a: a
37
38 ASCII_ALLOWED_IN_USERNAME = set("0123456789"
39 "abcdefghijklmnopqrstuvwxyz"
40 "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
41 "-_")
42
43 AVATAR_QUERY = '?s=160&default=retro'
44 AVATAR_SOURCES = 'libravatar bitbucket facebook github google twitter'.split()
45
46 BIRTHDAY = date(2015, 5, 22)
47
48 D_CENT = Decimal('0.01')
49 D_INF = Decimal('inf')
50 D_UNIT = Decimal('1.00')
51 D_ZERO = Decimal('0.00')
52
53 DONATION_LIMITS_WEEKLY = (Decimal('0.01'), Decimal('100.00'))
54 DONATION_LIMITS = {
55 'weekly': DONATION_LIMITS_WEEKLY,
56 'monthly': tuple((x * Decimal(52) / Decimal(12)).quantize(D_CENT, rounding=ROUND_UP)
57 for x in DONATION_LIMITS_WEEKLY),
58 'yearly': tuple((x * Decimal(52)).quantize(D_CENT)
59 for x in DONATION_LIMITS_WEEKLY),
60 }
61 DONATION_WEEKLY_MIN, DONATION_WEEKLY_MAX = DONATION_LIMITS_WEEKLY
62
63 ELSEWHERE_ACTIONS = {'connect', 'lock', 'unlock'}
64
65 EMAIL_VERIFICATION_TIMEOUT = timedelta(hours=24)
66 EMAIL_RE = re.compile(r'^[^@]+@[^@]+\.[^@]+$')
67
68 EPOCH = datetime(1970, 1, 1, 0, 0, 0, 0, utc)
69
70 EVENTS = [
71 Event('income', 1, _("When I receive money")),
72 Event('low_balance', 2, _("When there isn't enough money in my wallet to cover my donations")),
73 Event('withdrawal_created', 4, _("When a transfer to my bank account is initiated")),
74 Event('withdrawal_failed', 8, _("When a transfer to my bank account fails")),
75 Event('pledgee_joined', 16, _("When someone I pledge to joins Liberapay")),
76 Event('team_invite', 32, _("When someone invites me to join a team")),
77 Event('payin_bankwire_failed', 64, _("When a bank wire transfer to my Liberapay wallet fails")),
78 Event('payin_bankwire_succeeded', 128, _("When a bank wire transfer to my Liberapay wallet succeeds")),
79 ]
80 check_bits([e.bit for e in EVENTS])
81 EVENTS = OrderedDict((e.name, e) for e in EVENTS)
82 EVENTS_S = ' '.join(EVENTS.keys())
83
84 # https://www.mangopay.com/pricing/
85 FEE_PAYIN_BANK_WIRE = Fees(Decimal('0.005'), Decimal(0)) # 0.5%
86 FEE_PAYIN_CARD = Fees(Decimal('0.018'), Decimal('0.18')) # 1.8% + €0.18
87 FEE_PAYOUT = Fees(Decimal(0), Decimal(0))
88 FEE_PAYOUT_OUTSIDE_SEPA = Fees(Decimal(0), Decimal('2.5'))
89 FEE_PAYOUT_WARN = Decimal('0.03') # warn user when fee exceeds 3%
90 FEE_VAT = Decimal('0.17') # 17% (Luxembourg rate)
91
92 JINJA_ENV_COMMON = dict(
93 trim_blocks=True, lstrip_blocks=True,
94 line_statement_prefix='%',
95 # undefined=CustomUndefined,
96 )
97
98 # https://docs.mangopay.com/api-references/kyc-rules/
99 KYC_PAYIN_YEARLY_THRESHOLD = Decimal('2500')
100 KYC_PAYOUT_YEARLY_THRESHOLD = Decimal('1000')
101
102 LAUNCH_TIME = datetime(2016, 2, 3, 12, 50, 0, 0, utc)
103
104 PASSWORD_MIN_SIZE = 8
105 PASSWORD_MAX_SIZE = 150
106
107 PAYIN_BANK_WIRE_MIN = Decimal('2.00')
108 PAYIN_CARD_MIN = Decimal("15.00") # fee ≈ 3.5%
109 PAYIN_CARD_TARGET = Decimal("92.00") # fee ≈ 2.33%
110
111 PERIOD_CONVERSION_RATES = {
112 'weekly': Decimal(1),
113 'monthly': Decimal(12) / Decimal(52),
114 'yearly': Decimal(1) / Decimal(52),
115 }
116
117 POSTAL_ADDRESS_KEYS = (
118 'AddressLine1', 'AddressLine2', 'City', 'Region', 'PostalCode', 'Country'
119 )
120
121 PRIVACY_FIELDS = OrderedDict([
122 ('hide_giving', _("Hide total giving from others.")),
123 ('hide_receiving', _("Hide total receiving from others.")),
124 ('hide_from_search', _("Hide myself from search results on Liberapay.")),
125 ('profile_noindex', _("Tell web search engines not to index my profile.")),
126 ('hide_from_lists', _("Prevent my profile from being listed on Liberapay.")),
127 ])
128 PRIVACY_FIELDS_S = ' '.join(PRIVACY_FIELDS.keys())
129
130 PRIVILEGES = dict(admin=1, run_payday=2)
131 check_bits(list(PRIVILEGES.values()))
132
133 QUARANTINE = timedelta(weeks=4)
134
135 SEPA_ZONE = set("""
136 AT BE BG CH CY CZ DE DK EE ES ES FI FR GB GI GR HR HU IE IS IT LI LT LU LV
137 MC MT NL NO PL PT RO SE SI SK
138 """.split())
139
140 SESSION = str('session') # bytes in python2, unicode in python3
141 SESSION_REFRESH = timedelta(hours=1)
142 SESSION_TIMEOUT = timedelta(hours=6)
143
144 STANDARD_TIPS = (
145 (_("Symbolic ({0} per week)"), Decimal('0.01')),
146 (_("Small ({0} per week)"), Decimal('0.25')),
147 (_("Medium ({0} per week)"), Decimal('1.00')),
148 (_("Large ({0} per week)"), Decimal('5.00')),
149 (_("Maximum ({0} per week)"), DONATION_WEEKLY_MAX),
150 )
151
152 USERNAME_MAX_SIZE = 32
153
154 del _
155
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/liberapay/constants.py b/liberapay/constants.py
--- a/liberapay/constants.py
+++ b/liberapay/constants.py
@@ -63,7 +63,16 @@
ELSEWHERE_ACTIONS = {'connect', 'lock', 'unlock'}
EMAIL_VERIFICATION_TIMEOUT = timedelta(hours=24)
-EMAIL_RE = re.compile(r'^[^@]+@[^@]+\.[^@]+$')
+EMAIL_RE = re.compile(r'''
+ # This is the regexp used by MangoPay (as of February 2017).
+ # It rejects some valid but exotic addresses.
+ # https://en.wikipedia.org/wiki/Email_address
+ ^
+ [a-zA-Z0-9!#$%&'*+/=?^_`{|}~-]+(\.[a-zA-Z0-9!#$%&'*+/=?^_`{|}~-]+)*
+ @
+ ([a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?\.)+[a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?
+ $
+''', re.VERBOSE)
EPOCH = datetime(1970, 1, 1, 0, 0, 0, 0, utc)
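
As a rough check of the stricter pattern introduced above, the sketch below compiles the same verbose regex on its own and runs it against a well-formed address plus two stand-ins patterned on the refused recipients in the tracebacks (whose local parts are redacted there); only the first should match, since the domain part of the pattern admits nothing but letters, digits, hyphens and dots. This is an illustrative snippet, not part of the liberapay codebase.

```python
import re

# Same MangoPay-style pattern as in the patch above.
EMAIL_RE = re.compile(r'''
    ^
    [a-zA-Z0-9!#$%&'*+/=?^_`{|}~-]+(\.[a-zA-Z0-9!#$%&'*+/=?^_`{|}~-]+)*
    @
    ([a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?\.)+[a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?
    $
''', re.VERBOSE)

# A normal address is accepted.
assert EMAIL_RE.match('[email protected]')

# Injection-style recipients (hypothetical stand-ins abridged from the
# tracebacks) are rejected: quotes, pipes, spaces and backticks cannot
# appear in the domain part.
assert EMAIL_RE.match("user@example.com'and3405=3406--") is None
assert EMAIL_RE.match('user@example.com|ping -n 21 127.0.0.1') is None
```

Presumably the `add_email` path checks `EMAIL_RE` before calling `send_email`, so tightening the pattern should keep such recipients from ever reaching the SMTP layer, which is what the `SMTPRecipientsRefused` and `SMTPDataError` traces above boil down to.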
|
{"golden_diff": "diff --git a/liberapay/constants.py b/liberapay/constants.py\n--- a/liberapay/constants.py\n+++ b/liberapay/constants.py\n@@ -63,7 +63,16 @@\n ELSEWHERE_ACTIONS = {'connect', 'lock', 'unlock'}\n \n EMAIL_VERIFICATION_TIMEOUT = timedelta(hours=24)\n-EMAIL_RE = re.compile(r'^[^@]+@[^@]+\\.[^@]+$')\n+EMAIL_RE = re.compile(r'''\n+ # This is the regexp used by MangoPay (as of February 2017).\n+ # It rejects some valid but exotic addresses.\n+ # https://en.wikipedia.org/wiki/Email_address\n+ ^\n+ [a-zA-Z0-9!#$%&'*+/=?^_`{|}~-]+(\\.[a-zA-Z0-9!#$%&'*+/=?^_`{|}~-]+)*\n+ @\n+ ([a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?\\.)+[a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?\n+ $\n+''', re.VERBOSE)\n \n EPOCH = datetime(1970, 1, 1, 0, 0, 0, 0, utc)\n", "issue": "SMTP exceptions aren't caught\n- https://sentry.io/share/issue/3132343535362e313939323633313931/\r\n- https://sentry.io/share/issue/3132343535362e313939323533393935/\r\n- https://sentry.io/share/issue/3132343535362e313939323533383839/\r\n\r\n```\r\nSMTPServerDisconnected: please run connect() first\r\n File \"site-packages/algorithm.py\", line 321, in loop\r\n new_state = function(**deps.as_kwargs)\r\n File \"liberapay/security/authentication.py\", line 136, in authenticate_user_if_possible\r\n p = sign_in_with_form_data(body, state)\r\n File \"liberapay/security/authentication.py\", line 88, in sign_in_with_form_data\r\n p.add_email(email, cursor=c)\r\n File \"liberapay/models/participant.py\", line 618, in add_email\r\n r = self.send_email('verification', email=email, link=link.format(**locals()))\r\n File \"liberapay/models/participant.py\", line 733, in send_email\r\n n = website.mailer.send(**message)\r\n File \"mailshake/mailers/base.py\", line 41, in send\r\n return self.send_messages(EmailMessage(*args, **kwargs))\r\n File \"mailshake/mailers/smtp.py\", line 105, in send_messages\r\n sent = self._send(message)\r\n File \"mailshake/mailers/smtp.py\", line 126, in _send\r\n from_email, recipients, email_message.as_bytes()\r\n File \"python2.7/smtplib.py\", line 733, in sendmail\r\n (code, resp) = self.mail(from_addr, esmtp_opts)\r\n File \"python2.7/smtplib.py\", line 480, in mail\r\n self.putcmd(\"mail\", \"FROM:%s%s\" % (quoteaddr(sender), optionlist))\r\n File \"python2.7/smtplib.py\", line 341, in putcmd\r\n self.send(str)\r\n File \"python2.7/smtplib.py\", line 333, in send\r\n raise SMTPServerDisconnected('please run connect() first')\r\n```\r\n\r\n```\r\nSMTPRecipientsRefused: {u\"~3921 <[email protected]|ping-n21127.0.0.1||`ping-c21127.0.0.1`#'|ping-n21127.0.0.1||`ping-c21127.0.0.1`#\\\\>\": (501, 'Syntax error')}\r\n File \"site-packages/algorithm.py\", line 321, in loop\r\n new_state = function(**deps.as_kwargs)\r\n File \"liberapay/security/authentication.py\", line 136, in authenticate_user_if_possible\r\n p = sign_in_with_form_data(body, state)\r\n File \"liberapay/security/authentication.py\", line 88, in sign_in_with_form_data\r\n p.add_email(email, cursor=c)\r\n File \"liberapay/models/participant.py\", line 618, in add_email\r\n r = self.send_email('verification', email=email, link=link.format(**locals()))\r\n File \"liberapay/models/participant.py\", line 733, in send_email\r\n n = website.mailer.send(**message)\r\n File \"mailshake/mailers/base.py\", line 41, in send\r\n return self.send_messages(EmailMessage(*args, **kwargs))\r\n File \"mailshake/mailers/smtp.py\", line 105, in send_messages\r\n sent = self._send(message)\r\n File \"mailshake/mailers/smtp.py\", line 126, in _send\r\n from_email, recipients, 
email_message.as_bytes()\r\n File \"python2.7/smtplib.py\", line 747, in sendmail\r\n raise SMTPRecipientsRefused(senderrs)\r\n```\r\n\r\n```\r\nSMTPDataError: (554, 'Recipient format is invalid: \\'[\"[email protected]\\'and3405=3406--\"]\\'')\r\n File \"site-packages/algorithm.py\", line 321, in loop\r\n new_state = function(**deps.as_kwargs)\r\n File \"liberapay/security/authentication.py\", line 136, in authenticate_user_if_possible\r\n p = sign_in_with_form_data(body, state)\r\n File \"liberapay/security/authentication.py\", line 88, in sign_in_with_form_data\r\n p.add_email(email, cursor=c)\r\n File \"liberapay/models/participant.py\", line 618, in add_email\r\n r = self.send_email('verification', email=email, link=link.format(**locals()))\r\n File \"liberapay/models/participant.py\", line 733, in send_email\r\n n = website.mailer.send(**message)\r\n File \"mailshake/mailers/base.py\", line 41, in send\r\n return self.send_messages(EmailMessage(*args, **kwargs))\r\n File \"mailshake/mailers/smtp.py\", line 105, in send_messages\r\n sent = self._send(message)\r\n File \"mailshake/mailers/smtp.py\", line 126, in _send\r\n from_email, recipients, email_message.as_bytes()\r\n File \"python2.7/smtplib.py\", line 751, in sendmail\r\n raise SMTPDataError(code, resp)\r\n```\n", "before_files": [{"content": "# coding: utf8\nfrom __future__ import print_function, unicode_literals\n\nfrom collections import namedtuple, OrderedDict\nfrom datetime import date, datetime, timedelta\nfrom decimal import Decimal, ROUND_UP\nimport re\n\nfrom jinja2 import StrictUndefined\nfrom pando.utils import utc\n\n\nclass CustomUndefined(StrictUndefined):\n __bool__ = __nonzero__ = lambda self: False\n\n def __str__(self):\n try:\n self._fail_with_undefined_error()\n except Exception as e:\n self._tell_sentry(e, {})\n return ''\n\n __unicode__ = __str__\n\n\ndef check_bits(bits):\n assert len(set(bits)) == len(bits) # no duplicates\n assert not [b for b in bits if '{0:b}'.format(b).count('1') != 1] # single bit\n\n\nEvent = namedtuple('Event', 'name bit title')\n\nFees = namedtuple('Fees', ('var', 'fix'))\n\n\n_ = lambda a: a\n\nASCII_ALLOWED_IN_USERNAME = set(\"0123456789\"\n \"abcdefghijklmnopqrstuvwxyz\"\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n \"-_\")\n\nAVATAR_QUERY = '?s=160&default=retro'\nAVATAR_SOURCES = 'libravatar bitbucket facebook github google twitter'.split()\n\nBIRTHDAY = date(2015, 5, 22)\n\nD_CENT = Decimal('0.01')\nD_INF = Decimal('inf')\nD_UNIT = Decimal('1.00')\nD_ZERO = Decimal('0.00')\n\nDONATION_LIMITS_WEEKLY = (Decimal('0.01'), Decimal('100.00'))\nDONATION_LIMITS = {\n 'weekly': DONATION_LIMITS_WEEKLY,\n 'monthly': tuple((x * Decimal(52) / Decimal(12)).quantize(D_CENT, rounding=ROUND_UP)\n for x in DONATION_LIMITS_WEEKLY),\n 'yearly': tuple((x * Decimal(52)).quantize(D_CENT)\n for x in DONATION_LIMITS_WEEKLY),\n}\nDONATION_WEEKLY_MIN, DONATION_WEEKLY_MAX = DONATION_LIMITS_WEEKLY\n\nELSEWHERE_ACTIONS = {'connect', 'lock', 'unlock'}\n\nEMAIL_VERIFICATION_TIMEOUT = timedelta(hours=24)\nEMAIL_RE = re.compile(r'^[^@]+@[^@]+\\.[^@]+$')\n\nEPOCH = datetime(1970, 1, 1, 0, 0, 0, 0, utc)\n\nEVENTS = [\n Event('income', 1, _(\"When I receive money\")),\n Event('low_balance', 2, _(\"When there isn't enough money in my wallet to cover my donations\")),\n Event('withdrawal_created', 4, _(\"When a transfer to my bank account is initiated\")),\n Event('withdrawal_failed', 8, _(\"When a transfer to my bank account fails\")),\n Event('pledgee_joined', 16, _(\"When someone I pledge to joins Liberapay\")),\n 
Event('team_invite', 32, _(\"When someone invites me to join a team\")),\n Event('payin_bankwire_failed', 64, _(\"When a bank wire transfer to my Liberapay wallet fails\")),\n Event('payin_bankwire_succeeded', 128, _(\"When a bank wire transfer to my Liberapay wallet succeeds\")),\n]\ncheck_bits([e.bit for e in EVENTS])\nEVENTS = OrderedDict((e.name, e) for e in EVENTS)\nEVENTS_S = ' '.join(EVENTS.keys())\n\n# https://www.mangopay.com/pricing/\nFEE_PAYIN_BANK_WIRE = Fees(Decimal('0.005'), Decimal(0)) # 0.5%\nFEE_PAYIN_CARD = Fees(Decimal('0.018'), Decimal('0.18')) # 1.8% + \u20ac0.18\nFEE_PAYOUT = Fees(Decimal(0), Decimal(0))\nFEE_PAYOUT_OUTSIDE_SEPA = Fees(Decimal(0), Decimal('2.5'))\nFEE_PAYOUT_WARN = Decimal('0.03') # warn user when fee exceeds 3%\nFEE_VAT = Decimal('0.17') # 17% (Luxembourg rate)\n\nJINJA_ENV_COMMON = dict(\n trim_blocks=True, lstrip_blocks=True,\n line_statement_prefix='%',\n # undefined=CustomUndefined,\n)\n\n# https://docs.mangopay.com/api-references/kyc-rules/\nKYC_PAYIN_YEARLY_THRESHOLD = Decimal('2500')\nKYC_PAYOUT_YEARLY_THRESHOLD = Decimal('1000')\n\nLAUNCH_TIME = datetime(2016, 2, 3, 12, 50, 0, 0, utc)\n\nPASSWORD_MIN_SIZE = 8\nPASSWORD_MAX_SIZE = 150\n\nPAYIN_BANK_WIRE_MIN = Decimal('2.00')\nPAYIN_CARD_MIN = Decimal(\"15.00\") # fee \u2248 3.5%\nPAYIN_CARD_TARGET = Decimal(\"92.00\") # fee \u2248 2.33%\n\nPERIOD_CONVERSION_RATES = {\n 'weekly': Decimal(1),\n 'monthly': Decimal(12) / Decimal(52),\n 'yearly': Decimal(1) / Decimal(52),\n}\n\nPOSTAL_ADDRESS_KEYS = (\n 'AddressLine1', 'AddressLine2', 'City', 'Region', 'PostalCode', 'Country'\n)\n\nPRIVACY_FIELDS = OrderedDict([\n ('hide_giving', _(\"Hide total giving from others.\")),\n ('hide_receiving', _(\"Hide total receiving from others.\")),\n ('hide_from_search', _(\"Hide myself from search results on Liberapay.\")),\n ('profile_noindex', _(\"Tell web search engines not to index my profile.\")),\n ('hide_from_lists', _(\"Prevent my profile from being listed on Liberapay.\")),\n])\nPRIVACY_FIELDS_S = ' '.join(PRIVACY_FIELDS.keys())\n\nPRIVILEGES = dict(admin=1, run_payday=2)\ncheck_bits(list(PRIVILEGES.values()))\n\nQUARANTINE = timedelta(weeks=4)\n\nSEPA_ZONE = set(\"\"\"\n AT BE BG CH CY CZ DE DK EE ES ES FI FR GB GI GR HR HU IE IS IT LI LT LU LV\n MC MT NL NO PL PT RO SE SI SK\n\"\"\".split())\n\nSESSION = str('session') # bytes in python2, unicode in python3\nSESSION_REFRESH = timedelta(hours=1)\nSESSION_TIMEOUT = timedelta(hours=6)\n\nSTANDARD_TIPS = (\n (_(\"Symbolic ({0} per week)\"), Decimal('0.01')),\n (_(\"Small ({0} per week)\"), Decimal('0.25')),\n (_(\"Medium ({0} per week)\"), Decimal('1.00')),\n (_(\"Large ({0} per week)\"), Decimal('5.00')),\n (_(\"Maximum ({0} per week)\"), DONATION_WEEKLY_MAX),\n)\n\nUSERNAME_MAX_SIZE = 32\n\ndel _\n", "path": "liberapay/constants.py"}], "after_files": [{"content": "# coding: utf8\nfrom __future__ import print_function, unicode_literals\n\nfrom collections import namedtuple, OrderedDict\nfrom datetime import date, datetime, timedelta\nfrom decimal import Decimal, ROUND_UP\nimport re\n\nfrom jinja2 import StrictUndefined\nfrom pando.utils import utc\n\n\nclass CustomUndefined(StrictUndefined):\n __bool__ = __nonzero__ = lambda self: False\n\n def __str__(self):\n try:\n self._fail_with_undefined_error()\n except Exception as e:\n self._tell_sentry(e, {})\n return ''\n\n __unicode__ = __str__\n\n\ndef check_bits(bits):\n assert len(set(bits)) == len(bits) # no duplicates\n assert not [b for b in bits if '{0:b}'.format(b).count('1') != 1] # single 
bit\n\n\nEvent = namedtuple('Event', 'name bit title')\n\nFees = namedtuple('Fees', ('var', 'fix'))\n\n\n_ = lambda a: a\n\nASCII_ALLOWED_IN_USERNAME = set(\"0123456789\"\n \"abcdefghijklmnopqrstuvwxyz\"\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n \"-_\")\n\nAVATAR_QUERY = '?s=160&default=retro'\nAVATAR_SOURCES = 'libravatar bitbucket facebook github google twitter'.split()\n\nBIRTHDAY = date(2015, 5, 22)\n\nD_CENT = Decimal('0.01')\nD_INF = Decimal('inf')\nD_UNIT = Decimal('1.00')\nD_ZERO = Decimal('0.00')\n\nDONATION_LIMITS_WEEKLY = (Decimal('0.01'), Decimal('100.00'))\nDONATION_LIMITS = {\n 'weekly': DONATION_LIMITS_WEEKLY,\n 'monthly': tuple((x * Decimal(52) / Decimal(12)).quantize(D_CENT, rounding=ROUND_UP)\n for x in DONATION_LIMITS_WEEKLY),\n 'yearly': tuple((x * Decimal(52)).quantize(D_CENT)\n for x in DONATION_LIMITS_WEEKLY),\n}\nDONATION_WEEKLY_MIN, DONATION_WEEKLY_MAX = DONATION_LIMITS_WEEKLY\n\nELSEWHERE_ACTIONS = {'connect', 'lock', 'unlock'}\n\nEMAIL_VERIFICATION_TIMEOUT = timedelta(hours=24)\nEMAIL_RE = re.compile(r'''\n # This is the regexp used by MangoPay (as of February 2017).\n # It rejects some valid but exotic addresses.\n # https://en.wikipedia.org/wiki/Email_address\n ^\n [a-zA-Z0-9!#$%&'*+/=?^_`{|}~-]+(\\.[a-zA-Z0-9!#$%&'*+/=?^_`{|}~-]+)*\n @\n ([a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?\\.)+[a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?\n $\n''', re.VERBOSE)\n\nEPOCH = datetime(1970, 1, 1, 0, 0, 0, 0, utc)\n\nEVENTS = [\n Event('income', 1, _(\"When I receive money\")),\n Event('low_balance', 2, _(\"When there isn't enough money in my wallet to cover my donations\")),\n Event('withdrawal_created', 4, _(\"When a transfer to my bank account is initiated\")),\n Event('withdrawal_failed', 8, _(\"When a transfer to my bank account fails\")),\n Event('pledgee_joined', 16, _(\"When someone I pledge to joins Liberapay\")),\n Event('team_invite', 32, _(\"When someone invites me to join a team\")),\n Event('payin_bankwire_failed', 64, _(\"When a bank wire transfer to my Liberapay wallet fails\")),\n Event('payin_bankwire_succeeded', 128, _(\"When a bank wire transfer to my Liberapay wallet succeeds\")),\n]\ncheck_bits([e.bit for e in EVENTS])\nEVENTS = OrderedDict((e.name, e) for e in EVENTS)\nEVENTS_S = ' '.join(EVENTS.keys())\n\n# https://www.mangopay.com/pricing/\nFEE_PAYIN_BANK_WIRE = Fees(Decimal('0.005'), Decimal(0)) # 0.5%\nFEE_PAYIN_CARD = Fees(Decimal('0.018'), Decimal('0.18')) # 1.8% + \u20ac0.18\nFEE_PAYOUT = Fees(Decimal(0), Decimal(0))\nFEE_PAYOUT_OUTSIDE_SEPA = Fees(Decimal(0), Decimal('2.5'))\nFEE_PAYOUT_WARN = Decimal('0.03') # warn user when fee exceeds 3%\nFEE_VAT = Decimal('0.17') # 17% (Luxembourg rate)\n\nJINJA_ENV_COMMON = dict(\n trim_blocks=True, lstrip_blocks=True,\n line_statement_prefix='%',\n # undefined=CustomUndefined,\n)\n\n# https://docs.mangopay.com/api-references/kyc-rules/\nKYC_PAYIN_YEARLY_THRESHOLD = Decimal('2500')\nKYC_PAYOUT_YEARLY_THRESHOLD = Decimal('1000')\n\nLAUNCH_TIME = datetime(2016, 2, 3, 12, 50, 0, 0, utc)\n\nPASSWORD_MIN_SIZE = 8\nPASSWORD_MAX_SIZE = 150\n\nPAYIN_BANK_WIRE_MIN = Decimal('2.00')\nPAYIN_CARD_MIN = Decimal(\"15.00\") # fee \u2248 3.5%\nPAYIN_CARD_TARGET = Decimal(\"92.00\") # fee \u2248 2.33%\n\nPERIOD_CONVERSION_RATES = {\n 'weekly': Decimal(1),\n 'monthly': Decimal(12) / Decimal(52),\n 'yearly': Decimal(1) / Decimal(52),\n}\n\nPOSTAL_ADDRESS_KEYS = (\n 'AddressLine1', 'AddressLine2', 'City', 'Region', 'PostalCode', 'Country'\n)\n\nPRIVACY_FIELDS = OrderedDict([\n ('hide_giving', _(\"Hide total giving from others.\")),\n 
('hide_receiving', _(\"Hide total receiving from others.\")),\n ('hide_from_search', _(\"Hide myself from search results on Liberapay.\")),\n ('profile_noindex', _(\"Tell web search engines not to index my profile.\")),\n ('hide_from_lists', _(\"Prevent my profile from being listed on Liberapay.\")),\n])\nPRIVACY_FIELDS_S = ' '.join(PRIVACY_FIELDS.keys())\n\nPRIVILEGES = dict(admin=1, run_payday=2)\ncheck_bits(list(PRIVILEGES.values()))\n\nQUARANTINE = timedelta(weeks=4)\n\nSEPA_ZONE = set(\"\"\"\n AT BE BG CH CY CZ DE DK EE ES ES FI FR GB GI GR HR HU IE IS IT LI LT LU LV\n MC MT NL NO PL PT RO SE SI SK\n\"\"\".split())\n\nSESSION = str('session') # bytes in python2, unicode in python3\nSESSION_REFRESH = timedelta(hours=1)\nSESSION_TIMEOUT = timedelta(hours=6)\n\nSTANDARD_TIPS = (\n (_(\"Symbolic ({0} per week)\"), Decimal('0.01')),\n (_(\"Small ({0} per week)\"), Decimal('0.25')),\n (_(\"Medium ({0} per week)\"), Decimal('1.00')),\n (_(\"Large ({0} per week)\"), Decimal('5.00')),\n (_(\"Maximum ({0} per week)\"), DONATION_WEEKLY_MAX),\n)\n\nUSERNAME_MAX_SIZE = 32\n\ndel _\n", "path": "liberapay/constants.py"}]}
| 3,433 | 297 |
gh_patches_debug_36141
|
rasdani/github-patches
|
git_diff
|
PennyLaneAI__pennylane-4831
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Unexpected behavior with merge_amplitude_embedding and batched input
### Expected behavior
When using merge_amplitude_embedding to combine two separate embeddings, the output should be the same as two separate MottonenStatePreparations.
### Actual behavior
Instead, what I observe is that the output not only doesn't match the batch size, but it's also incorrect.
### Additional information
Removing `@qml.transforms.merge_amplitude_embedding` solves the issue, but I'm not sure it's intended.
### Source code
```shell
import pennylane as qml
import torch
dev = qml.device("default.qubit", wires=2)
@qml.qnode(dev, interface="torch")
def circuit1(a, b):
qml.MottonenStatePreparation(a, wires=0)
qml.MottonenStatePreparation(b, wires=1)
return qml.probs(wires=range(2))
a = torch.randn((3,2))
b = torch.randn((3,2))
a = a/a.norm(dim=1, keepdim=True)
b = b/b.norm(dim=1, keepdim=True)
print('a:')
print(a)
print('b:')
print(b)
print('Batched call to circuit1')
print(circuit1(a, b).detach().numpy())
# Output:
# a:
# tensor([[ 0.4929, 0.8701],
# [ 0.7628, 0.6466],
# [-0.6488, -0.7610]])
# b:
# tensor([[-0.6827, -0.7307],
# [ 0.6346, -0.7728],
# [ 0.2947, -0.9556]])
# Batched call to circuit1
# [[0.11325859 0.12973125 0.35284562 0.40416454]
# [0.23433313 0.34756744 0.16836992 0.24972952]
# [0.03655629 0.38435813 0.0502934 0.52879218]]
@qml.transforms.merge_amplitude_embedding
@qml.qnode(dev, interface="torch")
def circuit2(a, b):
qml.AmplitudeEmbedding(a, wires=0)
qml.AmplitudeEmbedding(b, wires=1)
return qml.probs(wires=range(2))
print('Batched call to circuit2')
print(circuit2(a, b).detach().numpy())
print('Repeated call to circuit2')
print(circuit2(a[0], b[0]).detach().numpy())
print(circuit2(a[1], b[1]).detach().numpy())
print(circuit2(a[2], b[2]).detach().numpy())
# Output:
# Batched call to circuit2
# [1. 0. 0. 0.]
# Repeated call to circuit2
# [1. 0. 0. 0.]
# [1. 0. 0. 0.]
# [1. 0. 0. 0.]
```
### Tracebacks
_No response_
### System information
```shell
Name: PennyLane
Version: 0.33.0
Summary: PennyLane is a Python quantum machine learning library by Xanadu Inc.
Home-page: https://github.com/PennyLaneAI/pennylane
Author:
Author-email:
License: Apache License 2.0
Location: /home/tiblias/miniconda3/envs/qc-gpu/lib/python3.10/site-packages
Requires: appdirs, autograd, autoray, cachetools, networkx, numpy, pennylane-lightning, requests, rustworkx, scipy, semantic-version, toml, typing-extensions
Required-by: PennyLane-Lightning, PennyLane-Lightning-GPU, PennyLane-qiskit, pennylane-qulacs
Platform info: Linux-6.2.0-36-generic-x86_64-with-glibc2.35
Python version: 3.10.12
Numpy version: 1.23.5
Scipy version: 1.11.2
Installed devices:
- default.gaussian (PennyLane-0.33.0)
- default.mixed (PennyLane-0.33.0)
- default.qubit (PennyLane-0.33.0)
- default.qubit.autograd (PennyLane-0.33.0)
- default.qubit.jax (PennyLane-0.33.0)
- default.qubit.legacy (PennyLane-0.33.0)
- default.qubit.tf (PennyLane-0.33.0)
- default.qubit.torch (PennyLane-0.33.0)
- default.qutrit (PennyLane-0.33.0)
- null.qubit (PennyLane-0.33.0)
- lightning.qubit (PennyLane-Lightning-0.33.1)
- lightning.gpu (PennyLane-Lightning-GPU-0.31.0)
- qiskit.aer (PennyLane-qiskit-0.31.0)
- qiskit.basicaer (PennyLane-qiskit-0.31.0)
- qiskit.ibmq (PennyLane-qiskit-0.31.0)
- qiskit.ibmq.circuit_runner (PennyLane-qiskit-0.31.0)
- qiskit.ibmq.sampler (PennyLane-qiskit-0.31.0)
- qiskit.remote (PennyLane-qiskit-0.31.0)
- qulacs.simulator (pennylane-qulacs-0.29.0)
```
### Existing GitHub issues
- [X] I have searched existing GitHub issues to make sure the issue does not already exist.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pennylane/transforms/optimization/merge_amplitude_embedding.py`
Content:
```
1 # Copyright 2018-2021 Xanadu Quantum Technologies Inc.
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Transform for merging AmplitudeEmbedding gates in a quantum circuit."""
15 from typing import Sequence, Callable
16
17 from pennylane.transforms import transform
18 from pennylane.tape import QuantumTape
19 from pennylane import AmplitudeEmbedding
20 from pennylane._device import DeviceError
21 from pennylane.math import flatten, reshape
22
23
24 @transform
25 def merge_amplitude_embedding(tape: QuantumTape) -> (Sequence[QuantumTape], Callable):
26 r"""Quantum function transform to combine amplitude embedding templates that act on different qubits.
27
28 Args:
29 tape (QNode or QuantumTape or Callable): A quantum circuit.
30
31 Returns:
32 qnode (QNode) or quantum function (Callable) or tuple[List[.QuantumTape], function]: The transformed circuit as described in :func:`qml.transform <pennylane.transform>`.
33
34
35 **Example**
36
37 >>> dev = qml.device('default.qubit', wires=4)
38
39 You can apply the transform directly on :class:`QNode`:
40
41 .. code-block:: python
42
43 @qml.transforms.merge_amplitude_embedding
44 @qml.qnode(device=dev)
45 def circuit():
46 qml.CNOT(wires = [0,1])
47 qml.AmplitudeEmbedding([0,1], wires = 2)
48 qml.AmplitudeEmbedding([0,1], wires = 3)
49 return qml.state()
50
51 >>> circuit()
52 [1.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j]
53
54 .. details::
55 :title: Usage Details
56
57 You can also apply it on quantum function.
58
59 .. code-block:: python
60
61 def qfunc():
62 qml.CNOT(wires = [0,1])
63 qml.AmplitudeEmbedding([0,1], wires = 2)
64 qml.AmplitudeEmbedding([0,1], wires = 3)
65 return qml.state()
66
67 The circuit before compilation will not work because of using two amplitude embedding.
68
69 Using the transformation we can join the different amplitude embedding into a single one:
70
71 >>> optimized_qfunc = qml.transforms.merge_amplitude_embedding(qfunc)
72 >>> optimized_qnode = qml.QNode(optimized_qfunc, dev)
73 >>> print(qml.draw(optimized_qnode)())
74 0: ─╭●──────────────────────┤ State
75 1: ─╰X──────────────────────┤ State
76 2: ─╭AmplitudeEmbedding(M0)─┤ State
77 3: ─╰AmplitudeEmbedding(M0)─┤ State
78 M0 =
79 [0.+0.j 0.+0.j 0.+0.j 1.+0.j]
80
81 """
82 # Make a working copy of the list to traverse
83 list_copy = tape.operations.copy()
84 not_amplitude_embedding = []
85 visited_wires = set()
86 input_wires, input_vectors, input_batch_size = [], [], []
87 while len(list_copy) > 0:
88 current_gate = list_copy[0]
89 wires_set = set(current_gate.wires)
90
91 # Check if the current gate is an AmplitudeEmbedding.
92 if not isinstance(current_gate, AmplitudeEmbedding):
93 not_amplitude_embedding.append(current_gate)
94 list_copy.pop(0)
95 visited_wires = visited_wires.union(wires_set)
96 continue
97
98 # Check the qubits have not been used.
99 if len(visited_wires.intersection(wires_set)) > 0:
100 raise DeviceError(
101 f"Operation {current_gate.name} cannot be used after other Operation applied in the same qubit "
102 )
103 input_wires.append(current_gate.wires)
104 input_vectors.append(current_gate.parameters[0])
105 input_batch_size.append(current_gate.batch_size)
106 list_copy.pop(0)
107 visited_wires = visited_wires.union(wires_set)
108
109 if len(input_wires) > 0:
110 final_wires = input_wires[0]
111 final_vector = input_vectors[0]
112 final_batch_size = input_batch_size[0]
113
114 # Merge all parameters and qubits into a single one.
115 for w, v, b in zip(input_wires[1:], input_vectors[1:], input_batch_size[1:]):
116 final_vector = final_vector[..., :, None] * v[..., None, :]
117 final_batch_size = final_batch_size or b
118 final_wires = final_wires + w
119
120 if final_batch_size:
121 final_vector = reshape(final_vector, (final_batch_size, -1))
122 else:
123 final_vector = flatten(final_vector)
124
125 AmplitudeEmbedding(final_vector, wires=final_wires)
126
127 new_operations = []
128 for gate in not_amplitude_embedding:
129 new_operations.append(gate)
130
131 new_tape = type(tape)(new_operations, tape.measurements, shots=tape.shots)
132
133 def null_postprocessing(results):
134 """A postprocesing function returned by a transform that only converts the batch of results
135 into a result for a single ``QuantumTape``.
136 """
137 return results[0]
138
139 return [new_tape], null_postprocessing
140
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pennylane/transforms/optimization/merge_amplitude_embedding.py b/pennylane/transforms/optimization/merge_amplitude_embedding.py
--- a/pennylane/transforms/optimization/merge_amplitude_embedding.py
+++ b/pennylane/transforms/optimization/merge_amplitude_embedding.py
@@ -19,6 +19,7 @@
from pennylane import AmplitudeEmbedding
from pennylane._device import DeviceError
from pennylane.math import flatten, reshape
+from pennylane.queuing import QueuingManager
@transform
@@ -79,19 +80,15 @@
[0.+0.j 0.+0.j 0.+0.j 1.+0.j]
"""
- # Make a working copy of the list to traverse
- list_copy = tape.operations.copy()
- not_amplitude_embedding = []
+ new_operations = []
visited_wires = set()
input_wires, input_vectors, input_batch_size = [], [], []
- while len(list_copy) > 0:
- current_gate = list_copy[0]
+ for current_gate in tape.operations:
wires_set = set(current_gate.wires)
# Check if the current gate is an AmplitudeEmbedding.
if not isinstance(current_gate, AmplitudeEmbedding):
- not_amplitude_embedding.append(current_gate)
- list_copy.pop(0)
+ new_operations.append(current_gate)
visited_wires = visited_wires.union(wires_set)
continue
@@ -103,7 +100,6 @@
input_wires.append(current_gate.wires)
input_vectors.append(current_gate.parameters[0])
input_batch_size.append(current_gate.batch_size)
- list_copy.pop(0)
visited_wires = visited_wires.union(wires_set)
if len(input_wires) > 0:
@@ -122,11 +118,8 @@
else:
final_vector = flatten(final_vector)
- AmplitudeEmbedding(final_vector, wires=final_wires)
-
- new_operations = []
- for gate in not_amplitude_embedding:
- new_operations.append(gate)
+ with QueuingManager.stop_recording():
+ new_operations.insert(0, AmplitudeEmbedding(final_vector, wires=final_wires))
new_tape = type(tape)(new_operations, tape.measurements, shots=tape.shots)
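
The behavioural core of the hunk above is that the merged `AmplitudeEmbedding` is now built while queuing is paused and inserted explicitly at the front of `new_operations`; in the previous version it was instantiated bare and never added to the returned tape, so the embedding (and its batch dimension) was simply dropped. A minimal sketch of that pattern, reusing the `[0, 0, 0, 1]` merged state from the docstring example and otherwise hypothetical operations, not the transform itself:

```python
import pennylane as qml
from pennylane.queuing import QueuingManager

# Operations copied over from the original tape (hypothetical example).
new_operations = [qml.CNOT(wires=[0, 1])]

# Build the merged embedding with queuing paused, then place it explicitly,
# instead of relying on implicit queuing inside the transform.
with QueuingManager.stop_recording():
    merged = qml.AmplitudeEmbedding([0, 0, 0, 1], wires=[2, 3])
new_operations.insert(0, merged)

new_tape = qml.tape.QuantumTape(new_operations, [qml.state()])
print(new_tape.operations)  # merged embedding first, then the CNOT
```

Because the embedding now actually lands on the new tape, batched inputs keep their batch dimension, and the `circuit2` example from the issue should return one probability row per input state rather than the fixed `[1. 0. 0. 0.]`.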
|
{"golden_diff": "diff --git a/pennylane/transforms/optimization/merge_amplitude_embedding.py b/pennylane/transforms/optimization/merge_amplitude_embedding.py\n--- a/pennylane/transforms/optimization/merge_amplitude_embedding.py\n+++ b/pennylane/transforms/optimization/merge_amplitude_embedding.py\n@@ -19,6 +19,7 @@\n from pennylane import AmplitudeEmbedding\n from pennylane._device import DeviceError\n from pennylane.math import flatten, reshape\n+from pennylane.queuing import QueuingManager\n \n \n @transform\n@@ -79,19 +80,15 @@\n [0.+0.j 0.+0.j 0.+0.j 1.+0.j]\n \n \"\"\"\n- # Make a working copy of the list to traverse\n- list_copy = tape.operations.copy()\n- not_amplitude_embedding = []\n+ new_operations = []\n visited_wires = set()\n input_wires, input_vectors, input_batch_size = [], [], []\n- while len(list_copy) > 0:\n- current_gate = list_copy[0]\n+ for current_gate in tape.operations:\n wires_set = set(current_gate.wires)\n \n # Check if the current gate is an AmplitudeEmbedding.\n if not isinstance(current_gate, AmplitudeEmbedding):\n- not_amplitude_embedding.append(current_gate)\n- list_copy.pop(0)\n+ new_operations.append(current_gate)\n visited_wires = visited_wires.union(wires_set)\n continue\n \n@@ -103,7 +100,6 @@\n input_wires.append(current_gate.wires)\n input_vectors.append(current_gate.parameters[0])\n input_batch_size.append(current_gate.batch_size)\n- list_copy.pop(0)\n visited_wires = visited_wires.union(wires_set)\n \n if len(input_wires) > 0:\n@@ -122,11 +118,8 @@\n else:\n final_vector = flatten(final_vector)\n \n- AmplitudeEmbedding(final_vector, wires=final_wires)\n-\n- new_operations = []\n- for gate in not_amplitude_embedding:\n- new_operations.append(gate)\n+ with QueuingManager.stop_recording():\n+ new_operations.insert(0, AmplitudeEmbedding(final_vector, wires=final_wires))\n \n new_tape = type(tape)(new_operations, tape.measurements, shots=tape.shots)\n", "issue": "[BUG] Unexpected behavior with merge_amplitude_embedding and batched input\n### Expected behavior\n\nWhen using merge_amplitude_embedding to combine two separate embeddings, the output should be the same as two separate MottonenStatePreparations.\n\n### Actual behavior\n\nInstead, what I observe is that the output not only doesn't match the batch size, but it's also incorrect.\r\n\n\n### Additional information\n\nRemoving `@qml.transforms.merge_amplitude_embedding` solves the issue, but I'm not sure it's intended.\n\n### Source code\n\n```shell\nimport pennylane as qml\r\nimport torch\r\n\r\ndev = qml.device(\"default.qubit\", wires=2)\r\n\r\[email protected](dev, interface=\"torch\")\r\ndef circuit1(a, b): \r\n qml.MottonenStatePreparation(a, wires=0)\r\n qml.MottonenStatePreparation(b, wires=1)\r\n return qml.probs(wires=range(2))\r\n\r\na = torch.randn((3,2))\r\nb = torch.randn((3,2))\r\na = a/a.norm(dim=1, keepdim=True)\r\nb = b/b.norm(dim=1, keepdim=True)\r\n\r\nprint('a:')\r\nprint(a)\r\nprint('b:')\r\nprint(b)\r\n\r\nprint('Batched call to circuit1')\r\nprint(circuit1(a, b).detach().numpy())\r\n\r\n# Output:\r\n# a:\r\n# tensor([[ 0.4929, 0.8701],\r\n# [ 0.7628, 0.6466],\r\n# [-0.6488, -0.7610]])\r\n# b:\r\n# tensor([[-0.6827, -0.7307],\r\n# [ 0.6346, -0.7728],\r\n# [ 0.2947, -0.9556]])\r\n# Batched call to circuit1\r\n# [[0.11325859 0.12973125 0.35284562 0.40416454]\r\n# [0.23433313 0.34756744 0.16836992 0.24972952]\r\n# [0.03655629 0.38435813 0.0502934 0.52879218]]\r\n\r\n\r\[email protected]_amplitude_embedding\r\[email protected](dev, interface=\"torch\")\r\ndef circuit2(a, b): \r\n 
qml.AmplitudeEmbedding(a, wires=0)\r\n qml.AmplitudeEmbedding(b, wires=1)\r\n return qml.probs(wires=range(2))\r\n\r\nprint('Batched call to circuit2')\r\nprint(circuit2(a, b).detach().numpy())\r\n\r\nprint('Repeated call to circuit2')\r\nprint(circuit2(a[0], b[0]).detach().numpy())\r\nprint(circuit2(a[1], b[1]).detach().numpy())\r\nprint(circuit2(a[2], b[2]).detach().numpy())\r\n\r\n\r\n# Output:\r\n# Batched call to circuit2\r\n# [1. 0. 0. 0.]\r\n# Repeated call to circuit2\r\n# [1. 0. 0. 0.]\r\n# [1. 0. 0. 0.]\r\n# [1. 0. 0. 0.]\n```\n\n\n### Tracebacks\n\n_No response_\n\n### System information\n\n```shell\nName: PennyLane\r\nVersion: 0.33.0\r\nSummary: PennyLane is a Python quantum machine learning library by Xanadu Inc.\r\nHome-page: https://github.com/PennyLaneAI/pennylane\r\nAuthor: \r\nAuthor-email: \r\nLicense: Apache License 2.0\r\nLocation: /home/tiblias/miniconda3/envs/qc-gpu/lib/python3.10/site-packages\r\nRequires: appdirs, autograd, autoray, cachetools, networkx, numpy, pennylane-lightning, requests, rustworkx, scipy, semantic-version, toml, typing-extensions\r\nRequired-by: PennyLane-Lightning, PennyLane-Lightning-GPU, PennyLane-qiskit, pennylane-qulacs\r\n\r\nPlatform info: Linux-6.2.0-36-generic-x86_64-with-glibc2.35\r\nPython version: 3.10.12\r\nNumpy version: 1.23.5\r\nScipy version: 1.11.2\r\nInstalled devices:\r\n- default.gaussian (PennyLane-0.33.0)\r\n- default.mixed (PennyLane-0.33.0)\r\n- default.qubit (PennyLane-0.33.0)\r\n- default.qubit.autograd (PennyLane-0.33.0)\r\n- default.qubit.jax (PennyLane-0.33.0)\r\n- default.qubit.legacy (PennyLane-0.33.0)\r\n- default.qubit.tf (PennyLane-0.33.0)\r\n- default.qubit.torch (PennyLane-0.33.0)\r\n- default.qutrit (PennyLane-0.33.0)\r\n- null.qubit (PennyLane-0.33.0)\r\n- lightning.qubit (PennyLane-Lightning-0.33.1)\r\n- lightning.gpu (PennyLane-Lightning-GPU-0.31.0)\r\n- qiskit.aer (PennyLane-qiskit-0.31.0)\r\n- qiskit.basicaer (PennyLane-qiskit-0.31.0)\r\n- qiskit.ibmq (PennyLane-qiskit-0.31.0)\r\n- qiskit.ibmq.circuit_runner (PennyLane-qiskit-0.31.0)\r\n- qiskit.ibmq.sampler (PennyLane-qiskit-0.31.0)\r\n- qiskit.remote (PennyLane-qiskit-0.31.0)\r\n- qulacs.simulator (pennylane-qulacs-0.29.0)\n```\n\n\n### Existing GitHub issues\n\n- [X] I have searched existing GitHub issues to make sure the issue does not already exist.\n", "before_files": [{"content": "# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Transform for merging AmplitudeEmbedding gates in a quantum circuit.\"\"\"\nfrom typing import Sequence, Callable\n\nfrom pennylane.transforms import transform\nfrom pennylane.tape import QuantumTape\nfrom pennylane import AmplitudeEmbedding\nfrom pennylane._device import DeviceError\nfrom pennylane.math import flatten, reshape\n\n\n@transform\ndef merge_amplitude_embedding(tape: QuantumTape) -> (Sequence[QuantumTape], Callable):\n r\"\"\"Quantum function transform to combine amplitude embedding templates that act on different qubits.\n\n Args:\n tape (QNode or 
QuantumTape or Callable): A quantum circuit.\n\n Returns:\n qnode (QNode) or quantum function (Callable) or tuple[List[.QuantumTape], function]: The transformed circuit as described in :func:`qml.transform <pennylane.transform>`.\n\n\n **Example**\n\n >>> dev = qml.device('default.qubit', wires=4)\n\n You can apply the transform directly on :class:`QNode`:\n\n .. code-block:: python\n\n @qml.transforms.merge_amplitude_embedding\n @qml.qnode(device=dev)\n def circuit():\n qml.CNOT(wires = [0,1])\n qml.AmplitudeEmbedding([0,1], wires = 2)\n qml.AmplitudeEmbedding([0,1], wires = 3)\n return qml.state()\n\n >>> circuit()\n [1.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j]\n\n .. details::\n :title: Usage Details\n\n You can also apply it on quantum function.\n\n .. code-block:: python\n\n def qfunc():\n qml.CNOT(wires = [0,1])\n qml.AmplitudeEmbedding([0,1], wires = 2)\n qml.AmplitudeEmbedding([0,1], wires = 3)\n return qml.state()\n\n The circuit before compilation will not work because of using two amplitude embedding.\n\n Using the transformation we can join the different amplitude embedding into a single one:\n\n >>> optimized_qfunc = qml.transforms.merge_amplitude_embedding(qfunc)\n >>> optimized_qnode = qml.QNode(optimized_qfunc, dev)\n >>> print(qml.draw(optimized_qnode)())\n 0: \u2500\u256d\u25cf\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524 State\n 1: \u2500\u2570X\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524 State\n 2: \u2500\u256dAmplitudeEmbedding(M0)\u2500\u2524 State\n 3: \u2500\u2570AmplitudeEmbedding(M0)\u2500\u2524 State\n M0 =\n [0.+0.j 0.+0.j 0.+0.j 1.+0.j]\n\n \"\"\"\n # Make a working copy of the list to traverse\n list_copy = tape.operations.copy()\n not_amplitude_embedding = []\n visited_wires = set()\n input_wires, input_vectors, input_batch_size = [], [], []\n while len(list_copy) > 0:\n current_gate = list_copy[0]\n wires_set = set(current_gate.wires)\n\n # Check if the current gate is an AmplitudeEmbedding.\n if not isinstance(current_gate, AmplitudeEmbedding):\n not_amplitude_embedding.append(current_gate)\n list_copy.pop(0)\n visited_wires = visited_wires.union(wires_set)\n continue\n\n # Check the qubits have not been used.\n if len(visited_wires.intersection(wires_set)) > 0:\n raise DeviceError(\n f\"Operation {current_gate.name} cannot be used after other Operation applied in the same qubit \"\n )\n input_wires.append(current_gate.wires)\n input_vectors.append(current_gate.parameters[0])\n input_batch_size.append(current_gate.batch_size)\n list_copy.pop(0)\n visited_wires = visited_wires.union(wires_set)\n\n if len(input_wires) > 0:\n final_wires = input_wires[0]\n final_vector = input_vectors[0]\n final_batch_size = input_batch_size[0]\n\n # Merge all parameters and qubits into a single one.\n for w, v, b in zip(input_wires[1:], input_vectors[1:], input_batch_size[1:]):\n final_vector = final_vector[..., :, None] * v[..., None, :]\n final_batch_size = final_batch_size or b\n final_wires = final_wires + w\n\n if final_batch_size:\n final_vector = reshape(final_vector, (final_batch_size, -1))\n else:\n final_vector = flatten(final_vector)\n\n AmplitudeEmbedding(final_vector, wires=final_wires)\n\n new_operations = []\n for gate in not_amplitude_embedding:\n new_operations.append(gate)\n\n new_tape = 
type(tape)(new_operations, tape.measurements, shots=tape.shots)\n\n def null_postprocessing(results):\n \"\"\"A postprocesing function returned by a transform that only converts the batch of results\n into a result for a single ``QuantumTape``.\n \"\"\"\n return results[0]\n\n return [new_tape], null_postprocessing\n", "path": "pennylane/transforms/optimization/merge_amplitude_embedding.py"}], "after_files": [{"content": "# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Transform for merging AmplitudeEmbedding gates in a quantum circuit.\"\"\"\nfrom typing import Sequence, Callable\n\nfrom pennylane.transforms import transform\nfrom pennylane.tape import QuantumTape\nfrom pennylane import AmplitudeEmbedding\nfrom pennylane._device import DeviceError\nfrom pennylane.math import flatten, reshape\nfrom pennylane.queuing import QueuingManager\n\n\n@transform\ndef merge_amplitude_embedding(tape: QuantumTape) -> (Sequence[QuantumTape], Callable):\n r\"\"\"Quantum function transform to combine amplitude embedding templates that act on different qubits.\n\n Args:\n tape (QNode or QuantumTape or Callable): A quantum circuit.\n\n Returns:\n qnode (QNode) or quantum function (Callable) or tuple[List[.QuantumTape], function]: The transformed circuit as described in :func:`qml.transform <pennylane.transform>`.\n\n\n **Example**\n\n >>> dev = qml.device('default.qubit', wires=4)\n\n You can apply the transform directly on :class:`QNode`:\n\n .. code-block:: python\n\n @qml.transforms.merge_amplitude_embedding\n @qml.qnode(device=dev)\n def circuit():\n qml.CNOT(wires = [0,1])\n qml.AmplitudeEmbedding([0,1], wires = 2)\n qml.AmplitudeEmbedding([0,1], wires = 3)\n return qml.state()\n\n >>> circuit()\n [1.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j]\n\n .. details::\n :title: Usage Details\n\n You can also apply it on quantum function.\n\n .. 
code-block:: python\n\n def qfunc():\n qml.CNOT(wires = [0,1])\n qml.AmplitudeEmbedding([0,1], wires = 2)\n qml.AmplitudeEmbedding([0,1], wires = 3)\n return qml.state()\n\n The circuit before compilation will not work because of using two amplitude embedding.\n\n Using the transformation we can join the different amplitude embedding into a single one:\n\n >>> optimized_qfunc = qml.transforms.merge_amplitude_embedding(qfunc)\n >>> optimized_qnode = qml.QNode(optimized_qfunc, dev)\n >>> print(qml.draw(optimized_qnode)())\n 0: \u2500\u256d\u25cf\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524 State\n 1: \u2500\u2570X\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524 State\n 2: \u2500\u256dAmplitudeEmbedding(M0)\u2500\u2524 State\n 3: \u2500\u2570AmplitudeEmbedding(M0)\u2500\u2524 State\n M0 =\n [0.+0.j 0.+0.j 0.+0.j 1.+0.j]\n\n \"\"\"\n new_operations = []\n visited_wires = set()\n input_wires, input_vectors, input_batch_size = [], [], []\n for current_gate in tape.operations:\n wires_set = set(current_gate.wires)\n\n # Check if the current gate is an AmplitudeEmbedding.\n if not isinstance(current_gate, AmplitudeEmbedding):\n new_operations.append(current_gate)\n visited_wires = visited_wires.union(wires_set)\n continue\n\n # Check the qubits have not been used.\n if len(visited_wires.intersection(wires_set)) > 0:\n raise DeviceError(\n f\"Operation {current_gate.name} cannot be used after other Operation applied in the same qubit \"\n )\n input_wires.append(current_gate.wires)\n input_vectors.append(current_gate.parameters[0])\n input_batch_size.append(current_gate.batch_size)\n visited_wires = visited_wires.union(wires_set)\n\n if len(input_wires) > 0:\n final_wires = input_wires[0]\n final_vector = input_vectors[0]\n final_batch_size = input_batch_size[0]\n\n # Merge all parameters and qubits into a single one.\n for w, v, b in zip(input_wires[1:], input_vectors[1:], input_batch_size[1:]):\n final_vector = final_vector[..., :, None] * v[..., None, :]\n final_batch_size = final_batch_size or b\n final_wires = final_wires + w\n\n if final_batch_size:\n final_vector = reshape(final_vector, (final_batch_size, -1))\n else:\n final_vector = flatten(final_vector)\n\n with QueuingManager.stop_recording():\n new_operations.insert(0, AmplitudeEmbedding(final_vector, wires=final_wires))\n\n new_tape = type(tape)(new_operations, tape.measurements, shots=tape.shots)\n\n def null_postprocessing(results):\n \"\"\"A postprocesing function returned by a transform that only converts the batch of results\n into a result for a single ``QuantumTape``.\n \"\"\"\n return results[0]\n\n return [new_tape], null_postprocessing\n", "path": "pennylane/transforms/optimization/merge_amplitude_embedding.py"}]}
| 3,327 | 535 |
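Note on the PennyLane entry above: the heart of the patched `merge_amplitude_embedding` transform is combining the per-register input states into a single state vector while carrying along an optional broadcast (batch) axis. The following is a minimal NumPy-only sketch of that merge step — the input states and batch sizes are made up for illustration, and everything PennyLane-specific (queuing, wires, tape rebuilding) is deliberately left out:

```python
import numpy as np

def merge_states(vectors, batch_sizes):
    """Combine per-register state vectors into one, keeping a leading batch axis.

    Mirrors the broadcasting-aware product in merge_amplitude_embedding: each
    step multiplies along a new trailing axis, then the result is either
    flattened or reshaped to (batch, -1) if any input carried a batch dimension.
    """
    final = np.asarray(vectors[0])
    final_batch = batch_sizes[0]
    for vec, batch in zip(vectors[1:], batch_sizes[1:]):
        final = final[..., :, None] * np.asarray(vec)[..., None, :]
        final_batch = final_batch or batch
    return final.reshape(final_batch, -1) if final_batch else final.reshape(-1)

# Unbatched: |1> on the first register, |0> on the second -> |10> = [0, 0, 1, 0]
a = np.array([0.0, 1.0])
b = np.array([1.0, 0.0])
print(merge_states([a, b], [None, None]))

# Batched first input (batch size 3): the leading axis survives as shape (3, 4)
print(merge_states([np.tile(a, (3, 1)), b], [3, None]).shape)
```

With an unbatched pair the result flattens to a plain length-4 state; with a batched first input the leading axis of size 3 is preserved, which is the behaviour the bug report describes as being lost before the fix.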
gh_patches_debug_8117
|
rasdani/github-patches
|
git_diff
|
opsdroid__opsdroid-1598
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Configuration validator warning could be more useful + accurate
<!-- Before you post an issue or if you are unsure about something join our matrix channel https://riot.im/app/#/room/#opsdroid-general:matrix.org and ask away! We are more than happy to help you. -->
# Description
The Opsdroid warning "Opsdroid has a new configuration format since version 0.17.0, we will change your configuration now. Please read on how to migrate in the documentation." indicates that the daemon itself will re-write your configuration file, but this is not the case.
It could also include a link to how one might be able to migrate their config-file, saving frantic seconds of Googling :-)
## Steps to Reproduce
Just use an otherwise-working config-file that doesn't conform to the new standard
## Expected Functionality
Warning message should reflect reality (shouldn't say "We will change your config now") and hint to the user where they can directly read up on the new format (such as via including a link)
## Experienced Functionality
Explain what happened instead(Please include the debug log).
## Versions
- **Opsdroid version:** 0.19.0
- **Python version:** 3.8.4
- **OS/Docker version:**
## Configuration File
Please include your version of the configuration file below.
```yaml
# Your code goes here.
```
## Additional Details
Any other details you wish to include such as screenshots, console messages, etc.
<!-- Love opsdroid? Please consider supporting our collective:
+👉 https://opencollective.com/opsdroid/donate -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opsdroid/helper.py`
Content:
```
1 """Helper functions to use within OpsDroid."""
2
3 import datetime
4 import os
5 import stat
6 import logging
7 import json
8
9 import nbformat
10 from nbconvert import PythonExporter
11
12 _LOGGER = logging.getLogger(__name__)
13
14
15 # pylint: disable=inconsistent-return-statements
16 def get_opsdroid():
17 """Return the running opsdroid instance.
18
19 Returns:
20 object: opsdroid instance.
21
22 """
23 from opsdroid.core import OpsDroid
24
25 if len(OpsDroid.instances) == 1:
26 return OpsDroid.instances[0]
27
28
29 def del_rw(action, name, exc):
30 """Error handler for removing read only files.
31
32 Args:
33 action: the function that raised the exception
34 name: path name passed to the function (path and file name)
35 exc: exception information return by sys.exc_info()
36
37 Raises:
38 OsError : If the file to be removed is a directory.
39
40 """
41 os.chmod(name, stat.S_IWRITE)
42 os.remove(name)
43
44
45 # This is meant to provide backwards compatibility for versions
46 # prior to 0.16.0 in the future this will be deleted
47
48
49 def convert_dictionary(modules):
50 """Convert dictionary to new format.
51
52 We iterate over all the modules in the list and change the dictionary
53 to be in the format 'name_of_module: { config_params}'
54
55 Args:
56 modules (list): List of dictionaries that contain the module configuration
57
58 Return:
59 List: New modified list following the new format.
60
61 """
62 config = dict()
63
64 if isinstance(modules, list):
65 _LOGGER.warning(
66 "Opsdroid has a new configuration format since version 0.17.0, we will change your configuration now. Please read on how to migrate in the documentation."
67 )
68 for module in modules:
69 module_copy = module.copy()
70 del module_copy["name"]
71
72 if module.get("access-token") or module.get("api-token"):
73 _LOGGER.warning(
74 _(
75 "Configuration param for %s has been deprecated in favor of 'token', please update your config."
76 ),
77 module["name"],
78 )
79 module_copy["token"] = module.get("access-token") or module.get(
80 "api-token"
81 )
82
83 config[module["name"]] = module_copy
84
85 return config
86 else:
87 return modules
88
89
90 def update_pre_0_17_config_format(config):
91 """Update each configuration param that contains 'name'.
92
93 We decided to ditch the name param and instead divide each module by it's name.
94 This change was due to validation issues. Now instead of a list of dictionaries
95 without any pointer to what they are, we are using the name of the module and then a
96 dictionary containing the configuration params for said module.
97
98 Args:
99 config (dict): Dictionary containing config got from configuration.yaml
100
101 Returns:
102 dict: updated configuration.
103
104 """
105 updated_config = {}
106 for config_type, modules in config.items():
107 if config_type in ("parsers", "connectors", "skills", "databases"):
108 updated_config[config_type] = convert_dictionary(modules)
109
110 config.update(updated_config)
111
112 return config
113
114
115 def file_is_ipython_notebook(path):
116 """Check whether a file is an iPython Notebook.
117
118 Args:
119 path (str): path to the file.
120
121 Examples:
122 path : source path with .ipynb file '/path/src/my_file.ipynb.
123
124 """
125 return path.lower().endswith(".ipynb")
126
127
128 def convert_ipynb_to_script(notebook_path, output_path):
129 """Convert an iPython Notebook to a python script.
130
131 Args:
132 notebook_path (str): path to the notebook file.
133 output_path (str): path to the script file destination.
134
135 Examples:
136 notebook_path : source path with .ipynb file '/path/src/my_file.ipynb.
137 output_path : destination path with .py file '/path/src/my_file.py.
138
139 """
140 with open(notebook_path, "r") as notebook_path_handle:
141 raw_notebook = notebook_path_handle.read()
142 notebook = nbformat.reads(raw_notebook, as_version=4)
143 script, _ = PythonExporter().from_notebook_node(notebook)
144 with open(output_path, "w") as output_path_handle:
145 output_path_handle.write(script)
146
147
148 def extract_gist_id(gist_string):
149 """Extract the gist ID from a url.
150
151 Will also work if simply passed an ID.
152
153 Args:
154 gist_string (str): Gist URL.
155
156 Returns:
157 string: The gist ID.
158
159 Examples:
160 gist_string : Gist url 'https://gist.github.com/{user}/{id}'.
161
162 """
163 return gist_string.split("/")[-1]
164
165
166 def add_skill_attributes(func):
167 """Add the attributes which makes a function a skill.
168
169 Args:
170 func (func): Skill function.
171
172 Returns:
173 func: The skill function with the new attributes.
174
175 """
176 if not hasattr(func, "skill"):
177 func.skill = True
178 if not hasattr(func, "matchers"):
179 func.matchers = []
180 if not hasattr(func, "constraints"):
181 func.constraints = []
182 return func
183
184
185 def get_parser_config(name, modules):
186 """Get parser from modules list.
187
188 After the change to the configuration we are adding the "enabled" flag to each
189 active module, this allows us to disable to module if there is any problem with
190 it. This helper method helps getting the config from the list of active parsers.
191
192 Args:
193 name (string): Name of the parser to be fetched.
194 modules (list): List of all active modules.
195
196 Returns:
197 dict or None: The module config or None if not found.
198
199 """
200 if modules:
201 for parser in modules:
202 if parser["config"]["name"] == name:
203 return parser["config"]
204 return None
205
206
207 def get_config_option(options, config, found, not_found):
208 """Get config details and return useful information to list active modules.
209
210 When we list modules we have to do a lot of search and get, this function serves as an
211 helper to get all the needed information to show in a list format. Since we are using
212 different formats and need to get 3 different details from the config we will either
213 return them or use the placeholder from `not_found`.
214
215 Args:
216 options(list): list of all possible options to search in config.
217 config(dict): This will be a section of the configuration (connectors, parsers, skills, etc).
218 found(str, bool): Expected text if option exists in config.
219 not_found(str): expected text if option doesn't exist in config.
220
221 """
222 try:
223 for option in options:
224 if config.get(option):
225 return found, option, config.get(option)
226 return not_found, not_found, not_found
227 except (TypeError, AttributeError):
228 return not_found, not_found, not_found
229
230
231 class JSONEncoder(json.JSONEncoder):
232 """A extended JSONEncoder class.
233
234 This class is customised JSONEncoder class which helps to convert
235 dict to JSON. The datetime objects are converted to dict with fields
236 as keys.
237
238 """
239
240 # pylint: disable=method-hidden
241 # See https://github.com/PyCQA/pylint/issues/414 for reference
242
243 serializers = {}
244
245 def default(self, o):
246 """Convert the given datetime object to dict.
247
248 Args:
249 o (object): The datetime object to be marshalled.
250
251 Returns:
252 dict (object): A dict with datatime object data.
253
254 Example:
255 A dict which is returned after marshalling::
256
257 {
258 "__class__": "datetime",
259 "year": 2018,
260 "month": 10,
261 "day": 2,
262 "hour": 0,
263 "minute": 41,
264 "second": 17,
265 "microsecond": 74644
266 }
267
268 """
269 marshaller = self.serializers.get(type(o), super(JSONEncoder, self).default)
270 return marshaller(o)
271
272
273 class JSONDecoder:
274 """A JSONDecoder class.
275
276 This class will convert dict containing datetime values
277 to datetime objects.
278
279 """
280
281 decoders = {}
282
283 def __call__(self, dct):
284 """Convert given dict to datetime objects.
285
286 Args:
287 dct (object): A dict containing datetime values and class type.
288
289 Returns:
290 object or dct: The datetime object for given dct, or dct if
291 respective class decoder is not found.
292
293 Example:
294 A datetime object returned after decoding::
295
296 datetime.datetime(2018, 10, 2, 0, 41, 17, 74644)
297
298 """
299 if dct.get("__class__") in self.decoders:
300 return self.decoders[dct["__class__"]](dct)
301 return dct
302
303
304 def register_json_type(type_cls, fields, decode_fn):
305 """Register JSON types.
306
307 This method will register the serializers and decoders for the
308 JSONEncoder and JSONDecoder classes respectively.
309
310 Args:
311 type_cls (object): A datetime object.
312 fields (list): List of fields used to store data in dict.
313 decode_fn (object): A lambda function object for decoding.
314
315 """
316 type_name = type_cls.__name__
317 JSONEncoder.serializers[type_cls] = lambda obj: dict(
318 __class__=type_name, **{field: getattr(obj, field) for field in fields}
319 )
320 JSONDecoder.decoders[type_name] = decode_fn
321
322
323 register_json_type(
324 datetime.datetime,
325 ["year", "month", "day", "hour", "minute", "second", "microsecond"],
326 lambda dct: datetime.datetime(
327 dct["year"],
328 dct["month"],
329 dct["day"],
330 dct["hour"],
331 dct["minute"],
332 dct["second"],
333 dct["microsecond"],
334 ),
335 )
336
337 register_json_type(
338 datetime.date,
339 ["year", "month", "day"],
340 lambda dct: datetime.date(dct["year"], dct["month"], dct["day"]),
341 )
342
343 register_json_type(
344 datetime.time,
345 ["hour", "minute", "second", "microsecond"],
346 lambda dct: datetime.time(
347 dct["hour"], dct["minute"], dct["second"], dct["microsecond"]
348 ),
349 )
350
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/opsdroid/helper.py b/opsdroid/helper.py
--- a/opsdroid/helper.py
+++ b/opsdroid/helper.py
@@ -63,7 +63,7 @@
if isinstance(modules, list):
_LOGGER.warning(
- "Opsdroid has a new configuration format since version 0.17.0, we will change your configuration now. Please read on how to migrate in the documentation."
+ "Opsdroid has a new configuration format since version 0.17.0. Please read on how to migrate in the documentation at https://docs.opsdroid.dev/en/stable/configuration.html#migrate-to-new-configuration-layout."
)
for module in modules:
module_copy = module.copy()
|
{"golden_diff": "diff --git a/opsdroid/helper.py b/opsdroid/helper.py\n--- a/opsdroid/helper.py\n+++ b/opsdroid/helper.py\n@@ -63,7 +63,7 @@\n \n if isinstance(modules, list):\n _LOGGER.warning(\n- \"Opsdroid has a new configuration format since version 0.17.0, we will change your configuration now. Please read on how to migrate in the documentation.\"\n+ \"Opsdroid has a new configuration format since version 0.17.0. Please read on how to migrate in the documentation at https://docs.opsdroid.dev/en/stable/configuration.html#migrate-to-new-configuration-layout.\"\n )\n for module in modules:\n module_copy = module.copy()\n", "issue": "Configuration validator warning could be more useful + accurate\n<!-- Before you post an issue or if you are unsure about something join our matrix channel https://riot.im/app/#/room/#opsdroid-general:matrix.org and ask away! We are more than happy to help you. -->\r\n# Description\r\nThe Opsdroid warning \"Opsdroid has a new configuration format since version 0.17.0, we will change your configuration now. Please read on how to migrate in the documentation.\" indicates that the daemon itself will re-write your configuration file, but this is not the case. \r\n\r\nIt could also include a link to how one might be able to migrate their config-file, saving frantic seconds of Googling :-) \r\n\r\n## Steps to Reproduce\r\nJust use an otherwise-working config-file that doesn't conform to the new standard \r\n\r\n## Expected Functionality\r\nWarning message should reflect reality (shouldn't say \"We will change your config now\") and hint to the user where they can directly read up on the new format (such as via including a link)\r\n\r\n\r\n## Experienced Functionality\r\nExplain what happened instead(Please include the debug log).\r\n\r\n## Versions\r\n- **Opsdroid version:** 0.19.0\r\n- **Python version:** 3.8.4\r\n- **OS/Docker version:**\r\n\r\n## Configuration File\r\nPlease include your version of the configuration file below.\r\n\r\n```yaml\r\n# Your code goes here.\r\n\r\n```\r\n\r\n## Additional Details\r\nAny other details you wish to include such as screenshots, console messages, etc.\r\n\r\n\r\n<!-- Love opsdroid? 
Please consider supporting our collective:\r\n +\ud83d\udc49 https://opencollective.com/opsdroid/donate -->\r\n\n", "before_files": [{"content": "\"\"\"Helper functions to use within OpsDroid.\"\"\"\n\nimport datetime\nimport os\nimport stat\nimport logging\nimport json\n\nimport nbformat\nfrom nbconvert import PythonExporter\n\n_LOGGER = logging.getLogger(__name__)\n\n\n# pylint: disable=inconsistent-return-statements\ndef get_opsdroid():\n \"\"\"Return the running opsdroid instance.\n\n Returns:\n object: opsdroid instance.\n\n \"\"\"\n from opsdroid.core import OpsDroid\n\n if len(OpsDroid.instances) == 1:\n return OpsDroid.instances[0]\n\n\ndef del_rw(action, name, exc):\n \"\"\"Error handler for removing read only files.\n\n Args:\n action: the function that raised the exception\n name: path name passed to the function (path and file name)\n exc: exception information return by sys.exc_info()\n\n Raises:\n OsError : If the file to be removed is a directory.\n\n \"\"\"\n os.chmod(name, stat.S_IWRITE)\n os.remove(name)\n\n\n# This is meant to provide backwards compatibility for versions\n# prior to 0.16.0 in the future this will be deleted\n\n\ndef convert_dictionary(modules):\n \"\"\"Convert dictionary to new format.\n\n We iterate over all the modules in the list and change the dictionary\n to be in the format 'name_of_module: { config_params}'\n\n Args:\n modules (list): List of dictionaries that contain the module configuration\n\n Return:\n List: New modified list following the new format.\n\n \"\"\"\n config = dict()\n\n if isinstance(modules, list):\n _LOGGER.warning(\n \"Opsdroid has a new configuration format since version 0.17.0, we will change your configuration now. Please read on how to migrate in the documentation.\"\n )\n for module in modules:\n module_copy = module.copy()\n del module_copy[\"name\"]\n\n if module.get(\"access-token\") or module.get(\"api-token\"):\n _LOGGER.warning(\n _(\n \"Configuration param for %s has been deprecated in favor of 'token', please update your config.\"\n ),\n module[\"name\"],\n )\n module_copy[\"token\"] = module.get(\"access-token\") or module.get(\n \"api-token\"\n )\n\n config[module[\"name\"]] = module_copy\n\n return config\n else:\n return modules\n\n\ndef update_pre_0_17_config_format(config):\n \"\"\"Update each configuration param that contains 'name'.\n\n We decided to ditch the name param and instead divide each module by it's name.\n This change was due to validation issues. 
Now instead of a list of dictionaries\n without any pointer to what they are, we are using the name of the module and then a\n dictionary containing the configuration params for said module.\n\n Args:\n config (dict): Dictionary containing config got from configuration.yaml\n\n Returns:\n dict: updated configuration.\n\n \"\"\"\n updated_config = {}\n for config_type, modules in config.items():\n if config_type in (\"parsers\", \"connectors\", \"skills\", \"databases\"):\n updated_config[config_type] = convert_dictionary(modules)\n\n config.update(updated_config)\n\n return config\n\n\ndef file_is_ipython_notebook(path):\n \"\"\"Check whether a file is an iPython Notebook.\n\n Args:\n path (str): path to the file.\n\n Examples:\n path : source path with .ipynb file '/path/src/my_file.ipynb.\n\n \"\"\"\n return path.lower().endswith(\".ipynb\")\n\n\ndef convert_ipynb_to_script(notebook_path, output_path):\n \"\"\"Convert an iPython Notebook to a python script.\n\n Args:\n notebook_path (str): path to the notebook file.\n output_path (str): path to the script file destination.\n\n Examples:\n notebook_path : source path with .ipynb file '/path/src/my_file.ipynb.\n output_path : destination path with .py file '/path/src/my_file.py.\n\n \"\"\"\n with open(notebook_path, \"r\") as notebook_path_handle:\n raw_notebook = notebook_path_handle.read()\n notebook = nbformat.reads(raw_notebook, as_version=4)\n script, _ = PythonExporter().from_notebook_node(notebook)\n with open(output_path, \"w\") as output_path_handle:\n output_path_handle.write(script)\n\n\ndef extract_gist_id(gist_string):\n \"\"\"Extract the gist ID from a url.\n\n Will also work if simply passed an ID.\n\n Args:\n gist_string (str): Gist URL.\n\n Returns:\n string: The gist ID.\n\n Examples:\n gist_string : Gist url 'https://gist.github.com/{user}/{id}'.\n\n \"\"\"\n return gist_string.split(\"/\")[-1]\n\n\ndef add_skill_attributes(func):\n \"\"\"Add the attributes which makes a function a skill.\n\n Args:\n func (func): Skill function.\n\n Returns:\n func: The skill function with the new attributes.\n\n \"\"\"\n if not hasattr(func, \"skill\"):\n func.skill = True\n if not hasattr(func, \"matchers\"):\n func.matchers = []\n if not hasattr(func, \"constraints\"):\n func.constraints = []\n return func\n\n\ndef get_parser_config(name, modules):\n \"\"\"Get parser from modules list.\n\n After the change to the configuration we are adding the \"enabled\" flag to each\n active module, this allows us to disable to module if there is any problem with\n it. This helper method helps getting the config from the list of active parsers.\n\n Args:\n name (string): Name of the parser to be fetched.\n modules (list): List of all active modules.\n\n Returns:\n dict or None: The module config or None if not found.\n\n \"\"\"\n if modules:\n for parser in modules:\n if parser[\"config\"][\"name\"] == name:\n return parser[\"config\"]\n return None\n\n\ndef get_config_option(options, config, found, not_found):\n \"\"\"Get config details and return useful information to list active modules.\n\n When we list modules we have to do a lot of search and get, this function serves as an\n helper to get all the needed information to show in a list format. 
Since we are using\n different formats and need to get 3 different details from the config we will either\n return them or use the placeholder from `not_found`.\n\n Args:\n options(list): list of all possible options to search in config.\n config(dict): This will be a section of the configuration (connectors, parsers, skills, etc).\n found(str, bool): Expected text if option exists in config.\n not_found(str): expected text if option doesn't exist in config.\n\n \"\"\"\n try:\n for option in options:\n if config.get(option):\n return found, option, config.get(option)\n return not_found, not_found, not_found\n except (TypeError, AttributeError):\n return not_found, not_found, not_found\n\n\nclass JSONEncoder(json.JSONEncoder):\n \"\"\"A extended JSONEncoder class.\n\n This class is customised JSONEncoder class which helps to convert\n dict to JSON. The datetime objects are converted to dict with fields\n as keys.\n\n \"\"\"\n\n # pylint: disable=method-hidden\n # See https://github.com/PyCQA/pylint/issues/414 for reference\n\n serializers = {}\n\n def default(self, o):\n \"\"\"Convert the given datetime object to dict.\n\n Args:\n o (object): The datetime object to be marshalled.\n\n Returns:\n dict (object): A dict with datatime object data.\n\n Example:\n A dict which is returned after marshalling::\n\n {\n \"__class__\": \"datetime\",\n \"year\": 2018,\n \"month\": 10,\n \"day\": 2,\n \"hour\": 0,\n \"minute\": 41,\n \"second\": 17,\n \"microsecond\": 74644\n }\n\n \"\"\"\n marshaller = self.serializers.get(type(o), super(JSONEncoder, self).default)\n return marshaller(o)\n\n\nclass JSONDecoder:\n \"\"\"A JSONDecoder class.\n\n This class will convert dict containing datetime values\n to datetime objects.\n\n \"\"\"\n\n decoders = {}\n\n def __call__(self, dct):\n \"\"\"Convert given dict to datetime objects.\n\n Args:\n dct (object): A dict containing datetime values and class type.\n\n Returns:\n object or dct: The datetime object for given dct, or dct if\n respective class decoder is not found.\n\n Example:\n A datetime object returned after decoding::\n\n datetime.datetime(2018, 10, 2, 0, 41, 17, 74644)\n\n \"\"\"\n if dct.get(\"__class__\") in self.decoders:\n return self.decoders[dct[\"__class__\"]](dct)\n return dct\n\n\ndef register_json_type(type_cls, fields, decode_fn):\n \"\"\"Register JSON types.\n\n This method will register the serializers and decoders for the\n JSONEncoder and JSONDecoder classes respectively.\n\n Args:\n type_cls (object): A datetime object.\n fields (list): List of fields used to store data in dict.\n decode_fn (object): A lambda function object for decoding.\n\n \"\"\"\n type_name = type_cls.__name__\n JSONEncoder.serializers[type_cls] = lambda obj: dict(\n __class__=type_name, **{field: getattr(obj, field) for field in fields}\n )\n JSONDecoder.decoders[type_name] = decode_fn\n\n\nregister_json_type(\n datetime.datetime,\n [\"year\", \"month\", \"day\", \"hour\", \"minute\", \"second\", \"microsecond\"],\n lambda dct: datetime.datetime(\n dct[\"year\"],\n dct[\"month\"],\n dct[\"day\"],\n dct[\"hour\"],\n dct[\"minute\"],\n dct[\"second\"],\n dct[\"microsecond\"],\n ),\n)\n\nregister_json_type(\n datetime.date,\n [\"year\", \"month\", \"day\"],\n lambda dct: datetime.date(dct[\"year\"], dct[\"month\"], dct[\"day\"]),\n)\n\nregister_json_type(\n datetime.time,\n [\"hour\", \"minute\", \"second\", \"microsecond\"],\n lambda dct: datetime.time(\n dct[\"hour\"], dct[\"minute\"], dct[\"second\"], dct[\"microsecond\"]\n ),\n)\n", "path": 
"opsdroid/helper.py"}], "after_files": [{"content": "\"\"\"Helper functions to use within OpsDroid.\"\"\"\n\nimport datetime\nimport os\nimport stat\nimport logging\nimport json\n\nimport nbformat\nfrom nbconvert import PythonExporter\n\n_LOGGER = logging.getLogger(__name__)\n\n\n# pylint: disable=inconsistent-return-statements\ndef get_opsdroid():\n \"\"\"Return the running opsdroid instance.\n\n Returns:\n object: opsdroid instance.\n\n \"\"\"\n from opsdroid.core import OpsDroid\n\n if len(OpsDroid.instances) == 1:\n return OpsDroid.instances[0]\n\n\ndef del_rw(action, name, exc):\n \"\"\"Error handler for removing read only files.\n\n Args:\n action: the function that raised the exception\n name: path name passed to the function (path and file name)\n exc: exception information return by sys.exc_info()\n\n Raises:\n OsError : If the file to be removed is a directory.\n\n \"\"\"\n os.chmod(name, stat.S_IWRITE)\n os.remove(name)\n\n\n# This is meant to provide backwards compatibility for versions\n# prior to 0.16.0 in the future this will be deleted\n\n\ndef convert_dictionary(modules):\n \"\"\"Convert dictionary to new format.\n\n We iterate over all the modules in the list and change the dictionary\n to be in the format 'name_of_module: { config_params}'\n\n Args:\n modules (list): List of dictionaries that contain the module configuration\n\n Return:\n List: New modified list following the new format.\n\n \"\"\"\n config = dict()\n\n if isinstance(modules, list):\n _LOGGER.warning(\n \"Opsdroid has a new configuration format since version 0.17.0. Please read on how to migrate in the documentation at https://docs.opsdroid.dev/en/stable/configuration.html#migrate-to-new-configuration-layout.\"\n )\n for module in modules:\n module_copy = module.copy()\n del module_copy[\"name\"]\n\n if module.get(\"access-token\") or module.get(\"api-token\"):\n _LOGGER.warning(\n _(\n \"Configuration param for %s has been deprecated in favor of 'token', please update your config.\"\n ),\n module[\"name\"],\n )\n module_copy[\"token\"] = module.get(\"access-token\") or module.get(\n \"api-token\"\n )\n\n config[module[\"name\"]] = module_copy\n\n return config\n else:\n return modules\n\n\ndef update_pre_0_17_config_format(config):\n \"\"\"Update each configuration param that contains 'name'.\n\n We decided to ditch the name param and instead divide each module by it's name.\n This change was due to validation issues. 
Now instead of a list of dictionaries\n without any pointer to what they are, we are using the name of the module and then a\n dictionary containing the configuration params for said module.\n\n Args:\n config (dict): Dictionary containing config got from configuration.yaml\n\n Returns:\n dict: updated configuration.\n\n \"\"\"\n updated_config = {}\n for config_type, modules in config.items():\n if config_type in (\"parsers\", \"connectors\", \"skills\", \"databases\"):\n updated_config[config_type] = convert_dictionary(modules)\n\n config.update(updated_config)\n\n return config\n\n\ndef file_is_ipython_notebook(path):\n \"\"\"Check whether a file is an iPython Notebook.\n\n Args:\n path (str): path to the file.\n\n Examples:\n path : source path with .ipynb file '/path/src/my_file.ipynb.\n\n \"\"\"\n return path.lower().endswith(\".ipynb\")\n\n\ndef convert_ipynb_to_script(notebook_path, output_path):\n \"\"\"Convert an iPython Notebook to a python script.\n\n Args:\n notebook_path (str): path to the notebook file.\n output_path (str): path to the script file destination.\n\n Examples:\n notebook_path : source path with .ipynb file '/path/src/my_file.ipynb.\n output_path : destination path with .py file '/path/src/my_file.py.\n\n \"\"\"\n with open(notebook_path, \"r\") as notebook_path_handle:\n raw_notebook = notebook_path_handle.read()\n notebook = nbformat.reads(raw_notebook, as_version=4)\n script, _ = PythonExporter().from_notebook_node(notebook)\n with open(output_path, \"w\") as output_path_handle:\n output_path_handle.write(script)\n\n\ndef extract_gist_id(gist_string):\n \"\"\"Extract the gist ID from a url.\n\n Will also work if simply passed an ID.\n\n Args:\n gist_string (str): Gist URL.\n\n Returns:\n string: The gist ID.\n\n Examples:\n gist_string : Gist url 'https://gist.github.com/{user}/{id}'.\n\n \"\"\"\n return gist_string.split(\"/\")[-1]\n\n\ndef add_skill_attributes(func):\n \"\"\"Add the attributes which makes a function a skill.\n\n Args:\n func (func): Skill function.\n\n Returns:\n func: The skill function with the new attributes.\n\n \"\"\"\n if not hasattr(func, \"skill\"):\n func.skill = True\n if not hasattr(func, \"matchers\"):\n func.matchers = []\n if not hasattr(func, \"constraints\"):\n func.constraints = []\n return func\n\n\ndef get_parser_config(name, modules):\n \"\"\"Get parser from modules list.\n\n After the change to the configuration we are adding the \"enabled\" flag to each\n active module, this allows us to disable to module if there is any problem with\n it. This helper method helps getting the config from the list of active parsers.\n\n Args:\n name (string): Name of the parser to be fetched.\n modules (list): List of all active modules.\n\n Returns:\n dict or None: The module config or None if not found.\n\n \"\"\"\n if modules:\n for parser in modules:\n if parser[\"config\"][\"name\"] == name:\n return parser[\"config\"]\n return None\n\n\ndef get_config_option(options, config, found, not_found):\n \"\"\"Get config details and return useful information to list active modules.\n\n When we list modules we have to do a lot of search and get, this function serves as an\n helper to get all the needed information to show in a list format. 
Since we are using\n different formats and need to get 3 different details from the config we will either\n return them or use the placeholder from `not_found`.\n\n Args:\n options(list): list of all possible options to search in config.\n config(dict): This will be a section of the configuration (connectors, parsers, skills, etc).\n found(str, bool): Expected text if option exists in config.\n not_found(str): expected text if option doesn't exist in config.\n\n \"\"\"\n try:\n for option in options:\n if config.get(option):\n return found, option, config.get(option)\n return not_found, not_found, not_found\n except (TypeError, AttributeError):\n return not_found, not_found, not_found\n\n\nclass JSONEncoder(json.JSONEncoder):\n \"\"\"A extended JSONEncoder class.\n\n This class is customised JSONEncoder class which helps to convert\n dict to JSON. The datetime objects are converted to dict with fields\n as keys.\n\n \"\"\"\n\n # pylint: disable=method-hidden\n # See https://github.com/PyCQA/pylint/issues/414 for reference\n\n serializers = {}\n\n def default(self, o):\n \"\"\"Convert the given datetime object to dict.\n\n Args:\n o (object): The datetime object to be marshalled.\n\n Returns:\n dict (object): A dict with datatime object data.\n\n Example:\n A dict which is returned after marshalling::\n\n {\n \"__class__\": \"datetime\",\n \"year\": 2018,\n \"month\": 10,\n \"day\": 2,\n \"hour\": 0,\n \"minute\": 41,\n \"second\": 17,\n \"microsecond\": 74644\n }\n\n \"\"\"\n marshaller = self.serializers.get(type(o), super(JSONEncoder, self).default)\n return marshaller(o)\n\n\nclass JSONDecoder:\n \"\"\"A JSONDecoder class.\n\n This class will convert dict containing datetime values\n to datetime objects.\n\n \"\"\"\n\n decoders = {}\n\n def __call__(self, dct):\n \"\"\"Convert given dict to datetime objects.\n\n Args:\n dct (object): A dict containing datetime values and class type.\n\n Returns:\n object or dct: The datetime object for given dct, or dct if\n respective class decoder is not found.\n\n Example:\n A datetime object returned after decoding::\n\n datetime.datetime(2018, 10, 2, 0, 41, 17, 74644)\n\n \"\"\"\n if dct.get(\"__class__\") in self.decoders:\n return self.decoders[dct[\"__class__\"]](dct)\n return dct\n\n\ndef register_json_type(type_cls, fields, decode_fn):\n \"\"\"Register JSON types.\n\n This method will register the serializers and decoders for the\n JSONEncoder and JSONDecoder classes respectively.\n\n Args:\n type_cls (object): A datetime object.\n fields (list): List of fields used to store data in dict.\n decode_fn (object): A lambda function object for decoding.\n\n \"\"\"\n type_name = type_cls.__name__\n JSONEncoder.serializers[type_cls] = lambda obj: dict(\n __class__=type_name, **{field: getattr(obj, field) for field in fields}\n )\n JSONDecoder.decoders[type_name] = decode_fn\n\n\nregister_json_type(\n datetime.datetime,\n [\"year\", \"month\", \"day\", \"hour\", \"minute\", \"second\", \"microsecond\"],\n lambda dct: datetime.datetime(\n dct[\"year\"],\n dct[\"month\"],\n dct[\"day\"],\n dct[\"hour\"],\n dct[\"minute\"],\n dct[\"second\"],\n dct[\"microsecond\"],\n ),\n)\n\nregister_json_type(\n datetime.date,\n [\"year\", \"month\", \"day\"],\n lambda dct: datetime.date(dct[\"year\"], dct[\"month\"], dct[\"day\"]),\n)\n\nregister_json_type(\n datetime.time,\n [\"hour\", \"minute\", \"second\", \"microsecond\"],\n lambda dct: datetime.time(\n dct[\"hour\"], dct[\"minute\"], dct[\"second\"], dct[\"microsecond\"]\n ),\n)\n", "path": 
"opsdroid/helper.py"}]}
| 3,836 | 167 |
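Note on the opsdroid entry above: the warning whose wording is being fixed fires when `convert_dictionary` detects the pre-0.17.0 layout, where each configuration section is a list of module dicts carrying their own `name` key, and re-keys it by module name. A standalone sketch of that re-shaping, with hypothetical module names and parameters, looks like this:

```python
# Hypothetical pre-0.17.0 style configuration section: a list of module dicts,
# each identified by its own "name" key.
old_connectors = [
    {"name": "websocket", "bot-name": "mybot", "max-connection-retries": 3},
    {"name": "shell", "bot-name": "mybot"},
]

def to_new_layout(modules):
    """Re-key a list of module configs by module name, as convert_dictionary does."""
    return {
        module["name"]: {key: value for key, value in module.items() if key != "name"}
        for module in modules
    }

print(to_new_layout(old_connectors))
# {'websocket': {'bot-name': 'mybot', 'max-connection-retries': 3},
#  'shell': {'bot-name': 'mybot'}}
```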
gh_patches_debug_6285
|
rasdani/github-patches
|
git_diff
|
encode__httpx-1503
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CertTypes `keyfile` and `password` should be Optional types.
`SSLContext.load_cert_chain` can take `None` as arguments values ([docs](https://docs.python.org/3/library/ssl.html#ssl.SSLContext.load_cert_chain)) so I guess this:
https://github.com/encode/httpx/blob/c09e61d50c8f169187cada6dbf14b89c7763c63f/httpx/_types.py#L54
should be rewritten as follows:
```python
CertTypes = Union[str, Tuple[str, Optional[str]], Tuple[str, Optional[str], Optional[str]]]
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `httpx/_types.py`
Content:
```
1 """
2 Type definitions for type checking purposes.
3 """
4
5 import ssl
6 from http.cookiejar import CookieJar
7 from typing import (
8 IO,
9 TYPE_CHECKING,
10 AsyncIterable,
11 Callable,
12 Dict,
13 Iterable,
14 List,
15 Mapping,
16 Optional,
17 Sequence,
18 Tuple,
19 Union,
20 )
21
22 if TYPE_CHECKING: # pragma: no cover
23 from ._auth import Auth # noqa: F401
24 from ._config import Proxy, Timeout # noqa: F401
25 from ._models import URL, Cookies, Headers, QueryParams, Request # noqa: F401
26
27
28 PrimitiveData = Optional[Union[str, int, float, bool]]
29
30 RawURL = Tuple[bytes, bytes, Optional[int], bytes]
31
32 URLTypes = Union["URL", str]
33
34 QueryParamTypes = Union[
35 "QueryParams",
36 Mapping[str, Union[PrimitiveData, Sequence[PrimitiveData]]],
37 List[Tuple[str, PrimitiveData]],
38 Tuple[Tuple[str, PrimitiveData], ...],
39 str,
40 bytes,
41 None,
42 ]
43
44 HeaderTypes = Union[
45 "Headers",
46 Dict[str, str],
47 Dict[bytes, bytes],
48 Sequence[Tuple[str, str]],
49 Sequence[Tuple[bytes, bytes]],
50 ]
51
52 CookieTypes = Union["Cookies", CookieJar, Dict[str, str], List[Tuple[str, str]]]
53
54 CertTypes = Union[str, Tuple[str, str], Tuple[str, str, str]]
55 VerifyTypes = Union[str, bool, ssl.SSLContext]
56 TimeoutTypes = Union[
57 Optional[float],
58 Tuple[Optional[float], Optional[float], Optional[float], Optional[float]],
59 "Timeout",
60 ]
61 ProxiesTypes = Union[URLTypes, "Proxy", Dict[URLTypes, Union[None, URLTypes, "Proxy"]]]
62
63 AuthTypes = Union[
64 Tuple[Union[str, bytes], Union[str, bytes]],
65 Callable[["Request"], "Request"],
66 "Auth",
67 None,
68 ]
69
70 ByteStream = Union[Iterable[bytes], AsyncIterable[bytes]]
71 RequestContent = Union[str, bytes, ByteStream]
72 ResponseContent = Union[str, bytes, ByteStream]
73
74 RequestData = dict
75
76 FileContent = Union[IO[str], IO[bytes], str, bytes]
77 FileTypes = Union[
78 # file (or text)
79 FileContent,
80 # (filename, file (or text))
81 Tuple[Optional[str], FileContent],
82 # (filename, file (or text), content_type)
83 Tuple[Optional[str], FileContent, Optional[str]],
84 ]
85 RequestFiles = Union[Mapping[str, FileTypes], Sequence[Tuple[str, FileTypes]]]
86
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/httpx/_types.py b/httpx/_types.py
--- a/httpx/_types.py
+++ b/httpx/_types.py
@@ -51,7 +51,14 @@
CookieTypes = Union["Cookies", CookieJar, Dict[str, str], List[Tuple[str, str]]]
-CertTypes = Union[str, Tuple[str, str], Tuple[str, str, str]]
+CertTypes = Union[
+ # certfile
+ str,
+ # (certfile, keyfile)
+ Tuple[str, Optional[str]],
+ # (certfile, keyfile, password)
+ Tuple[str, Optional[str], Optional[str]],
+]
VerifyTypes = Union[str, bool, ssl.SSLContext]
TimeoutTypes = Union[
Optional[float],
|
{"golden_diff": "diff --git a/httpx/_types.py b/httpx/_types.py\n--- a/httpx/_types.py\n+++ b/httpx/_types.py\n@@ -51,7 +51,14 @@\n \n CookieTypes = Union[\"Cookies\", CookieJar, Dict[str, str], List[Tuple[str, str]]]\n \n-CertTypes = Union[str, Tuple[str, str], Tuple[str, str, str]]\n+CertTypes = Union[\n+ # certfile\n+ str,\n+ # (certfile, keyfile)\n+ Tuple[str, Optional[str]],\n+ # (certfile, keyfile, password)\n+ Tuple[str, Optional[str], Optional[str]],\n+]\n VerifyTypes = Union[str, bool, ssl.SSLContext]\n TimeoutTypes = Union[\n Optional[float],\n", "issue": "CertTypes `keyfile` and `password` should be Optional types.\n`SSLContext.load_cert_chain` can take `None` as arguments values ([docs](https://docs.python.org/3/library/ssl.html#ssl.SSLContext.load_cert_chain)) so I guess this:\r\nhttps://github.com/encode/httpx/blob/c09e61d50c8f169187cada6dbf14b89c7763c63f/httpx/_types.py#L54\r\nshould be rewritten as follows:\r\n```python\r\nCertTypes = Union[str, Tuple[str, Optional[str]], Tuple[str, Optional[str], Optional[str]]] \r\n```\n", "before_files": [{"content": "\"\"\"\nType definitions for type checking purposes.\n\"\"\"\n\nimport ssl\nfrom http.cookiejar import CookieJar\nfrom typing import (\n IO,\n TYPE_CHECKING,\n AsyncIterable,\n Callable,\n Dict,\n Iterable,\n List,\n Mapping,\n Optional,\n Sequence,\n Tuple,\n Union,\n)\n\nif TYPE_CHECKING: # pragma: no cover\n from ._auth import Auth # noqa: F401\n from ._config import Proxy, Timeout # noqa: F401\n from ._models import URL, Cookies, Headers, QueryParams, Request # noqa: F401\n\n\nPrimitiveData = Optional[Union[str, int, float, bool]]\n\nRawURL = Tuple[bytes, bytes, Optional[int], bytes]\n\nURLTypes = Union[\"URL\", str]\n\nQueryParamTypes = Union[\n \"QueryParams\",\n Mapping[str, Union[PrimitiveData, Sequence[PrimitiveData]]],\n List[Tuple[str, PrimitiveData]],\n Tuple[Tuple[str, PrimitiveData], ...],\n str,\n bytes,\n None,\n]\n\nHeaderTypes = Union[\n \"Headers\",\n Dict[str, str],\n Dict[bytes, bytes],\n Sequence[Tuple[str, str]],\n Sequence[Tuple[bytes, bytes]],\n]\n\nCookieTypes = Union[\"Cookies\", CookieJar, Dict[str, str], List[Tuple[str, str]]]\n\nCertTypes = Union[str, Tuple[str, str], Tuple[str, str, str]]\nVerifyTypes = Union[str, bool, ssl.SSLContext]\nTimeoutTypes = Union[\n Optional[float],\n Tuple[Optional[float], Optional[float], Optional[float], Optional[float]],\n \"Timeout\",\n]\nProxiesTypes = Union[URLTypes, \"Proxy\", Dict[URLTypes, Union[None, URLTypes, \"Proxy\"]]]\n\nAuthTypes = Union[\n Tuple[Union[str, bytes], Union[str, bytes]],\n Callable[[\"Request\"], \"Request\"],\n \"Auth\",\n None,\n]\n\nByteStream = Union[Iterable[bytes], AsyncIterable[bytes]]\nRequestContent = Union[str, bytes, ByteStream]\nResponseContent = Union[str, bytes, ByteStream]\n\nRequestData = dict\n\nFileContent = Union[IO[str], IO[bytes], str, bytes]\nFileTypes = Union[\n # file (or text)\n FileContent,\n # (filename, file (or text))\n Tuple[Optional[str], FileContent],\n # (filename, file (or text), content_type)\n Tuple[Optional[str], FileContent, Optional[str]],\n]\nRequestFiles = Union[Mapping[str, FileTypes], Sequence[Tuple[str, FileTypes]]]\n", "path": "httpx/_types.py"}], "after_files": [{"content": "\"\"\"\nType definitions for type checking purposes.\n\"\"\"\n\nimport ssl\nfrom http.cookiejar import CookieJar\nfrom typing import (\n IO,\n TYPE_CHECKING,\n AsyncIterable,\n Callable,\n Dict,\n Iterable,\n List,\n Mapping,\n Optional,\n Sequence,\n Tuple,\n Union,\n)\n\nif TYPE_CHECKING: # pragma: no cover\n from 
._auth import Auth # noqa: F401\n from ._config import Proxy, Timeout # noqa: F401\n from ._models import URL, Cookies, Headers, QueryParams, Request # noqa: F401\n\n\nPrimitiveData = Optional[Union[str, int, float, bool]]\n\nRawURL = Tuple[bytes, bytes, Optional[int], bytes]\n\nURLTypes = Union[\"URL\", str]\n\nQueryParamTypes = Union[\n \"QueryParams\",\n Mapping[str, Union[PrimitiveData, Sequence[PrimitiveData]]],\n List[Tuple[str, PrimitiveData]],\n Tuple[Tuple[str, PrimitiveData], ...],\n str,\n bytes,\n None,\n]\n\nHeaderTypes = Union[\n \"Headers\",\n Dict[str, str],\n Dict[bytes, bytes],\n Sequence[Tuple[str, str]],\n Sequence[Tuple[bytes, bytes]],\n]\n\nCookieTypes = Union[\"Cookies\", CookieJar, Dict[str, str], List[Tuple[str, str]]]\n\nCertTypes = Union[\n # certfile\n str,\n # (certfile, keyfile)\n Tuple[str, Optional[str]],\n # (certfile, keyfile, password)\n Tuple[str, Optional[str], Optional[str]],\n]\nVerifyTypes = Union[str, bool, ssl.SSLContext]\nTimeoutTypes = Union[\n Optional[float],\n Tuple[Optional[float], Optional[float], Optional[float], Optional[float]],\n \"Timeout\",\n]\nProxiesTypes = Union[URLTypes, \"Proxy\", Dict[URLTypes, Union[None, URLTypes, \"Proxy\"]]]\n\nAuthTypes = Union[\n Tuple[Union[str, bytes], Union[str, bytes]],\n Callable[[\"Request\"], \"Request\"],\n \"Auth\",\n None,\n]\n\nByteStream = Union[Iterable[bytes], AsyncIterable[bytes]]\nRequestContent = Union[str, bytes, ByteStream]\nResponseContent = Union[str, bytes, ByteStream]\n\nRequestData = dict\n\nFileContent = Union[IO[str], IO[bytes], str, bytes]\nFileTypes = Union[\n # file (or text)\n FileContent,\n # (filename, file (or text))\n Tuple[Optional[str], FileContent],\n # (filename, file (or text), content_type)\n Tuple[Optional[str], FileContent, Optional[str]],\n]\nRequestFiles = Union[Mapping[str, FileTypes], Sequence[Tuple[str, FileTypes]]]\n", "path": "httpx/_types.py"}]}
| 1,133 | 170 |
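Note on the httpx entry above: the widened `CertTypes` alias is justified because the tuple forms are ultimately fed into `ssl.SSLContext.load_cert_chain`, whose `keyfile` and `password` parameters both default to `None`. A rough sketch of how such a value could be unpacked — illustrative plumbing only, not httpx's actual loading code, and the file paths are placeholders:

```python
import ssl
from typing import List, Optional, Tuple, Union

CertTypes = Union[
    str,                                       # certfile
    Tuple[str, Optional[str]],                 # (certfile, keyfile)
    Tuple[str, Optional[str], Optional[str]],  # (certfile, keyfile, password)
]

def load_cert(ctx: ssl.SSLContext, cert: CertTypes) -> None:
    """Unpack a CertTypes value into load_cert_chain arguments."""
    if isinstance(cert, str):
        ctx.load_cert_chain(certfile=cert)
    elif len(cert) == 2:
        ctx.load_cert_chain(certfile=cert[0], keyfile=cert[1])
    else:
        ctx.load_cert_chain(certfile=cert[0], keyfile=cert[1], password=cert[2])

# All of these now satisfy the annotation, since keyfile/password may be None:
examples: List[CertTypes] = [
    "client.pem",
    ("client.pem", None),
    ("client.crt", "client.key", None),
]
```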
gh_patches_debug_20197
|
rasdani/github-patches
|
git_diff
|
goauthentik__authentik-5569
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Authentik Outpost Proxy Bad Gateway error when sign_out
**Describe the bug**
I get a 502 "Bad gateway" error (and a go stacktrace) when I go on outpost sign_out url
**To Reproduce**
1. Set up a authentik outpost proxy
2. Configure your app to login with a traefik forwardAuth middleware
3. Login with success
4. Got to the signout url `https://svc.my_domain.tld/outpost.goauthentik.io/sign_out`
5. See error 502 "Bad Gateway"
6. In the log `http: panic serving 172.18.0.2:36148: interface conversion: interface {} is nil, not application.Claims`
**Expected behavior**
A successful Authentik/App logout
**Screenshots**
N/A
**Logs**
```
authentik-proxy-1 | 2023/05/04 10:58:37 http: panic serving 172.18.0.2:36148: interface conversion: interface {} is nil, not application.Claims
authentik-proxy-1 | goroutine 38672 [running]:
authentik-proxy-1 | net/http.(*conn).serve.func1()
authentik-proxy-1 | /usr/local/go/src/net/http/server.go:1854 +0xbf
authentik-proxy-1 | panic({0x1029280, 0xc0007e0930})
authentik-proxy-1 | /usr/local/go/src/runtime/panic.go:890 +0x263
authentik-proxy-1 | goauthentik.io/internal/outpost/proxyv2/application.(*Application).Logout(0xc000208700, {0xc0006ff6c0, 0x40})
authentik-proxy-1 | /go/src/goauthentik.io/internal/outpost/proxyv2/application/session.go:97 +0x169d
authentik-proxy-1 | goauthentik.io/internal/outpost/proxyv2/application.(*Application).handleSignOut(0xc000208700, {0x12cea40, 0xc00039b460}, 0xc00043b480?)
authentik-proxy-1 | /go/src/goauthentik.io/internal/outpost/proxyv2/application/application.go:274 +0x297
authentik-proxy-1 | net/http.HandlerFunc.ServeHTTP(0x0?, {0x12cea40?, 0xc00039b460?}, 0x12?)
authentik-proxy-1 | /usr/local/go/src/net/http/server.go:2122 +0x2f
authentik-proxy-1 | goauthentik.io/internal/outpost/proxyv2/application.NewApplication.func3.1({0x12cea40, 0xc00039b460}, 0xc0007b6f00)
authentik-proxy-1 | /go/src/goauthentik.io/internal/outpost/proxyv2/application/application.go:187 +0x1f9
authentik-proxy-1 | net/http.HandlerFunc.ServeHTTP(0xc0006ff6c0?, {0x12cea40?, 0xc00039b460?}, 0x0?)
authentik-proxy-1 | /usr/local/go/src/net/http/server.go:2122 +0x2f
authentik-proxy-1 | goauthentik.io/internal/outpost/proxyv2/application.NewApplication.func2.1({0x12cea40, 0xc00039b460}, 0xc0007b6f00)
authentik-proxy-1 | /go/src/goauthentik.io/internal/outpost/proxyv2/application/application.go:165 +0x222
authentik-proxy-1 | net/http.HandlerFunc.ServeHTTP(0xc0008c6d20?, {0x12cea40?, 0xc00039b460?}, 0x0?)
authentik-proxy-1 | /usr/local/go/src/net/http/server.go:2122 +0x2f
authentik-proxy-1 | goauthentik.io/internal/utils/web.loggingHandler.ServeHTTP({{0x12cb9e0?, 0xc00039b420?}, 0xc00016f2d0?, 0xc0001256d0?}, {0x12cee30?, 0xc000482fc0}, 0xc0007b6f00)
authentik-proxy-1 | /go/src/goauthentik.io/internal/utils/web/middleware.go:98 +0x12c
authentik-proxy-1 | github.com/gorilla/mux.(*Router).ServeHTTP(0xc000000300, {0x12cee30, 0xc000482fc0}, 0xc0007b6d00)
authentik-proxy-1 | /go/pkg/mod/github.com/gorilla/[email protected]/mux.go:210 +0x1cf
authentik-proxy-1 | goauthentik.io/internal/outpost/proxyv2/application.(*Application).ServeHTTP(...)
authentik-proxy-1 | /go/src/goauthentik.io/internal/outpost/proxyv2/application/application.go:250
authentik-proxy-1 | goauthentik.io/internal/outpost/proxyv2.(*ProxyServer).Handle(0xc00021c370, {0x12cee30, 0xc000482fc0}, 0xc0007b6d00)
authentik-proxy-1 | /go/src/goauthentik.io/internal/outpost/proxyv2/handlers.go:129 +0xa25
authentik-proxy-1 | net/http.HandlerFunc.ServeHTTP(0x1136b00?, {0x12cee30?, 0xc000482fc0?}, 0x7f59d0?)
authentik-proxy-1 | /usr/local/go/src/net/http/server.go:2122 +0x2f
authentik-proxy-1 | goauthentik.io/internal/outpost/proxyv2.NewProxyServer.func1.1({0x12cee30, 0xc000482fc0}, 0xc0003b9260?)
authentik-proxy-1 | /go/src/goauthentik.io/internal/outpost/proxyv2/proxyv2.go:46 +0x3c
authentik-proxy-1 | net/http.HandlerFunc.ServeHTTP(0xc0007b6c00?, {0x12cee30?, 0xc000482fc0?}, 0xc0008c79e8?)
authentik-proxy-1 | /usr/local/go/src/net/http/server.go:2122 +0x2f
authentik-proxy-1 | github.com/gorilla/mux.(*Router).ServeHTTP(0xc000000000, {0x12cee30, 0xc000482fc0}, 0xc0007b6b00)
authentik-proxy-1 | /go/pkg/mod/github.com/gorilla/[email protected]/mux.go:210 +0x1cf
authentik-proxy-1 | net/http.serverHandler.ServeHTTP({0xc0008cae40?}, {0x12cee30, 0xc000482fc0}, 0xc0007b6b00)
authentik-proxy-1 | /usr/local/go/src/net/http/server.go:2936 +0x316
authentik-proxy-1 | net/http.(*conn).serve(0xc00012efc0, {0x12cf600, 0xc0001998c0})
authentik-proxy-1 | /usr/local/go/src/net/http/server.go:1995 +0x612
authentik-proxy-1 | created by net/http.(*Server).Serve
authentik-proxy-1 | /usr/local/go/src/net/http/server.go:3089 +0x5ed
```
**Version and Deployment (please complete the following information):**
- authentik version: 2023.4.1
- Deployment: docker-compose
**Additional context**
The login works fine.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `authentik/providers/oauth2/migrations/0016_alter_refreshtoken_token.py`
Content:
```
1 # Generated by Django 4.1.7 on 2023-05-06 16:18
2
3 from django.db import migrations, models
4
5 import authentik.providers.oauth2.models
6
7
8 class Migration(migrations.Migration):
9 dependencies = [
10 (
11 "authentik_providers_oauth2",
12 "0015_accesstoken_auth_time_authorizationcode_auth_time_and_more",
13 ),
14 ]
15
16 operations = [
17 migrations.AlterField(
18 model_name="refreshtoken",
19 name="token",
20 field=models.TextField(
21 default=authentik.providers.oauth2.models.generate_client_secret
22 ),
23 ),
24 ]
25
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/authentik/providers/oauth2/migrations/0016_alter_refreshtoken_token.py b/authentik/providers/oauth2/migrations/0016_alter_refreshtoken_token.py
--- a/authentik/providers/oauth2/migrations/0016_alter_refreshtoken_token.py
+++ b/authentik/providers/oauth2/migrations/0016_alter_refreshtoken_token.py
@@ -21,4 +21,26 @@
default=authentik.providers.oauth2.models.generate_client_secret
),
),
+ migrations.AlterField(
+ model_name="oauth2provider",
+ name="sub_mode",
+ field=models.TextField(
+ choices=[
+ ("hashed_user_id", "Based on the Hashed User ID"),
+ ("user_id", "Based on user ID"),
+ ("user_uuid", "Based on user UUID"),
+ ("user_username", "Based on the username"),
+ (
+ "user_email",
+ "Based on the User's Email. This is recommended over the UPN method.",
+ ),
+ (
+ "user_upn",
+ "Based on the User's UPN, only works if user has a 'upn' attribute set. Use this method only if you have different UPN and Mail domains.",
+ ),
+ ],
+ default="hashed_user_id",
+ help_text="Configure what data should be used as unique User Identifier. For most cases, the default should be fine.",
+ ),
+ ),
]
|
{"golden_diff": "diff --git a/authentik/providers/oauth2/migrations/0016_alter_refreshtoken_token.py b/authentik/providers/oauth2/migrations/0016_alter_refreshtoken_token.py\n--- a/authentik/providers/oauth2/migrations/0016_alter_refreshtoken_token.py\n+++ b/authentik/providers/oauth2/migrations/0016_alter_refreshtoken_token.py\n@@ -21,4 +21,26 @@\n default=authentik.providers.oauth2.models.generate_client_secret\n ),\n ),\n+ migrations.AlterField(\n+ model_name=\"oauth2provider\",\n+ name=\"sub_mode\",\n+ field=models.TextField(\n+ choices=[\n+ (\"hashed_user_id\", \"Based on the Hashed User ID\"),\n+ (\"user_id\", \"Based on user ID\"),\n+ (\"user_uuid\", \"Based on user UUID\"),\n+ (\"user_username\", \"Based on the username\"),\n+ (\n+ \"user_email\",\n+ \"Based on the User's Email. This is recommended over the UPN method.\",\n+ ),\n+ (\n+ \"user_upn\",\n+ \"Based on the User's UPN, only works if user has a 'upn' attribute set. Use this method only if you have different UPN and Mail domains.\",\n+ ),\n+ ],\n+ default=\"hashed_user_id\",\n+ help_text=\"Configure what data should be used as unique User Identifier. For most cases, the default should be fine.\",\n+ ),\n+ ),\n ]\n", "issue": "Authentik Outpost Proxy Bad Gateway error when sign_out\n**Describe the bug**\r\nI get a 502 \"Bad gateway\" error (and a go stacktrace) when I go on outpost sign_out url\r\n\r\n**To Reproduce**\r\n1. Set up a authentik outpost proxy\r\n2. Configure your app to login with a traefik forwardAuth middleware\r\n3. Login with success\r\n4. Got to the signout url `https://svc.my_domain.tld/outpost.goauthentik.io/sign_out`\r\n5. See error 502 \"Bad Gateway\"\r\n6. In the log `http: panic serving 172.18.0.2:36148: interface conversion: interface {} is nil, not application.Claims`\r\n\r\n**Expected behavior**\r\nA successful Authentik/App logout \r\n\r\n**Screenshots**\r\nN/A\r\n\r\n**Logs**\r\n```\r\nauthentik-proxy-1 | 2023/05/04 10:58:37 http: panic serving 172.18.0.2:36148: interface conversion: interface {} is nil, not application.Claims \r\nauthentik-proxy-1 | goroutine 38672 [running]: \r\nauthentik-proxy-1 | net/http.(*conn).serve.func1() \r\nauthentik-proxy-1 | /usr/local/go/src/net/http/server.go:1854 +0xbf \r\nauthentik-proxy-1 | panic({0x1029280, 0xc0007e0930}) \r\nauthentik-proxy-1 | /usr/local/go/src/runtime/panic.go:890 +0x263 \r\nauthentik-proxy-1 | goauthentik.io/internal/outpost/proxyv2/application.(*Application).Logout(0xc000208700, {0xc0006ff6c0, 0x40}) \r\nauthentik-proxy-1 | /go/src/goauthentik.io/internal/outpost/proxyv2/application/session.go:97 +0x169d \r\nauthentik-proxy-1 | goauthentik.io/internal/outpost/proxyv2/application.(*Application).handleSignOut(0xc000208700, {0x12cea40, 0xc00039b460}, 0xc00043b480?) \r\nauthentik-proxy-1 | /go/src/goauthentik.io/internal/outpost/proxyv2/application/application.go:274 +0x297 \r\nauthentik-proxy-1 | net/http.HandlerFunc.ServeHTTP(0x0?, {0x12cea40?, 0xc00039b460?}, 0x12?) \r\nauthentik-proxy-1 | /usr/local/go/src/net/http/server.go:2122 +0x2f \r\nauthentik-proxy-1 | goauthentik.io/internal/outpost/proxyv2/application.NewApplication.func3.1({0x12cea40, 0xc00039b460}, 0xc0007b6f00) \r\nauthentik-proxy-1 | /go/src/goauthentik.io/internal/outpost/proxyv2/application/application.go:187 +0x1f9 \r\nauthentik-proxy-1 | net/http.HandlerFunc.ServeHTTP(0xc0006ff6c0?, {0x12cea40?, 0xc00039b460?}, 0x0?) 
\r\nauthentik-proxy-1 | /usr/local/go/src/net/http/server.go:2122 +0x2f \r\nauthentik-proxy-1 | goauthentik.io/internal/outpost/proxyv2/application.NewApplication.func2.1({0x12cea40, 0xc00039b460}, 0xc0007b6f00) \r\nauthentik-proxy-1 | /go/src/goauthentik.io/internal/outpost/proxyv2/application/application.go:165 +0x222 \r\nauthentik-proxy-1 | net/http.HandlerFunc.ServeHTTP(0xc0008c6d20?, {0x12cea40?, 0xc00039b460?}, 0x0?) \r\nauthentik-proxy-1 | /usr/local/go/src/net/http/server.go:2122 +0x2f \r\nauthentik-proxy-1 | goauthentik.io/internal/utils/web.loggingHandler.ServeHTTP({{0x12cb9e0?, 0xc00039b420?}, 0xc00016f2d0?, 0xc0001256d0?}, {0x12cee30?, 0xc000482fc0}, 0xc0007b6f00) \r\nauthentik-proxy-1 | /go/src/goauthentik.io/internal/utils/web/middleware.go:98 +0x12c \r\nauthentik-proxy-1 | github.com/gorilla/mux.(*Router).ServeHTTP(0xc000000300, {0x12cee30, 0xc000482fc0}, 0xc0007b6d00) \r\nauthentik-proxy-1 | /go/pkg/mod/github.com/gorilla/[email protected]/mux.go:210 +0x1cf \r\nauthentik-proxy-1 | goauthentik.io/internal/outpost/proxyv2/application.(*Application).ServeHTTP(...) \r\nauthentik-proxy-1 | /go/src/goauthentik.io/internal/outpost/proxyv2/application/application.go:250 \r\nauthentik-proxy-1 | goauthentik.io/internal/outpost/proxyv2.(*ProxyServer).Handle(0xc00021c370, {0x12cee30, 0xc000482fc0}, 0xc0007b6d00) \r\nauthentik-proxy-1 | /go/src/goauthentik.io/internal/outpost/proxyv2/handlers.go:129 +0xa25 \r\nauthentik-proxy-1 | net/http.HandlerFunc.ServeHTTP(0x1136b00?, {0x12cee30?, 0xc000482fc0?}, 0x7f59d0?) \r\nauthentik-proxy-1 | /usr/local/go/src/net/http/server.go:2122 +0x2f \r\nauthentik-proxy-1 | goauthentik.io/internal/outpost/proxyv2.NewProxyServer.func1.1({0x12cee30, 0xc000482fc0}, 0xc0003b9260?) \r\nauthentik-proxy-1 | /go/src/goauthentik.io/internal/outpost/proxyv2/proxyv2.go:46 +0x3c \r\nauthentik-proxy-1 | net/http.HandlerFunc.ServeHTTP(0xc0007b6c00?, {0x12cee30?, 0xc000482fc0?}, 0xc0008c79e8?) 
\r\nauthentik-proxy-1 | /usr/local/go/src/net/http/server.go:2122 +0x2f \r\nauthentik-proxy-1 | github.com/gorilla/mux.(*Router).ServeHTTP(0xc000000000, {0x12cee30, 0xc000482fc0}, 0xc0007b6b00) \r\nauthentik-proxy-1 | /go/pkg/mod/github.com/gorilla/[email protected]/mux.go:210 +0x1cf \r\nauthentik-proxy-1 | net/http.serverHandler.ServeHTTP({0xc0008cae40?}, {0x12cee30, 0xc000482fc0}, 0xc0007b6b00) \r\nauthentik-proxy-1 | /usr/local/go/src/net/http/server.go:2936 +0x316 \r\nauthentik-proxy-1 | net/http.(*conn).serve(0xc00012efc0, {0x12cf600, 0xc0001998c0}) \r\nauthentik-proxy-1 | /usr/local/go/src/net/http/server.go:1995 +0x612 \r\nauthentik-proxy-1 | created by net/http.(*Server).Serve \r\nauthentik-proxy-1 | /usr/local/go/src/net/http/server.go:3089 +0x5ed\r\n``` \r\n**Version and Deployment (please complete the following information):**\r\n\r\n- authentik version: 2023.4.1\r\n- Deployment: docker-compose\r\n\r\n**Additional context**\r\nThe login works fine.\r\n\n", "before_files": [{"content": "# Generated by Django 4.1.7 on 2023-05-06 16:18\n\nfrom django.db import migrations, models\n\nimport authentik.providers.oauth2.models\n\n\nclass Migration(migrations.Migration):\n dependencies = [\n (\n \"authentik_providers_oauth2\",\n \"0015_accesstoken_auth_time_authorizationcode_auth_time_and_more\",\n ),\n ]\n\n operations = [\n migrations.AlterField(\n model_name=\"refreshtoken\",\n name=\"token\",\n field=models.TextField(\n default=authentik.providers.oauth2.models.generate_client_secret\n ),\n ),\n ]\n", "path": "authentik/providers/oauth2/migrations/0016_alter_refreshtoken_token.py"}], "after_files": [{"content": "# Generated by Django 4.1.7 on 2023-05-06 16:18\n\nfrom django.db import migrations, models\n\nimport authentik.providers.oauth2.models\n\n\nclass Migration(migrations.Migration):\n dependencies = [\n (\n \"authentik_providers_oauth2\",\n \"0015_accesstoken_auth_time_authorizationcode_auth_time_and_more\",\n ),\n ]\n\n operations = [\n migrations.AlterField(\n model_name=\"refreshtoken\",\n name=\"token\",\n field=models.TextField(\n default=authentik.providers.oauth2.models.generate_client_secret\n ),\n ),\n migrations.AlterField(\n model_name=\"oauth2provider\",\n name=\"sub_mode\",\n field=models.TextField(\n choices=[\n (\"hashed_user_id\", \"Based on the Hashed User ID\"),\n (\"user_id\", \"Based on user ID\"),\n (\"user_uuid\", \"Based on user UUID\"),\n (\"user_username\", \"Based on the username\"),\n (\n \"user_email\",\n \"Based on the User's Email. This is recommended over the UPN method.\",\n ),\n (\n \"user_upn\",\n \"Based on the User's UPN, only works if user has a 'upn' attribute set. Use this method only if you have different UPN and Mail domains.\",\n ),\n ],\n default=\"hashed_user_id\",\n help_text=\"Configure what data should be used as unique User Identifier. For most cases, the default should be fine.\",\n ),\n ),\n ]\n", "path": "authentik/providers/oauth2/migrations/0016_alter_refreshtoken_token.py"}]}
| 2,635 | 338 |
gh_patches_debug_14526
|
rasdani/github-patches
|
git_diff
|
pfnet__pytorch-pfn-extras-372
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Typing: ManualScheduleTrigger `points` should accept `int`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pytorch_pfn_extras/training/triggers/manual_schedule_trigger.py`
Content:
```
1 # mypy: ignore-errors
2
3 from typing import List, Union, TYPE_CHECKING
4
5 from pytorch_pfn_extras.training import trigger
6
7
8 if TYPE_CHECKING:
9 from pytorch_pfn_extras.training.manager import _BaseExtensionsManager
10 from pytorch_pfn_extras.training._trigger_util import UnitLiteral
11
12
13 class ManualScheduleTrigger(trigger.Trigger):
14
15 """Trigger invoked at specified point(s) of iterations or epochs.
16
17 This trigger accepts iterations or epochs indicated by given point(s).
18 There are two ways to specify the point(s): iteration and epoch.
19 ``iteration`` means the number of updates, while ``epoch`` means the number
20 of sweeps over the training dataset. Fractional values are allowed
21 if the point is a number of epochs; the trigger uses the ``iteration``
22 and ``epoch_detail`` attributes defined by the manager.
23
24 Args:
25 points (int, float, or list of int or float): time of the trigger.
26 Must be an integer or list of integer if unit is ``'iteration'``.
27 unit (str): Unit of the time specified by ``points``. It must be
28 either ``'iteration'`` or ``'epoch'``.
29
30 """
31
32 def __init__(self, points: Union[float, List[float]], unit: 'UnitLiteral'):
33 if unit not in ('epoch', 'iteration'):
34 raise ValueError(
35 'Trigger unit must be either \'epoch\' or \'iteration\'.')
36
37 self.points = (points if isinstance(points, list) else [points])
38 self.unit = unit
39
40 def __call__(self, manager: '_BaseExtensionsManager') -> bool:
41 """Decides whether the extension should be called on this iteration.
42
43 Args:
44 manager (~pytorch_pfn_extras.training.ExtensionsManager):
45 Manager object that this trigger is associated with.
46 The iteration information in this manager is used to
47 determine if the trigger should fire.
48
49 Returns:
50 bool: True if the corresponding extension should be invoked in this
51 iteration.
52
53 """
54 fire = self.may_fire(manager.iteration, manager._iters_per_epoch)
55 return fire
56
57 def may_fire(self, iteration: int, epoch_length: int) -> bool:
58 if self.unit == 'epoch':
59 fire = any(
60 int(p * epoch_length) == iteration for p in self.points)
61 else:
62 fire = any(p == iteration for p in self.points)
63 return fire
64
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pytorch_pfn_extras/training/triggers/manual_schedule_trigger.py b/pytorch_pfn_extras/training/triggers/manual_schedule_trigger.py
--- a/pytorch_pfn_extras/training/triggers/manual_schedule_trigger.py
+++ b/pytorch_pfn_extras/training/triggers/manual_schedule_trigger.py
@@ -1,6 +1,6 @@
# mypy: ignore-errors
-from typing import List, Union, TYPE_CHECKING
+from typing import Sequence, Union, TYPE_CHECKING
from pytorch_pfn_extras.training import trigger
@@ -29,7 +29,7 @@
"""
- def __init__(self, points: Union[float, List[float]], unit: 'UnitLiteral'):
+ def __init__(self, points: Union[float, Sequence[float]], unit: 'UnitLiteral'):
if unit not in ('epoch', 'iteration'):
raise ValueError(
'Trigger unit must be either \'epoch\' or \'iteration\'.')
|
{"golden_diff": "diff --git a/pytorch_pfn_extras/training/triggers/manual_schedule_trigger.py b/pytorch_pfn_extras/training/triggers/manual_schedule_trigger.py\n--- a/pytorch_pfn_extras/training/triggers/manual_schedule_trigger.py\n+++ b/pytorch_pfn_extras/training/triggers/manual_schedule_trigger.py\n@@ -1,6 +1,6 @@\n # mypy: ignore-errors\n \n-from typing import List, Union, TYPE_CHECKING\n+from typing import Sequence, Union, TYPE_CHECKING\n \n from pytorch_pfn_extras.training import trigger\n \n@@ -29,7 +29,7 @@\n \n \"\"\"\n \n- def __init__(self, points: Union[float, List[float]], unit: 'UnitLiteral'):\n+ def __init__(self, points: Union[float, Sequence[float]], unit: 'UnitLiteral'):\n if unit not in ('epoch', 'iteration'):\n raise ValueError(\n 'Trigger unit must be either \\'epoch\\' or \\'iteration\\'.')\n", "issue": "Typing: ManualScheduleTrigger `points` should accept `int`\n\n", "before_files": [{"content": "# mypy: ignore-errors\n\nfrom typing import List, Union, TYPE_CHECKING\n\nfrom pytorch_pfn_extras.training import trigger\n\n\nif TYPE_CHECKING:\n from pytorch_pfn_extras.training.manager import _BaseExtensionsManager\n from pytorch_pfn_extras.training._trigger_util import UnitLiteral\n\n\nclass ManualScheduleTrigger(trigger.Trigger):\n\n \"\"\"Trigger invoked at specified point(s) of iterations or epochs.\n\n This trigger accepts iterations or epochs indicated by given point(s).\n There are two ways to specify the point(s): iteration and epoch.\n ``iteration`` means the number of updates, while ``epoch`` means the number\n of sweeps over the training dataset. Fractional values are allowed\n if the point is a number of epochs; the trigger uses the ``iteration``\n and ``epoch_detail`` attributes defined by the manager.\n\n Args:\n points (int, float, or list of int or float): time of the trigger.\n Must be an integer or list of integer if unit is ``'iteration'``.\n unit (str): Unit of the time specified by ``points``. 
It must be\n either ``'iteration'`` or ``'epoch'``.\n\n \"\"\"\n\n def __init__(self, points: Union[float, List[float]], unit: 'UnitLiteral'):\n if unit not in ('epoch', 'iteration'):\n raise ValueError(\n 'Trigger unit must be either \\'epoch\\' or \\'iteration\\'.')\n\n self.points = (points if isinstance(points, list) else [points])\n self.unit = unit\n\n def __call__(self, manager: '_BaseExtensionsManager') -> bool:\n \"\"\"Decides whether the extension should be called on this iteration.\n\n Args:\n manager (~pytorch_pfn_extras.training.ExtensionsManager):\n Manager object that this trigger is associated with.\n The iteration information in this manager is used to\n determine if the trigger should fire.\n\n Returns:\n bool: True if the corresponding extension should be invoked in this\n iteration.\n\n \"\"\"\n fire = self.may_fire(manager.iteration, manager._iters_per_epoch)\n return fire\n\n def may_fire(self, iteration: int, epoch_length: int) -> bool:\n if self.unit == 'epoch':\n fire = any(\n int(p * epoch_length) == iteration for p in self.points)\n else:\n fire = any(p == iteration for p in self.points)\n return fire\n", "path": "pytorch_pfn_extras/training/triggers/manual_schedule_trigger.py"}], "after_files": [{"content": "# mypy: ignore-errors\n\nfrom typing import Sequence, Union, TYPE_CHECKING\n\nfrom pytorch_pfn_extras.training import trigger\n\n\nif TYPE_CHECKING:\n from pytorch_pfn_extras.training.manager import _BaseExtensionsManager\n from pytorch_pfn_extras.training._trigger_util import UnitLiteral\n\n\nclass ManualScheduleTrigger(trigger.Trigger):\n\n \"\"\"Trigger invoked at specified point(s) of iterations or epochs.\n\n This trigger accepts iterations or epochs indicated by given point(s).\n There are two ways to specify the point(s): iteration and epoch.\n ``iteration`` means the number of updates, while ``epoch`` means the number\n of sweeps over the training dataset. Fractional values are allowed\n if the point is a number of epochs; the trigger uses the ``iteration``\n and ``epoch_detail`` attributes defined by the manager.\n\n Args:\n points (int, float, or list of int or float): time of the trigger.\n Must be an integer or list of integer if unit is ``'iteration'``.\n unit (str): Unit of the time specified by ``points``. It must be\n either ``'iteration'`` or ``'epoch'``.\n\n \"\"\"\n\n def __init__(self, points: Union[float, Sequence[float]], unit: 'UnitLiteral'):\n if unit not in ('epoch', 'iteration'):\n raise ValueError(\n 'Trigger unit must be either \\'epoch\\' or \\'iteration\\'.')\n\n self.points = (points if isinstance(points, list) else [points])\n self.unit = unit\n\n def __call__(self, manager: '_BaseExtensionsManager') -> bool:\n \"\"\"Decides whether the extension should be called on this iteration.\n\n Args:\n manager (~pytorch_pfn_extras.training.ExtensionsManager):\n Manager object that this trigger is associated with.\n The iteration information in this manager is used to\n determine if the trigger should fire.\n\n Returns:\n bool: True if the corresponding extension should be invoked in this\n iteration.\n\n \"\"\"\n fire = self.may_fire(manager.iteration, manager._iters_per_epoch)\n return fire\n\n def may_fire(self, iteration: int, epoch_length: int) -> bool:\n if self.unit == 'epoch':\n fire = any(\n int(p * epoch_length) == iteration for p in self.points)\n else:\n fire = any(p == iteration for p in self.points)\n return fire\n", "path": "pytorch_pfn_extras/training/triggers/manual_schedule_trigger.py"}]}
| 931 | 210 |
gh_patches_debug_21898
|
rasdani/github-patches
|
git_diff
|
falconry__falcon-2008
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unclear which `HTTPError` was instantiated from `deprecated_args()` warnings
In Falcon 3.0.x, instantiating `HTTPError` or its subclasses with positional arguments (of course except the allowed ones) generates a `DeprecatedWarning` via the `deprecated_args()` decorator.
However, it is unclear from the warning which class/function was invoked, it just says "calls [with more than N] positional args are deprecated". Brought up by @laurent-chriqui (see the linked PR).
Ideally, as a developer, I would like the warning to read along the lines of
```
DeprecatedWarning: Calls to HTTPNotFound.__init__(...) with positional args are deprecated. Please specify them as keyword arguments instead.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `falcon/util/deprecation.py`
Content:
```
1 # Copyright 2013 by Rackspace Hosting, Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Miscellaneous deprecation utilities.
16
17 This module provides decorators to mark functions and classes as deprecated.
18 """
19
20 import functools
21 import warnings
22
23
24 __all__ = (
25 'DeprecatedWarning',
26 'deprecated',
27 'deprecated_args',
28 )
29
30
31 # NOTE(kgriffs): We don't want our deprecations to be ignored by default,
32 # so create our own type.
33 #
34 # TODO(kgriffs): Revisit this decision if users complain.
35 class DeprecatedWarning(UserWarning):
36 pass
37
38
39 def deprecated(instructions, is_property=False, method_name=None):
40 """Flag a method as deprecated.
41
42 This function returns a decorator which can be used to mark deprecated
43 functions. Applying this decorator will result in a warning being
44 emitted when the function is used.
45
46 Args:
47 instructions (str): Specific guidance for the developer, e.g.:
48 'Please migrate to add_proxy(...)'.
49 is_property (bool): If the deprecated object is a property. It
50 will omit the ``(...)`` from the generated documentation.
51 method_name (str, optional): Set to override the name of the
52 deprecated function or property in the generated
53 documentation (default ``None``). This is useful when
54 decorating an alias that carries the target's ``__name__``.
55
56 """
57
58 def decorator(func):
59
60 object_name = 'property' if is_property else 'function'
61 post_name = '' if is_property else '(...)'
62 message = 'Call to deprecated {} {}{}. {}'.format(
63 object_name, method_name or func.__name__, post_name, instructions
64 )
65
66 @functools.wraps(func)
67 def wrapper(*args, **kwargs):
68 warnings.warn(message, category=DeprecatedWarning, stacklevel=2)
69
70 return func(*args, **kwargs)
71
72 return wrapper
73
74 return decorator
75
76
77 def deprecated_args(*, allowed_positional, is_method=True):
78 """Flag a method call with positional args as deprecated.
79
80 Keyword Args:
81 allowed_positional (int): Number of allowed positional arguments
82 is_method (bool, optional): The decorated function is a method. Will
83 add one to the number of allowed positional args to account for
84 ``self``. Defaults to True.
85 """
86
87 template = (
88 'Calls with{} positional args are deprecated.'
89 ' Please specify them as keyword arguments instead.'
90 )
91 text = ' more than {}'.format(allowed_positional) if allowed_positional else ''
92 warn_text = template.format(text)
93 if is_method:
94 allowed_positional += 1
95
96 def deprecated_args(fn):
97 @functools.wraps(fn)
98 def wraps(*args, **kwargs):
99 if len(args) > allowed_positional:
100 warnings.warn(warn_text, DeprecatedWarning, stacklevel=2)
101 return fn(*args, **kwargs)
102
103 return wraps
104
105 return deprecated_args
106
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/falcon/util/deprecation.py b/falcon/util/deprecation.py
--- a/falcon/util/deprecation.py
+++ b/falcon/util/deprecation.py
@@ -85,11 +85,11 @@
"""
template = (
- 'Calls with{} positional args are deprecated.'
+ 'Calls to {{fn}}(...) with{arg_text} positional args are deprecated.'
' Please specify them as keyword arguments instead.'
)
text = ' more than {}'.format(allowed_positional) if allowed_positional else ''
- warn_text = template.format(text)
+ warn_text = template.format(arg_text=text)
if is_method:
allowed_positional += 1
@@ -97,7 +97,11 @@
@functools.wraps(fn)
def wraps(*args, **kwargs):
if len(args) > allowed_positional:
- warnings.warn(warn_text, DeprecatedWarning, stacklevel=2)
+ warnings.warn(
+ warn_text.format(fn=fn.__qualname__),
+ DeprecatedWarning,
+ stacklevel=2,
+ )
return fn(*args, **kwargs)
return wraps
|
{"golden_diff": "diff --git a/falcon/util/deprecation.py b/falcon/util/deprecation.py\n--- a/falcon/util/deprecation.py\n+++ b/falcon/util/deprecation.py\n@@ -85,11 +85,11 @@\n \"\"\"\n \n template = (\n- 'Calls with{} positional args are deprecated.'\n+ 'Calls to {{fn}}(...) with{arg_text} positional args are deprecated.'\n ' Please specify them as keyword arguments instead.'\n )\n text = ' more than {}'.format(allowed_positional) if allowed_positional else ''\n- warn_text = template.format(text)\n+ warn_text = template.format(arg_text=text)\n if is_method:\n allowed_positional += 1\n \n@@ -97,7 +97,11 @@\n @functools.wraps(fn)\n def wraps(*args, **kwargs):\n if len(args) > allowed_positional:\n- warnings.warn(warn_text, DeprecatedWarning, stacklevel=2)\n+ warnings.warn(\n+ warn_text.format(fn=fn.__qualname__),\n+ DeprecatedWarning,\n+ stacklevel=2,\n+ )\n return fn(*args, **kwargs)\n \n return wraps\n", "issue": "Unclear which `HTTPError` was instantiated from `deprecated_args()` warnings\nIn Falcon 3.0.x, instantiating `HTTPError` or its subclasses with positional arguments (of course except the allowed ones) generates a `DeprecatedWarning` via the `deprecated_args()` decorator.\r\n\r\nHowever, it is unclear from the warning which class/function was invoked, it just says \"calls [with more than N] positional args are deprecated\". Brought up by @laurent-chriqui (see the linked PR).\r\n\r\nIdeally, as a developer, I would like the warning to read along the lines of\r\n```\r\nDeprecatedWarning: Calls to HTTPNotFound.__init__(...) with positional args are deprecated. Please specify them as keyword arguments instead.\r\n```\n", "before_files": [{"content": "# Copyright 2013 by Rackspace Hosting, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Miscellaneous deprecation utilities.\n\nThis module provides decorators to mark functions and classes as deprecated.\n\"\"\"\n\nimport functools\nimport warnings\n\n\n__all__ = (\n 'DeprecatedWarning',\n 'deprecated',\n 'deprecated_args',\n)\n\n\n# NOTE(kgriffs): We don't want our deprecations to be ignored by default,\n# so create our own type.\n#\n# TODO(kgriffs): Revisit this decision if users complain.\nclass DeprecatedWarning(UserWarning):\n pass\n\n\ndef deprecated(instructions, is_property=False, method_name=None):\n \"\"\"Flag a method as deprecated.\n\n This function returns a decorator which can be used to mark deprecated\n functions. Applying this decorator will result in a warning being\n emitted when the function is used.\n\n Args:\n instructions (str): Specific guidance for the developer, e.g.:\n 'Please migrate to add_proxy(...)'.\n is_property (bool): If the deprecated object is a property. It\n will omit the ``(...)`` from the generated documentation.\n method_name (str, optional): Set to override the name of the\n deprecated function or property in the generated\n documentation (default ``None``). 
This is useful when\n decorating an alias that carries the target's ``__name__``.\n\n \"\"\"\n\n def decorator(func):\n\n object_name = 'property' if is_property else 'function'\n post_name = '' if is_property else '(...)'\n message = 'Call to deprecated {} {}{}. {}'.format(\n object_name, method_name or func.__name__, post_name, instructions\n )\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n warnings.warn(message, category=DeprecatedWarning, stacklevel=2)\n\n return func(*args, **kwargs)\n\n return wrapper\n\n return decorator\n\n\ndef deprecated_args(*, allowed_positional, is_method=True):\n \"\"\"Flag a method call with positional args as deprecated.\n\n Keyword Args:\n allowed_positional (int): Number of allowed positional arguments\n is_method (bool, optional): The decorated function is a method. Will\n add one to the number of allowed positional args to account for\n ``self``. Defaults to True.\n \"\"\"\n\n template = (\n 'Calls with{} positional args are deprecated.'\n ' Please specify them as keyword arguments instead.'\n )\n text = ' more than {}'.format(allowed_positional) if allowed_positional else ''\n warn_text = template.format(text)\n if is_method:\n allowed_positional += 1\n\n def deprecated_args(fn):\n @functools.wraps(fn)\n def wraps(*args, **kwargs):\n if len(args) > allowed_positional:\n warnings.warn(warn_text, DeprecatedWarning, stacklevel=2)\n return fn(*args, **kwargs)\n\n return wraps\n\n return deprecated_args\n", "path": "falcon/util/deprecation.py"}], "after_files": [{"content": "# Copyright 2013 by Rackspace Hosting, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Miscellaneous deprecation utilities.\n\nThis module provides decorators to mark functions and classes as deprecated.\n\"\"\"\n\nimport functools\nimport warnings\n\n\n__all__ = (\n 'DeprecatedWarning',\n 'deprecated',\n 'deprecated_args',\n)\n\n\n# NOTE(kgriffs): We don't want our deprecations to be ignored by default,\n# so create our own type.\n#\n# TODO(kgriffs): Revisit this decision if users complain.\nclass DeprecatedWarning(UserWarning):\n pass\n\n\ndef deprecated(instructions, is_property=False, method_name=None):\n \"\"\"Flag a method as deprecated.\n\n This function returns a decorator which can be used to mark deprecated\n functions. Applying this decorator will result in a warning being\n emitted when the function is used.\n\n Args:\n instructions (str): Specific guidance for the developer, e.g.:\n 'Please migrate to add_proxy(...)'.\n is_property (bool): If the deprecated object is a property. It\n will omit the ``(...)`` from the generated documentation.\n method_name (str, optional): Set to override the name of the\n deprecated function or property in the generated\n documentation (default ``None``). This is useful when\n decorating an alias that carries the target's ``__name__``.\n\n \"\"\"\n\n def decorator(func):\n\n object_name = 'property' if is_property else 'function'\n post_name = '' if is_property else '(...)'\n message = 'Call to deprecated {} {}{}. 
{}'.format(\n object_name, method_name or func.__name__, post_name, instructions\n )\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n warnings.warn(message, category=DeprecatedWarning, stacklevel=2)\n\n return func(*args, **kwargs)\n\n return wrapper\n\n return decorator\n\n\ndef deprecated_args(*, allowed_positional, is_method=True):\n \"\"\"Flag a method call with positional args as deprecated.\n\n Keyword Args:\n allowed_positional (int): Number of allowed positional arguments\n is_method (bool, optional): The decorated function is a method. Will\n add one to the number of allowed positional args to account for\n ``self``. Defaults to True.\n \"\"\"\n\n template = (\n 'Calls to {{fn}}(...) with{arg_text} positional args are deprecated.'\n ' Please specify them as keyword arguments instead.'\n )\n text = ' more than {}'.format(allowed_positional) if allowed_positional else ''\n warn_text = template.format(arg_text=text)\n if is_method:\n allowed_positional += 1\n\n def deprecated_args(fn):\n @functools.wraps(fn)\n def wraps(*args, **kwargs):\n if len(args) > allowed_positional:\n warnings.warn(\n warn_text.format(fn=fn.__qualname__),\n DeprecatedWarning,\n stacklevel=2,\n )\n return fn(*args, **kwargs)\n\n return wraps\n\n return deprecated_args\n", "path": "falcon/util/deprecation.py"}]}
| 1,376 | 255 |
gh_patches_debug_60934
|
rasdani/github-patches
|
git_diff
|
superduper-io__superduper-1837
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG]: Variable inject for list values in a serialised component missing kwargs
c = Component()
c.dict() -> {some keys: [ {}, { 'v': Variable_type }] }
due to
```
def _replace_variables(x, db, **kwargs):
from .document import Document
if isinstance(x, dict):
return {
_replace_variables(k, db, **kwargs): _replace_variables(v, db, **kwargs)
for k, v in x.items()
}
if isinstance(x, (list, tuple)):
return [_replace_variables(v, db) for v in x] -> BUG (need **kwargs here)
if isinstance(x, Variable):
return x.set(db, **kwargs)
if isinstance(x, Document):
return x.set_variables(db, **kwargs)
return x
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `superduperdb/base/serializable.py`
Content:
```
1 import dataclasses as dc
2 import importlib
3 import typing as t
4 from copy import deepcopy
5
6 from superduperdb.base.leaf import Leaf
7 from superduperdb.misc.serialization import asdict
8
9
10 def _from_dict(r: t.Any, db: None = None) -> t.Any:
11 from superduperdb.base.document import Document
12 from superduperdb.components.datatype import File, LazyArtifact
13
14 if isinstance(r, Document):
15 r = r.unpack(db, leaves_to_keep=(LazyArtifact, File))
16 if isinstance(r, (list, tuple)):
17 return [_from_dict(i, db=db) for i in r]
18 if not isinstance(r, dict):
19 return r
20 if '_content' in r:
21 r = r['_content']
22 if 'cls' in r and 'module' in r and 'dict' in r:
23 module = importlib.import_module(r['module'])
24 cls_ = getattr(module, r['cls'])
25 kwargs = _from_dict(r['dict'])
26 kwargs_init = {k: v for k, v in kwargs.items() if k not in cls_.set_post_init}
27 kwargs_post_init = {k: v for k, v in kwargs.items() if k in cls_.set_post_init}
28 instance = cls_(**kwargs_init)
29 for k, v in kwargs_post_init.items():
30 setattr(instance, k, v)
31 return instance
32 else:
33 return {k: _from_dict(v, db=db) for k, v in r.items()}
34
35
36 class VariableError(Exception):
37 ...
38
39
40 def _find_variables(r):
41 if isinstance(r, dict):
42 return sum([_find_variables(v) for v in r.values()], [])
43 elif isinstance(r, (list, tuple)):
44 return sum([_find_variables(v) for v in r], [])
45 elif isinstance(r, Variable):
46 return [r]
47 return []
48
49
50 def _replace_variables(x, db, **kwargs):
51 from .document import Document
52
53 if isinstance(x, dict):
54 return {
55 _replace_variables(k, db, **kwargs): _replace_variables(v, db, **kwargs)
56 for k, v in x.items()
57 }
58 if isinstance(x, (list, tuple)):
59 return [_replace_variables(v, db) for v in x]
60 if isinstance(x, Variable):
61 return x.set(db, **kwargs)
62 if isinstance(x, Document):
63 return x.set_variables(db, **kwargs)
64 return x
65
66
67 @dc.dataclass
68 class Serializable(Leaf):
69 """
70 Base class for serializable objects. This class is used to serialize and
71 deserialize objects to and from JSON + Artifact instances.
72 """
73
74 set_post_init: t.ClassVar[t.Sequence] = ()
75
76 @property
77 def unique_id(self):
78 return str(hash(self.dict().encode()))
79
80 @property
81 def variables(self) -> t.List['Variable']:
82 out = {}
83 r = self.encode(leaf_types_to_keep=(Variable,))
84 v = _find_variables(r)
85 for var in v:
86 out[var.value] = var
87 return sorted(list(out.values()), key=lambda x: x.value)
88
89 def set_variables(self, db, **kwargs) -> 'Serializable':
90 """
91 Set free variables of self.
92
93 :param db:
94 """
95 r = self.encode(leaf_types_to_keep=(Variable,))
96 r = _replace_variables(r, db, **kwargs)
97 return self.decode(r)
98
99 def encode(
100 self,
101 leaf_types_to_keep: t.Sequence = (),
102 ):
103 r = dict(self.dict().encode(leaf_types_to_keep=leaf_types_to_keep))
104 r['leaf_type'] = 'serializable'
105 return {'_content': r}
106
107 @classmethod
108 def decode(cls, r, db: t.Optional[t.Any] = None):
109 return _from_dict(r, db=db)
110
111 def dict(self):
112 from superduperdb import Document
113
114 return Document(asdict(self))
115
116 def copy(self):
117 return deepcopy(self)
118
119
120 @dc.dataclass
121 class Variable(Serializable):
122 """
123 Mechanism for allowing "free variables" in a serializable object.
124 The idea is to allow a variable to be set at runtime, rather than
125 at object creation time.
126
127 :param value: The name of the variable to be set at runtime.
128 :param setter_callback: A callback function that takes the value, datalayer
129 and kwargs as input and returns the formatted
130 variable.
131 """
132
133 value: t.Any
134 setter_callback: dc.InitVar[t.Optional[t.Callable]] = None
135
136 def __post_init__(self, setter_callback):
137 self.setter_callback = setter_callback
138
139 def __repr__(self) -> str:
140 return '$' + str(self.value)
141
142 def __hash__(self) -> int:
143 return hash(self.value)
144
145 def set(self, db, **kwargs):
146 """
147 Get the intended value from the values of the global variables.
148
149 >>> Variable('number').set(db, number=1.5, other='test')
150 1.5
151
152 :param db: The datalayer instance.
153 :param kwargs: Variables to be used in the setter_callback
154 or as formatting variables.
155 """
156 if self.setter_callback is not None:
157 try:
158 return self.setter_callback(db, self.value, kwargs)
159 except Exception as e:
160 raise VariableError(
161 f'Could not set variable {self.value} '
162 f'based on {self.setter_callback} and **kwargs: {kwargs}'
163 ) from e
164 else:
165 assert isinstance(self.value, str)
166 return kwargs[self.value]
167
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/superduperdb/base/serializable.py b/superduperdb/base/serializable.py
--- a/superduperdb/base/serializable.py
+++ b/superduperdb/base/serializable.py
@@ -56,7 +56,7 @@
for k, v in x.items()
}
if isinstance(x, (list, tuple)):
- return [_replace_variables(v, db) for v in x]
+ return [_replace_variables(v, db, **kwargs) for v in x]
if isinstance(x, Variable):
return x.set(db, **kwargs)
if isinstance(x, Document):
|
{"golden_diff": "diff --git a/superduperdb/base/serializable.py b/superduperdb/base/serializable.py\n--- a/superduperdb/base/serializable.py\n+++ b/superduperdb/base/serializable.py\n@@ -56,7 +56,7 @@\n for k, v in x.items()\n }\n if isinstance(x, (list, tuple)):\n- return [_replace_variables(v, db) for v in x]\n+ return [_replace_variables(v, db, **kwargs) for v in x]\n if isinstance(x, Variable):\n return x.set(db, **kwargs)\n if isinstance(x, Document):\n", "issue": "[BUG]: Variable inject for list values in a serialised component missing kwargs\nc = Component()\r\n\r\nc.dict() -> {some keys: [ {}, { 'v': Variable_type }] }\r\n\r\ndue to \r\n```\r\n\r\ndef _replace_variables(x, db, **kwargs):\r\n from .document import Document\r\n\r\n if isinstance(x, dict):\r\n return {\r\n _replace_variables(k, db, **kwargs): _replace_variables(v, db, **kwargs)\r\n for k, v in x.items()\r\n }\r\n if isinstance(x, (list, tuple)):\r\n return [_replace_variables(v, db) for v in x] -> BUG (need **kwargs here)\r\n if isinstance(x, Variable):\r\n return x.set(db, **kwargs)\r\n if isinstance(x, Document):\r\n return x.set_variables(db, **kwargs)\r\n return x\r\n\r\n```\n", "before_files": [{"content": "import dataclasses as dc\nimport importlib\nimport typing as t\nfrom copy import deepcopy\n\nfrom superduperdb.base.leaf import Leaf\nfrom superduperdb.misc.serialization import asdict\n\n\ndef _from_dict(r: t.Any, db: None = None) -> t.Any:\n from superduperdb.base.document import Document\n from superduperdb.components.datatype import File, LazyArtifact\n\n if isinstance(r, Document):\n r = r.unpack(db, leaves_to_keep=(LazyArtifact, File))\n if isinstance(r, (list, tuple)):\n return [_from_dict(i, db=db) for i in r]\n if not isinstance(r, dict):\n return r\n if '_content' in r:\n r = r['_content']\n if 'cls' in r and 'module' in r and 'dict' in r:\n module = importlib.import_module(r['module'])\n cls_ = getattr(module, r['cls'])\n kwargs = _from_dict(r['dict'])\n kwargs_init = {k: v for k, v in kwargs.items() if k not in cls_.set_post_init}\n kwargs_post_init = {k: v for k, v in kwargs.items() if k in cls_.set_post_init}\n instance = cls_(**kwargs_init)\n for k, v in kwargs_post_init.items():\n setattr(instance, k, v)\n return instance\n else:\n return {k: _from_dict(v, db=db) for k, v in r.items()}\n\n\nclass VariableError(Exception):\n ...\n\n\ndef _find_variables(r):\n if isinstance(r, dict):\n return sum([_find_variables(v) for v in r.values()], [])\n elif isinstance(r, (list, tuple)):\n return sum([_find_variables(v) for v in r], [])\n elif isinstance(r, Variable):\n return [r]\n return []\n\n\ndef _replace_variables(x, db, **kwargs):\n from .document import Document\n\n if isinstance(x, dict):\n return {\n _replace_variables(k, db, **kwargs): _replace_variables(v, db, **kwargs)\n for k, v in x.items()\n }\n if isinstance(x, (list, tuple)):\n return [_replace_variables(v, db) for v in x]\n if isinstance(x, Variable):\n return x.set(db, **kwargs)\n if isinstance(x, Document):\n return x.set_variables(db, **kwargs)\n return x\n\n\[email protected]\nclass Serializable(Leaf):\n \"\"\"\n Base class for serializable objects. 
This class is used to serialize and\n deserialize objects to and from JSON + Artifact instances.\n \"\"\"\n\n set_post_init: t.ClassVar[t.Sequence] = ()\n\n @property\n def unique_id(self):\n return str(hash(self.dict().encode()))\n\n @property\n def variables(self) -> t.List['Variable']:\n out = {}\n r = self.encode(leaf_types_to_keep=(Variable,))\n v = _find_variables(r)\n for var in v:\n out[var.value] = var\n return sorted(list(out.values()), key=lambda x: x.value)\n\n def set_variables(self, db, **kwargs) -> 'Serializable':\n \"\"\"\n Set free variables of self.\n\n :param db:\n \"\"\"\n r = self.encode(leaf_types_to_keep=(Variable,))\n r = _replace_variables(r, db, **kwargs)\n return self.decode(r)\n\n def encode(\n self,\n leaf_types_to_keep: t.Sequence = (),\n ):\n r = dict(self.dict().encode(leaf_types_to_keep=leaf_types_to_keep))\n r['leaf_type'] = 'serializable'\n return {'_content': r}\n\n @classmethod\n def decode(cls, r, db: t.Optional[t.Any] = None):\n return _from_dict(r, db=db)\n\n def dict(self):\n from superduperdb import Document\n\n return Document(asdict(self))\n\n def copy(self):\n return deepcopy(self)\n\n\[email protected]\nclass Variable(Serializable):\n \"\"\"\n Mechanism for allowing \"free variables\" in a serializable object.\n The idea is to allow a variable to be set at runtime, rather than\n at object creation time.\n\n :param value: The name of the variable to be set at runtime.\n :param setter_callback: A callback function that takes the value, datalayer\n and kwargs as input and returns the formatted\n variable.\n \"\"\"\n\n value: t.Any\n setter_callback: dc.InitVar[t.Optional[t.Callable]] = None\n\n def __post_init__(self, setter_callback):\n self.setter_callback = setter_callback\n\n def __repr__(self) -> str:\n return '$' + str(self.value)\n\n def __hash__(self) -> int:\n return hash(self.value)\n\n def set(self, db, **kwargs):\n \"\"\"\n Get the intended value from the values of the global variables.\n\n >>> Variable('number').set(db, number=1.5, other='test')\n 1.5\n\n :param db: The datalayer instance.\n :param kwargs: Variables to be used in the setter_callback\n or as formatting variables.\n \"\"\"\n if self.setter_callback is not None:\n try:\n return self.setter_callback(db, self.value, kwargs)\n except Exception as e:\n raise VariableError(\n f'Could not set variable {self.value} '\n f'based on {self.setter_callback} and **kwargs: {kwargs}'\n ) from e\n else:\n assert isinstance(self.value, str)\n return kwargs[self.value]\n", "path": "superduperdb/base/serializable.py"}], "after_files": [{"content": "import dataclasses as dc\nimport importlib\nimport typing as t\nfrom copy import deepcopy\n\nfrom superduperdb.base.leaf import Leaf\nfrom superduperdb.misc.serialization import asdict\n\n\ndef _from_dict(r: t.Any, db: None = None) -> t.Any:\n from superduperdb.base.document import Document\n from superduperdb.components.datatype import File, LazyArtifact\n\n if isinstance(r, Document):\n r = r.unpack(db, leaves_to_keep=(LazyArtifact, File))\n if isinstance(r, (list, tuple)):\n return [_from_dict(i, db=db) for i in r]\n if not isinstance(r, dict):\n return r\n if '_content' in r:\n r = r['_content']\n if 'cls' in r and 'module' in r and 'dict' in r:\n module = importlib.import_module(r['module'])\n cls_ = getattr(module, r['cls'])\n kwargs = _from_dict(r['dict'])\n kwargs_init = {k: v for k, v in kwargs.items() if k not in cls_.set_post_init}\n kwargs_post_init = {k: v for k, v in kwargs.items() if k in cls_.set_post_init}\n instance = 
cls_(**kwargs_init)\n for k, v in kwargs_post_init.items():\n setattr(instance, k, v)\n return instance\n else:\n return {k: _from_dict(v, db=db) for k, v in r.items()}\n\n\nclass VariableError(Exception):\n ...\n\n\ndef _find_variables(r):\n if isinstance(r, dict):\n return sum([_find_variables(v) for v in r.values()], [])\n elif isinstance(r, (list, tuple)):\n return sum([_find_variables(v) for v in r], [])\n elif isinstance(r, Variable):\n return [r]\n return []\n\n\ndef _replace_variables(x, db, **kwargs):\n from .document import Document\n\n if isinstance(x, dict):\n return {\n _replace_variables(k, db, **kwargs): _replace_variables(v, db, **kwargs)\n for k, v in x.items()\n }\n if isinstance(x, (list, tuple)):\n return [_replace_variables(v, db, **kwargs) for v in x]\n if isinstance(x, Variable):\n return x.set(db, **kwargs)\n if isinstance(x, Document):\n return x.set_variables(db, **kwargs)\n return x\n\n\[email protected]\nclass Serializable(Leaf):\n \"\"\"\n Base class for serializable objects. This class is used to serialize and\n deserialize objects to and from JSON + Artifact instances.\n \"\"\"\n\n set_post_init: t.ClassVar[t.Sequence] = ()\n\n @property\n def unique_id(self):\n return str(hash(self.dict().encode()))\n\n @property\n def variables(self) -> t.List['Variable']:\n out = {}\n r = self.encode(leaf_types_to_keep=(Variable,))\n v = _find_variables(r)\n for var in v:\n out[var.value] = var\n return sorted(list(out.values()), key=lambda x: x.value)\n\n def set_variables(self, db, **kwargs) -> 'Serializable':\n \"\"\"\n Set free variables of self.\n\n :param db:\n \"\"\"\n r = self.encode(leaf_types_to_keep=(Variable,))\n r = _replace_variables(r, db, **kwargs)\n return self.decode(r)\n\n def encode(\n self,\n leaf_types_to_keep: t.Sequence = (),\n ):\n r = dict(self.dict().encode(leaf_types_to_keep=leaf_types_to_keep))\n r['leaf_type'] = 'serializable'\n return {'_content': r}\n\n @classmethod\n def decode(cls, r, db: t.Optional[t.Any] = None):\n return _from_dict(r, db=db)\n\n def dict(self):\n from superduperdb import Document\n\n return Document(asdict(self))\n\n def copy(self):\n return deepcopy(self)\n\n\[email protected]\nclass Variable(Serializable):\n \"\"\"\n Mechanism for allowing \"free variables\" in a serializable object.\n The idea is to allow a variable to be set at runtime, rather than\n at object creation time.\n\n :param value: The name of the variable to be set at runtime.\n :param setter_callback: A callback function that takes the value, datalayer\n and kwargs as input and returns the formatted\n variable.\n \"\"\"\n\n value: t.Any\n setter_callback: dc.InitVar[t.Optional[t.Callable]] = None\n\n def __post_init__(self, setter_callback):\n self.setter_callback = setter_callback\n\n def __repr__(self) -> str:\n return '$' + str(self.value)\n\n def __hash__(self) -> int:\n return hash(self.value)\n\n def set(self, db, **kwargs):\n \"\"\"\n Get the intended value from the values of the global variables.\n\n >>> Variable('number').set(db, number=1.5, other='test')\n 1.5\n\n :param db: The datalayer instance.\n :param kwargs: Variables to be used in the setter_callback\n or as formatting variables.\n \"\"\"\n if self.setter_callback is not None:\n try:\n return self.setter_callback(db, self.value, kwargs)\n except Exception as e:\n raise VariableError(\n f'Could not set variable {self.value} '\n f'based on {self.setter_callback} and **kwargs: {kwargs}'\n ) from e\n else:\n assert isinstance(self.value, str)\n return kwargs[self.value]\n", "path": 
"superduperdb/base/serializable.py"}]}
| 2,050 | 141 |
gh_patches_debug_26370
|
rasdani/github-patches
|
git_diff
|
internetarchive__openlibrary-3973
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Python 3: Add Cover Image hangs
<!-- What problem are we solving? What does the experience look like today? What are the symptoms? -->
### Evidence / Screenshot (if possible)
http://localhost:8080/works/OL6037022W/Remix?debug=true on Docker Python 3
* Or http://staging.openlibrary.org/works/OL6037022W/Remix?debug=true
Command to launch Open Library on Docker on Python 3:
```
docker-compose down ; \
PYENV_VERSION=3.9.0 docker-compose -f docker-compose.yml -f docker-compose.infogami-local.yml up -d ; \
docker-compose logs -f --tail=10 web
```
### Relevant url?
<!-- `https://openlibrary.org/...` -->
1. http://localhost:8080/works/OL6037022W/Remix?debug=true
2. On the image, click Add Cover Image
3. Browse and select an appropriate local image file and click Submit
4. Internal Server Error
### Steps to Reproduce
<!-- What steps caused you to find the bug? -->
1. Go to ...
2. Do ...
<!-- What actually happened after these steps? What did you expect to happen? -->
* Actual:
* Expected:
### Details
- **Logged in (Y/N)?**
- **Browser type/version?**
- **Operating system?**
- **Environment (prod/dev/local)?** prod
<!-- If not sure, put prod -->
### Proposal & Constraints
<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->
### Related files
<!-- Files related to this issue; this is super useful for new contributors who might want to help! If you're not sure, leave this blank; a maintainer will add them. -->
### Stakeholders
<!-- @ tag stakeholders of this bug -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openlibrary/plugins/upstream/covers.py`
Content:
```
1 """Handle book cover/author photo upload.
2 """
3 import web
4 import simplejson
5
6 from infogami.utils import delegate
7 from infogami.utils.view import safeint
8 from openlibrary import accounts
9 from openlibrary.plugins.upstream.models import Image
10 from openlibrary.plugins.upstream.utils import get_coverstore_url, render_template
11
12 from six.moves import urllib
13
14
15 def setup():
16 pass
17
18 class add_cover(delegate.page):
19 path = "(/books/OL\d+M)/add-cover"
20 cover_category = "b"
21
22 def GET(self, key):
23 book = web.ctx.site.get(key)
24 return render_template('covers/add', book)
25
26 def POST(self, key):
27 book = web.ctx.site.get(key)
28 if not book:
29 raise web.notfound("")
30
31 i = web.input(file={}, url="")
32
33 # remove references to field storage objects
34 web.ctx.pop("_fieldstorage", None)
35
36 data = self.upload(key, i)
37 coverid = data.get('id')
38
39 if coverid:
40 self.save(book, coverid, url=i.url)
41 cover = Image(web.ctx.site, "b", coverid)
42 return render_template("covers/saved", cover)
43 else:
44 return render_template("covers/add", book, {'url': i.url}, data)
45
46 def upload(self, key, i):
47 """Uploads a cover to coverstore and returns the response."""
48 olid = key.split("/")[-1]
49
50 if i.file is not None and hasattr(i.file, 'value'):
51 data = i.file.value
52 else:
53 data = None
54
55 if i.url and i.url.strip() == "http://":
56 i.url = ""
57
58 user = accounts.get_current_user()
59 params = {
60 "author": user and user.key,
61 "data": data,
62 "source_url": i.url,
63 "olid": olid,
64 "ip": web.ctx.ip
65 }
66
67 upload_url = '%s/%s/upload2' % (
68 get_coverstore_url(), self.cover_category)
69
70 if upload_url.startswith("//"):
71 upload_url = "http:" + upload_url
72
73 try:
74 response = urllib.request.urlopen(upload_url, urllib.parse.urlencode(params))
75 out = response.read()
76 except urllib.error.HTTPError as e:
77 out = {'error': e.read()}
78
79 return web.storage(simplejson.loads(out))
80
81 def save(self, book, coverid, url=None):
82 book.covers = [coverid] + [cover.id for cover in book.get_covers()]
83 book._save("Added new cover", action="add-cover", data={"url": url})
84
85 class add_work_cover(add_cover):
86 path = "(/works/OL\d+W)/add-cover"
87 cover_category = "w"
88
89 def upload(self, key, i):
90 if "coverid" in i and safeint(i.coverid):
91 return web.storage(id=int(i.coverid))
92 else:
93 return add_cover.upload(self, key, i)
94
95 class add_photo(add_cover):
96 path = "(/authors/OL\d+A)/add-photo"
97 cover_category = "a"
98
99 def save(self, author, photoid, url=None):
100 author.photos = [photoid] + [photo.id for photo in author.get_photos()]
101 author._save("Added new photo", action="add-photo", data={"url": url})
102
103 class manage_covers(delegate.page):
104 path = "(/books/OL\d+M)/manage-covers"
105 def GET(self, key):
106 book = web.ctx.site.get(key)
107 if not book:
108 raise web.notfound()
109 return render_template("covers/manage", key, self.get_images(book))
110
111 def get_images(self, book):
112 return book.get_covers()
113
114 def get_image(self, book):
115 return book.get_cover()
116
117 def save_images(self, book, covers):
118 book.covers = covers
119 book._save('Update covers')
120
121 def POST(self, key):
122 book = web.ctx.site.get(key)
123 if not book:
124 raise web.notfound()
125
126 images = web.input(image=[]).image
127 if '-' in images:
128 images = [int(id) for id in images[:images.index('-')]]
129 self.save_images(book, images)
130 return render_template("covers/saved", self.get_image(book), showinfo=False)
131 else:
132 # ERROR
133 pass
134
135 class manage_work_covers(manage_covers):
136 path = "(/works/OL\d+W)/manage-covers"
137
138
139 class manage_photos(manage_covers):
140 path = "(/authors/OL\d+A)/manage-photos"
141
142 def get_images(self, author):
143 return author.get_photos()
144
145 def get_image(self, author):
146 return author.get_photo()
147
148 def save_images(self, author, photos):
149 author.photos = photos
150 author._save('Update photos')
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/openlibrary/plugins/upstream/covers.py b/openlibrary/plugins/upstream/covers.py
--- a/openlibrary/plugins/upstream/covers.py
+++ b/openlibrary/plugins/upstream/covers.py
@@ -1,7 +1,7 @@
"""Handle book cover/author photo upload.
"""
+import requests
import web
-import simplejson
from infogami.utils import delegate
from infogami.utils.view import safeint
@@ -9,8 +9,6 @@
from openlibrary.plugins.upstream.models import Image
from openlibrary.plugins.upstream.utils import get_coverstore_url, render_template
-from six.moves import urllib
-
def setup():
pass
@@ -71,12 +69,10 @@
upload_url = "http:" + upload_url
try:
- response = urllib.request.urlopen(upload_url, urllib.parse.urlencode(params))
- out = response.read()
- except urllib.error.HTTPError as e:
- out = {'error': e.read()}
-
- return web.storage(simplejson.loads(out))
+ payload = requests.compat.urlencode(params).encode('utf-8')
+ return web.storage(requests.post(upload_url, data=payload).json())
+ except requests.HTTPError as e:
+ return web.storage({'error': e.read()})
def save(self, book, coverid, url=None):
book.covers = [coverid] + [cover.id for cover in book.get_covers()]
|
{"golden_diff": "diff --git a/openlibrary/plugins/upstream/covers.py b/openlibrary/plugins/upstream/covers.py\n--- a/openlibrary/plugins/upstream/covers.py\n+++ b/openlibrary/plugins/upstream/covers.py\n@@ -1,7 +1,7 @@\n \"\"\"Handle book cover/author photo upload.\n \"\"\"\n+import requests\n import web\n-import simplejson\n \n from infogami.utils import delegate\n from infogami.utils.view import safeint\n@@ -9,8 +9,6 @@\n from openlibrary.plugins.upstream.models import Image\n from openlibrary.plugins.upstream.utils import get_coverstore_url, render_template\n \n-from six.moves import urllib\n-\n \n def setup():\n pass\n@@ -71,12 +69,10 @@\n upload_url = \"http:\" + upload_url\n \n try:\n- response = urllib.request.urlopen(upload_url, urllib.parse.urlencode(params))\n- out = response.read()\n- except urllib.error.HTTPError as e:\n- out = {'error': e.read()}\n-\n- return web.storage(simplejson.loads(out))\n+ payload = requests.compat.urlencode(params).encode('utf-8')\n+ return web.storage(requests.post(upload_url, data=payload).json())\n+ except requests.HTTPError as e:\n+ return web.storage({'error': e.read()})\n \n def save(self, book, coverid, url=None):\n book.covers = [coverid] + [cover.id for cover in book.get_covers()]\n", "issue": "Python 3: Add Cover Image hangs\n<!-- What problem are we solving? What does the experience look like today? What are the symptoms? -->\r\n\r\n### Evidence / Screenshot (if possible)\r\nhttp://localhost:8080/works/OL6037022W/Remix?debug=true on Docker Python 3\r\n* Or http://staging.openlibrary.org/works/OL6037022W/Remix?debug=true\r\n\r\nCommand to launch Open Library on Docker on Python 3:\r\n```\r\ndocker-compose down ; \\\r\n PYENV_VERSION=3.9.0 docker-compose -f docker-compose.yml -f docker-compose.infogami-local.yml up -d ; \\\r\n docker-compose logs -f --tail=10 web\r\n```\r\n### Relevant url?\r\n<!-- `https://openlibrary.org/...` -->\r\n1. http://localhost:8080/works/OL6037022W/Remix?debug=true\r\n2. On the image, click Add Cover Image\r\n3. Browse and select an appropriate local image file and click Submit\r\n4. Internal Server Error\r\n\r\n### Steps to Reproduce\r\n<!-- What steps caused you to find the bug? -->\r\n1. Go to ...\r\n2. Do ...\r\n\r\n<!-- What actually happened after these steps? What did you expect to happen? -->\r\n* Actual: \r\n* Expected: \r\n\r\n### Details\r\n\r\n- **Logged in (Y/N)?** \r\n- **Browser type/version?** \r\n- **Operating system?** \r\n- **Environment (prod/dev/local)?** prod\r\n<!-- If not sure, put prod -->\r\n\r\n### Proposal & Constraints\r\n<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->\r\n\r\n### Related files\r\n<!-- Files related to this issue; this is super useful for new contributors who might want to help! If you're not sure, leave this blank; a maintainer will add them. 
-->\r\n\r\n### Stakeholders\r\n<!-- @ tag stakeholders of this bug -->\n", "before_files": [{"content": "\"\"\"Handle book cover/author photo upload.\n\"\"\"\nimport web\nimport simplejson\n\nfrom infogami.utils import delegate\nfrom infogami.utils.view import safeint\nfrom openlibrary import accounts\nfrom openlibrary.plugins.upstream.models import Image\nfrom openlibrary.plugins.upstream.utils import get_coverstore_url, render_template\n\nfrom six.moves import urllib\n\n\ndef setup():\n pass\n\nclass add_cover(delegate.page):\n path = \"(/books/OL\\d+M)/add-cover\"\n cover_category = \"b\"\n\n def GET(self, key):\n book = web.ctx.site.get(key)\n return render_template('covers/add', book)\n\n def POST(self, key):\n book = web.ctx.site.get(key)\n if not book:\n raise web.notfound(\"\")\n\n i = web.input(file={}, url=\"\")\n\n # remove references to field storage objects\n web.ctx.pop(\"_fieldstorage\", None)\n\n data = self.upload(key, i)\n coverid = data.get('id')\n\n if coverid:\n self.save(book, coverid, url=i.url)\n cover = Image(web.ctx.site, \"b\", coverid)\n return render_template(\"covers/saved\", cover)\n else:\n return render_template(\"covers/add\", book, {'url': i.url}, data)\n\n def upload(self, key, i):\n \"\"\"Uploads a cover to coverstore and returns the response.\"\"\"\n olid = key.split(\"/\")[-1]\n\n if i.file is not None and hasattr(i.file, 'value'):\n data = i.file.value\n else:\n data = None\n\n if i.url and i.url.strip() == \"http://\":\n i.url = \"\"\n\n user = accounts.get_current_user()\n params = {\n \"author\": user and user.key,\n \"data\": data,\n \"source_url\": i.url,\n \"olid\": olid,\n \"ip\": web.ctx.ip\n }\n\n upload_url = '%s/%s/upload2' % (\n get_coverstore_url(), self.cover_category)\n\n if upload_url.startswith(\"//\"):\n upload_url = \"http:\" + upload_url\n\n try:\n response = urllib.request.urlopen(upload_url, urllib.parse.urlencode(params))\n out = response.read()\n except urllib.error.HTTPError as e:\n out = {'error': e.read()}\n\n return web.storage(simplejson.loads(out))\n\n def save(self, book, coverid, url=None):\n book.covers = [coverid] + [cover.id for cover in book.get_covers()]\n book._save(\"Added new cover\", action=\"add-cover\", data={\"url\": url})\n\nclass add_work_cover(add_cover):\n path = \"(/works/OL\\d+W)/add-cover\"\n cover_category = \"w\"\n\n def upload(self, key, i):\n if \"coverid\" in i and safeint(i.coverid):\n return web.storage(id=int(i.coverid))\n else:\n return add_cover.upload(self, key, i)\n\nclass add_photo(add_cover):\n path = \"(/authors/OL\\d+A)/add-photo\"\n cover_category = \"a\"\n\n def save(self, author, photoid, url=None):\n author.photos = [photoid] + [photo.id for photo in author.get_photos()]\n author._save(\"Added new photo\", action=\"add-photo\", data={\"url\": url})\n\nclass manage_covers(delegate.page):\n path = \"(/books/OL\\d+M)/manage-covers\"\n def GET(self, key):\n book = web.ctx.site.get(key)\n if not book:\n raise web.notfound()\n return render_template(\"covers/manage\", key, self.get_images(book))\n\n def get_images(self, book):\n return book.get_covers()\n\n def get_image(self, book):\n return book.get_cover()\n\n def save_images(self, book, covers):\n book.covers = covers\n book._save('Update covers')\n\n def POST(self, key):\n book = web.ctx.site.get(key)\n if not book:\n raise web.notfound()\n\n images = web.input(image=[]).image\n if '-' in images:\n images = [int(id) for id in images[:images.index('-')]]\n self.save_images(book, images)\n return render_template(\"covers/saved\", 
self.get_image(book), showinfo=False)\n else:\n # ERROR\n pass\n\nclass manage_work_covers(manage_covers):\n path = \"(/works/OL\\d+W)/manage-covers\"\n\n\nclass manage_photos(manage_covers):\n path = \"(/authors/OL\\d+A)/manage-photos\"\n\n def get_images(self, author):\n return author.get_photos()\n\n def get_image(self, author):\n return author.get_photo()\n\n def save_images(self, author, photos):\n author.photos = photos\n author._save('Update photos')\n", "path": "openlibrary/plugins/upstream/covers.py"}], "after_files": [{"content": "\"\"\"Handle book cover/author photo upload.\n\"\"\"\nimport requests\nimport web\n\nfrom infogami.utils import delegate\nfrom infogami.utils.view import safeint\nfrom openlibrary import accounts\nfrom openlibrary.plugins.upstream.models import Image\nfrom openlibrary.plugins.upstream.utils import get_coverstore_url, render_template\n\n\ndef setup():\n pass\n\nclass add_cover(delegate.page):\n path = \"(/books/OL\\d+M)/add-cover\"\n cover_category = \"b\"\n\n def GET(self, key):\n book = web.ctx.site.get(key)\n return render_template('covers/add', book)\n\n def POST(self, key):\n book = web.ctx.site.get(key)\n if not book:\n raise web.notfound(\"\")\n\n i = web.input(file={}, url=\"\")\n\n # remove references to field storage objects\n web.ctx.pop(\"_fieldstorage\", None)\n\n data = self.upload(key, i)\n coverid = data.get('id')\n\n if coverid:\n self.save(book, coverid, url=i.url)\n cover = Image(web.ctx.site, \"b\", coverid)\n return render_template(\"covers/saved\", cover)\n else:\n return render_template(\"covers/add\", book, {'url': i.url}, data)\n\n def upload(self, key, i):\n \"\"\"Uploads a cover to coverstore and returns the response.\"\"\"\n olid = key.split(\"/\")[-1]\n\n if i.file is not None and hasattr(i.file, 'value'):\n data = i.file.value\n else:\n data = None\n\n if i.url and i.url.strip() == \"http://\":\n i.url = \"\"\n\n user = accounts.get_current_user()\n params = {\n \"author\": user and user.key,\n \"data\": data,\n \"source_url\": i.url,\n \"olid\": olid,\n \"ip\": web.ctx.ip\n }\n\n upload_url = '%s/%s/upload2' % (\n get_coverstore_url(), self.cover_category)\n\n if upload_url.startswith(\"//\"):\n upload_url = \"http:\" + upload_url\n\n try:\n payload = requests.compat.urlencode(params).encode('utf-8')\n return web.storage(requests.post(upload_url, data=payload).json())\n except requests.HTTPError as e:\n return web.storage({'error': e.read()})\n\n def save(self, book, coverid, url=None):\n book.covers = [coverid] + [cover.id for cover in book.get_covers()]\n book._save(\"Added new cover\", action=\"add-cover\", data={\"url\": url})\n\nclass add_work_cover(add_cover):\n path = \"(/works/OL\\d+W)/add-cover\"\n cover_category = \"w\"\n\n def upload(self, key, i):\n if \"coverid\" in i and safeint(i.coverid):\n return web.storage(id=int(i.coverid))\n else:\n return add_cover.upload(self, key, i)\n\nclass add_photo(add_cover):\n path = \"(/authors/OL\\d+A)/add-photo\"\n cover_category = \"a\"\n\n def save(self, author, photoid, url=None):\n author.photos = [photoid] + [photo.id for photo in author.get_photos()]\n author._save(\"Added new photo\", action=\"add-photo\", data={\"url\": url})\n\nclass manage_covers(delegate.page):\n path = \"(/books/OL\\d+M)/manage-covers\"\n def GET(self, key):\n book = web.ctx.site.get(key)\n if not book:\n raise web.notfound()\n return render_template(\"covers/manage\", key, self.get_images(book))\n\n def get_images(self, book):\n return book.get_covers()\n\n def get_image(self, book):\n return 
book.get_cover()\n\n def save_images(self, book, covers):\n book.covers = covers\n book._save('Update covers')\n\n def POST(self, key):\n book = web.ctx.site.get(key)\n if not book:\n raise web.notfound()\n\n images = web.input(image=[]).image\n if '-' in images:\n images = [int(id) for id in images[:images.index('-')]]\n self.save_images(book, images)\n return render_template(\"covers/saved\", self.get_image(book), showinfo=False)\n else:\n # ERROR\n pass\n\nclass manage_work_covers(manage_covers):\n path = \"(/works/OL\\d+W)/manage-covers\"\n\n\nclass manage_photos(manage_covers):\n path = \"(/authors/OL\\d+A)/manage-photos\"\n\n def get_images(self, author):\n return author.get_photos()\n\n def get_image(self, author):\n return author.get_photo()\n\n def save_images(self, author, photos):\n author.photos = photos\n author._save('Update photos')\n", "path": "openlibrary/plugins/upstream/covers.py"}]}
| 2,079 | 314 |
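The golden diff above swaps `urllib.request.urlopen` for `requests.post`. A minimal, self-contained sketch of that migration pattern follows; the URL and form fields here are placeholders, not the real coverstore API:

```python
import requests

# Placeholder endpoint and form fields, for illustration only.
upload_url = "https://example.com/upload"
params = {"olid": "OL123M", "source_url": ""}

try:
    # requests form-encodes a dict passed via `data=` on its own,
    # so no explicit urlencode step is needed in this sketch.
    response = requests.post(upload_url, data=params, timeout=10)
    response.raise_for_status()  # raises requests.HTTPError on 4xx/5xx responses
    result = response.json()
except requests.HTTPError as exc:
    result = {"error": str(exc)}
```

Unlike the record's own fix, which encodes the payload explicitly with `requests.compat.urlencode`, this sketch lets `requests` handle the form encoding and uses `raise_for_status()` to surface HTTP errors.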
gh_patches_debug_4333
|
rasdani/github-patches
|
git_diff
|
joke2k__faker-1416
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pytuple can occassionally return fewer than expected elements
* Faker version: 6.1.1
* OS: Mac OSX 10.15.5
Sporadically using pytuple may result in the tuple having less than the requested elements (`nb_elements`) even when `variable_nb_elements` is set to False.
This happens because pytuple relies on pyset, returning `tuple(self.pyset(...))`. Because it delegates to a set rather than a list, any duplicate numbers generated will result in the set, and following that the tuple, having fewer numbers than expected.
Suggest that the appropriate fix might be to use `pylist` instead of `pyset`
### Steps to reproduce
1. Specify `nb_elements = 3` in a call to pytuple, `variable_nb_elements= False`
2. Repeat until the tuple 'randomly' contains less than 3 elements
```python
import faker
fake = faker.Faker()
for x in range(10000):
random_tuple = fake.pytuple(nb_elements=3, variable_nb_elements=False, value_types=[int])
assert len(random_tuple) == 3, f"Tuple {random_tuple} not len 3 at iteration {x}"
```
### Expected behavior
When calling pytuple with `nb_elements = 3` and `variable_nb_elements = False` the tuple should always contain 3 elements, even if there are duplicate values.
### Actual behavior
Sporadically the tuple contains less than 3 elements.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `faker/providers/python/__init__.py`
Content:
```
1 import string
2 import sys
3 import warnings
4
5 from decimal import Decimal
6
7 from .. import BaseProvider
8
9
10 class Provider(BaseProvider):
11 default_value_types = (
12 'str', 'str', 'str', 'str', 'float', 'int', 'int', 'decimal',
13 'date_time', 'uri', 'email',
14 )
15
16 def _check_signature(self, value_types, allowed_types):
17 if value_types is not None and not isinstance(value_types, (list, tuple)):
18 value_types = [value_types]
19 warnings.warn(
20 'Passing value types as positional arguments is going to be '
21 'deprecated. Pass them as a list or tuple instead.',
22 PendingDeprecationWarning,
23 )
24 if value_types is None:
25 value_types = ()
26 return tuple(value_types) + allowed_types
27
28 def pybool(self):
29 return self.random_int(0, 1) == 1
30
31 def pystr(self, min_chars=None, max_chars=20):
32 """
33 Generates a random string of upper and lowercase letters.
34 :type min_chars: int
35 :type max_chars: int
36 :return: String. Random of random length between min and max characters.
37 """
38 if min_chars is None:
39 return "".join(self.random_letters(length=max_chars))
40 else:
41 assert (
42 max_chars >= min_chars), "Maximum length must be greater than or equal to minimum length"
43 return "".join(
44 self.random_letters(
45 length=self.generator.random.randint(min_chars, max_chars),
46 ),
47 )
48
49 def pystr_format(self, string_format='?#-###{{random_int}}{{random_letter}}', letters=string.ascii_letters):
50 return self.bothify(self.generator.parse(string_format), letters=letters)
51
52 def pyfloat(self, left_digits=None, right_digits=None, positive=False,
53 min_value=None, max_value=None):
54 if left_digits is not None and left_digits < 0:
55 raise ValueError(
56 'A float number cannot have less than 0 digits in its '
57 'integer part')
58 if right_digits is not None and right_digits < 0:
59 raise ValueError(
60 'A float number cannot have less than 0 digits in its '
61 'fractional part')
62 if left_digits == 0 and right_digits == 0:
63 raise ValueError(
64 'A float number cannot have less than 0 digits in total')
65 if None not in (min_value, max_value) and min_value > max_value:
66 raise ValueError('Min value cannot be greater than max value')
67 if None not in (min_value, max_value) and min_value == max_value:
68 raise ValueError('Min and max value cannot be the same')
69 if positive and min_value is not None and min_value <= 0:
70 raise ValueError(
71 'Cannot combine positive=True with negative or zero min_value')
72
73 left_digits = left_digits if left_digits is not None else (
74 self.random_int(1, sys.float_info.dig))
75 right_digits = right_digits if right_digits is not None else (
76 self.random_int(0, sys.float_info.dig - left_digits))
77 sign = ''
78 if (min_value is not None) or (max_value is not None):
79 if max_value is not None and max_value < 0:
80 max_value += 1 # as the random_int will be generated up to max_value - 1
81 if min_value is not None and min_value < 0:
82 min_value += 1 # as we then append digits after the left_number
83 left_number = self._safe_random_int(
84 min_value, max_value, positive,
85 )
86 else:
87 sign = '+' if positive else self.random_element(('+', '-'))
88 left_number = self.random_number(left_digits)
89
90 result = float(f'{sign}{left_number}.{self.random_number(right_digits)}')
91 if positive and result == 0:
92 if right_digits:
93 result = float('0.' + '0' * (right_digits - 1) + '1')
94 else:
95 result += sys.float_info.epsilon
96 return result
97
98 def _safe_random_int(self, min_value, max_value, positive):
99 orig_min_value = min_value
100 orig_max_value = max_value
101
102 if min_value is None:
103 min_value = max_value - self.random_int()
104 if max_value is None:
105 max_value = min_value + self.random_int()
106 if positive:
107 min_value = max(min_value, 0)
108
109 if min_value == max_value:
110 return self._safe_random_int(orig_min_value, orig_max_value, positive)
111 else:
112 return self.random_int(min_value, max_value - 1)
113
114 def pyint(self, min_value=0, max_value=9999, step=1):
115 return self.generator.random_int(min_value, max_value, step=step)
116
117 def pydecimal(self, left_digits=None, right_digits=None, positive=False,
118 min_value=None, max_value=None):
119
120 float_ = self.pyfloat(
121 left_digits, right_digits, positive, min_value, max_value)
122 return Decimal(str(float_))
123
124 def pytuple(self, nb_elements=10, variable_nb_elements=True, value_types=None, *allowed_types):
125 return tuple(
126 self.pyset(
127 nb_elements,
128 variable_nb_elements,
129 value_types,
130 *allowed_types))
131
132 def pyset(self, nb_elements=10, variable_nb_elements=True, value_types=None, *allowed_types):
133 return set(
134 self._pyiterable(
135 nb_elements,
136 variable_nb_elements,
137 value_types,
138 *allowed_types))
139
140 def pylist(self, nb_elements=10, variable_nb_elements=True, value_types=None, *allowed_types):
141 return list(
142 self._pyiterable(
143 nb_elements,
144 variable_nb_elements,
145 value_types,
146 *allowed_types))
147
148 def pyiterable(
149 self,
150 nb_elements=10,
151 variable_nb_elements=True,
152 value_types=None,
153 *allowed_types):
154 value_types = self._check_signature(value_types, allowed_types)
155 return self.random_element([self.pylist, self.pytuple, self.pyset])(
156 nb_elements, variable_nb_elements, value_types, *allowed_types)
157
158 def _random_type(self, type_list):
159 value_type = self.random_element(type_list)
160
161 method_name = f'py{value_type}'
162 if hasattr(self, method_name):
163 value_type = method_name
164
165 return self.generator.format(value_type)
166
167 def _pyiterable(
168 self,
169 nb_elements=10,
170 variable_nb_elements=True,
171 value_types=None,
172 *allowed_types):
173
174 value_types = self._check_signature(value_types, allowed_types)
175
176 value_types = [t if isinstance(t, str) else getattr(t, '__name__', type(t).__name__).lower()
177 for t in value_types
178 # avoid recursion
179 if t not in ['iterable', 'list', 'tuple', 'dict', 'set']]
180 if not value_types:
181 value_types = self.default_value_types
182
183 if variable_nb_elements:
184 nb_elements = self.randomize_nb_elements(nb_elements, min=1)
185
186 for _ in range(nb_elements):
187 yield self._random_type(value_types)
188
189 def pydict(self, nb_elements=10, variable_nb_elements=True, value_types=None, *allowed_types):
190 """
191 Returns a dictionary.
192
193 :nb_elements: number of elements for dictionary
194 :variable_nb_elements: is use variable number of elements for dictionary
195 :value_types: type of dictionary values
196 """
197 if variable_nb_elements:
198 nb_elements = self.randomize_nb_elements(nb_elements, min=1)
199
200 return dict(zip(
201 self.generator.words(nb_elements, unique=True),
202 self._pyiterable(nb_elements, False, value_types, *allowed_types),
203 ))
204
205 def pystruct(self, count=10, value_types=None, *allowed_types):
206 value_types = self._check_signature(value_types, allowed_types)
207
208 value_types = [t if isinstance(t, str) else getattr(t, '__name__', type(t).__name__).lower()
209 for t in value_types
210 # avoid recursion
211 if t != 'struct']
212 if not value_types:
213 value_types = self.default_value_types
214
215 types = []
216 d = {}
217 nd = {}
218 for i in range(count):
219 d[self.generator.word()] = self._random_type(value_types)
220 types.append(self._random_type(value_types))
221 nd[self.generator.word()] = {i: self._random_type(value_types),
222 i + 1: [self._random_type(value_types),
223 self._random_type(value_types),
224 self._random_type(value_types)],
225 i + 2: {i: self._random_type(value_types),
226 i + 1: self._random_type(value_types),
227 i + 2: [self._random_type(value_types),
228 self._random_type(value_types)]}}
229 return types, d, nd
230
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/faker/providers/python/__init__.py b/faker/providers/python/__init__.py
--- a/faker/providers/python/__init__.py
+++ b/faker/providers/python/__init__.py
@@ -123,7 +123,7 @@
def pytuple(self, nb_elements=10, variable_nb_elements=True, value_types=None, *allowed_types):
return tuple(
- self.pyset(
+ self._pyiterable(
nb_elements,
variable_nb_elements,
value_types,
|
{"golden_diff": "diff --git a/faker/providers/python/__init__.py b/faker/providers/python/__init__.py\n--- a/faker/providers/python/__init__.py\n+++ b/faker/providers/python/__init__.py\n@@ -123,7 +123,7 @@\n \n def pytuple(self, nb_elements=10, variable_nb_elements=True, value_types=None, *allowed_types):\n return tuple(\n- self.pyset(\n+ self._pyiterable(\n nb_elements,\n variable_nb_elements,\n value_types,\n", "issue": "pytuple can occassionally return fewer than expected elements\n* Faker version: 6.1.1\r\n* OS: Mac OSX 10.15.5\r\n\r\nSporadically using pytuple may result in the tuple having less than the requested elements (`nb_elements`) even when `variable_nb_elements` is set to False.\r\n\r\nThis happens because pytuple relies on pyset, returning `tuple(self.pyset(...))`. Because it delegates to a set rather than a list, any duplicate numbers generated will result in the set, and following that the tuple, having fewer numbers than expected.\r\n\r\nSuggest that the appropriate fix might be to use `pylist` instead of `pyset`\r\n\r\n### Steps to reproduce\r\n\r\n1. Specify `nb_elements = 3` in a call to pytuple, `variable_nb_elements= False`\r\n2. Repeat until the tuple 'randomly' contains less than 3 elements \r\n\r\n```python\r\nimport faker\r\n\r\nfake = faker.Faker()\r\n\r\nfor x in range(10000):\r\n random_tuple = fake.pytuple(nb_elements=3, variable_nb_elements=False, value_types=[int])\r\n assert len(random_tuple) == 3, f\"Tuple {random_tuple} not len 3 at iteration {x}\"\r\n```\r\n\r\n### Expected behavior\r\n\r\nWhen calling pytuple with `nb_elements = 3` and `variable_nb_elements = False` the tuple should always contain 3 elements, even if there are duplicate values.\r\n\r\n### Actual behavior\r\n\r\nSporadically the tuple contains less than 3 elements.\r\n\n", "before_files": [{"content": "import string\nimport sys\nimport warnings\n\nfrom decimal import Decimal\n\nfrom .. import BaseProvider\n\n\nclass Provider(BaseProvider):\n default_value_types = (\n 'str', 'str', 'str', 'str', 'float', 'int', 'int', 'decimal',\n 'date_time', 'uri', 'email',\n )\n\n def _check_signature(self, value_types, allowed_types):\n if value_types is not None and not isinstance(value_types, (list, tuple)):\n value_types = [value_types]\n warnings.warn(\n 'Passing value types as positional arguments is going to be '\n 'deprecated. Pass them as a list or tuple instead.',\n PendingDeprecationWarning,\n )\n if value_types is None:\n value_types = ()\n return tuple(value_types) + allowed_types\n\n def pybool(self):\n return self.random_int(0, 1) == 1\n\n def pystr(self, min_chars=None, max_chars=20):\n \"\"\"\n Generates a random string of upper and lowercase letters.\n :type min_chars: int\n :type max_chars: int\n :return: String. 
Random of random length between min and max characters.\n \"\"\"\n if min_chars is None:\n return \"\".join(self.random_letters(length=max_chars))\n else:\n assert (\n max_chars >= min_chars), \"Maximum length must be greater than or equal to minimum length\"\n return \"\".join(\n self.random_letters(\n length=self.generator.random.randint(min_chars, max_chars),\n ),\n )\n\n def pystr_format(self, string_format='?#-###{{random_int}}{{random_letter}}', letters=string.ascii_letters):\n return self.bothify(self.generator.parse(string_format), letters=letters)\n\n def pyfloat(self, left_digits=None, right_digits=None, positive=False,\n min_value=None, max_value=None):\n if left_digits is not None and left_digits < 0:\n raise ValueError(\n 'A float number cannot have less than 0 digits in its '\n 'integer part')\n if right_digits is not None and right_digits < 0:\n raise ValueError(\n 'A float number cannot have less than 0 digits in its '\n 'fractional part')\n if left_digits == 0 and right_digits == 0:\n raise ValueError(\n 'A float number cannot have less than 0 digits in total')\n if None not in (min_value, max_value) and min_value > max_value:\n raise ValueError('Min value cannot be greater than max value')\n if None not in (min_value, max_value) and min_value == max_value:\n raise ValueError('Min and max value cannot be the same')\n if positive and min_value is not None and min_value <= 0:\n raise ValueError(\n 'Cannot combine positive=True with negative or zero min_value')\n\n left_digits = left_digits if left_digits is not None else (\n self.random_int(1, sys.float_info.dig))\n right_digits = right_digits if right_digits is not None else (\n self.random_int(0, sys.float_info.dig - left_digits))\n sign = ''\n if (min_value is not None) or (max_value is not None):\n if max_value is not None and max_value < 0:\n max_value += 1 # as the random_int will be generated up to max_value - 1\n if min_value is not None and min_value < 0:\n min_value += 1 # as we then append digits after the left_number\n left_number = self._safe_random_int(\n min_value, max_value, positive,\n )\n else:\n sign = '+' if positive else self.random_element(('+', '-'))\n left_number = self.random_number(left_digits)\n\n result = float(f'{sign}{left_number}.{self.random_number(right_digits)}')\n if positive and result == 0:\n if right_digits:\n result = float('0.' 
+ '0' * (right_digits - 1) + '1')\n else:\n result += sys.float_info.epsilon\n return result\n\n def _safe_random_int(self, min_value, max_value, positive):\n orig_min_value = min_value\n orig_max_value = max_value\n\n if min_value is None:\n min_value = max_value - self.random_int()\n if max_value is None:\n max_value = min_value + self.random_int()\n if positive:\n min_value = max(min_value, 0)\n\n if min_value == max_value:\n return self._safe_random_int(orig_min_value, orig_max_value, positive)\n else:\n return self.random_int(min_value, max_value - 1)\n\n def pyint(self, min_value=0, max_value=9999, step=1):\n return self.generator.random_int(min_value, max_value, step=step)\n\n def pydecimal(self, left_digits=None, right_digits=None, positive=False,\n min_value=None, max_value=None):\n\n float_ = self.pyfloat(\n left_digits, right_digits, positive, min_value, max_value)\n return Decimal(str(float_))\n\n def pytuple(self, nb_elements=10, variable_nb_elements=True, value_types=None, *allowed_types):\n return tuple(\n self.pyset(\n nb_elements,\n variable_nb_elements,\n value_types,\n *allowed_types))\n\n def pyset(self, nb_elements=10, variable_nb_elements=True, value_types=None, *allowed_types):\n return set(\n self._pyiterable(\n nb_elements,\n variable_nb_elements,\n value_types,\n *allowed_types))\n\n def pylist(self, nb_elements=10, variable_nb_elements=True, value_types=None, *allowed_types):\n return list(\n self._pyiterable(\n nb_elements,\n variable_nb_elements,\n value_types,\n *allowed_types))\n\n def pyiterable(\n self,\n nb_elements=10,\n variable_nb_elements=True,\n value_types=None,\n *allowed_types):\n value_types = self._check_signature(value_types, allowed_types)\n return self.random_element([self.pylist, self.pytuple, self.pyset])(\n nb_elements, variable_nb_elements, value_types, *allowed_types)\n\n def _random_type(self, type_list):\n value_type = self.random_element(type_list)\n\n method_name = f'py{value_type}'\n if hasattr(self, method_name):\n value_type = method_name\n\n return self.generator.format(value_type)\n\n def _pyiterable(\n self,\n nb_elements=10,\n variable_nb_elements=True,\n value_types=None,\n *allowed_types):\n\n value_types = self._check_signature(value_types, allowed_types)\n\n value_types = [t if isinstance(t, str) else getattr(t, '__name__', type(t).__name__).lower()\n for t in value_types\n # avoid recursion\n if t not in ['iterable', 'list', 'tuple', 'dict', 'set']]\n if not value_types:\n value_types = self.default_value_types\n\n if variable_nb_elements:\n nb_elements = self.randomize_nb_elements(nb_elements, min=1)\n\n for _ in range(nb_elements):\n yield self._random_type(value_types)\n\n def pydict(self, nb_elements=10, variable_nb_elements=True, value_types=None, *allowed_types):\n \"\"\"\n Returns a dictionary.\n\n :nb_elements: number of elements for dictionary\n :variable_nb_elements: is use variable number of elements for dictionary\n :value_types: type of dictionary values\n \"\"\"\n if variable_nb_elements:\n nb_elements = self.randomize_nb_elements(nb_elements, min=1)\n\n return dict(zip(\n self.generator.words(nb_elements, unique=True),\n self._pyiterable(nb_elements, False, value_types, *allowed_types),\n ))\n\n def pystruct(self, count=10, value_types=None, *allowed_types):\n value_types = self._check_signature(value_types, allowed_types)\n\n value_types = [t if isinstance(t, str) else getattr(t, '__name__', type(t).__name__).lower()\n for t in value_types\n # avoid recursion\n if t != 'struct']\n if not value_types:\n 
value_types = self.default_value_types\n\n types = []\n d = {}\n nd = {}\n for i in range(count):\n d[self.generator.word()] = self._random_type(value_types)\n types.append(self._random_type(value_types))\n nd[self.generator.word()] = {i: self._random_type(value_types),\n i + 1: [self._random_type(value_types),\n self._random_type(value_types),\n self._random_type(value_types)],\n i + 2: {i: self._random_type(value_types),\n i + 1: self._random_type(value_types),\n i + 2: [self._random_type(value_types),\n self._random_type(value_types)]}}\n return types, d, nd\n", "path": "faker/providers/python/__init__.py"}], "after_files": [{"content": "import string\nimport sys\nimport warnings\n\nfrom decimal import Decimal\n\nfrom .. import BaseProvider\n\n\nclass Provider(BaseProvider):\n default_value_types = (\n 'str', 'str', 'str', 'str', 'float', 'int', 'int', 'decimal',\n 'date_time', 'uri', 'email',\n )\n\n def _check_signature(self, value_types, allowed_types):\n if value_types is not None and not isinstance(value_types, (list, tuple)):\n value_types = [value_types]\n warnings.warn(\n 'Passing value types as positional arguments is going to be '\n 'deprecated. Pass them as a list or tuple instead.',\n PendingDeprecationWarning,\n )\n if value_types is None:\n value_types = ()\n return tuple(value_types) + allowed_types\n\n def pybool(self):\n return self.random_int(0, 1) == 1\n\n def pystr(self, min_chars=None, max_chars=20):\n \"\"\"\n Generates a random string of upper and lowercase letters.\n :type min_chars: int\n :type max_chars: int\n :return: String. Random of random length between min and max characters.\n \"\"\"\n if min_chars is None:\n return \"\".join(self.random_letters(length=max_chars))\n else:\n assert (\n max_chars >= min_chars), \"Maximum length must be greater than or equal to minimum length\"\n return \"\".join(\n self.random_letters(\n length=self.generator.random.randint(min_chars, max_chars),\n ),\n )\n\n def pystr_format(self, string_format='?#-###{{random_int}}{{random_letter}}', letters=string.ascii_letters):\n return self.bothify(self.generator.parse(string_format), letters=letters)\n\n def pyfloat(self, left_digits=None, right_digits=None, positive=False,\n min_value=None, max_value=None):\n if left_digits is not None and left_digits < 0:\n raise ValueError(\n 'A float number cannot have less than 0 digits in its '\n 'integer part')\n if right_digits is not None and right_digits < 0:\n raise ValueError(\n 'A float number cannot have less than 0 digits in its '\n 'fractional part')\n if left_digits == 0 and right_digits == 0:\n raise ValueError(\n 'A float number cannot have less than 0 digits in total')\n if None not in (min_value, max_value) and min_value > max_value:\n raise ValueError('Min value cannot be greater than max value')\n if None not in (min_value, max_value) and min_value == max_value:\n raise ValueError('Min and max value cannot be the same')\n if positive and min_value is not None and min_value <= 0:\n raise ValueError(\n 'Cannot combine positive=True with negative or zero min_value')\n\n left_digits = left_digits if left_digits is not None else (\n self.random_int(1, sys.float_info.dig))\n right_digits = right_digits if right_digits is not None else (\n self.random_int(0, sys.float_info.dig - left_digits))\n sign = ''\n if (min_value is not None) or (max_value is not None):\n if max_value is not None and max_value < 0:\n max_value += 1 # as the random_int will be generated up to max_value - 1\n if min_value is not None and min_value < 0:\n 
min_value += 1 # as we then append digits after the left_number\n left_number = self._safe_random_int(\n min_value, max_value, positive,\n )\n else:\n sign = '+' if positive else self.random_element(('+', '-'))\n left_number = self.random_number(left_digits)\n\n result = float(f'{sign}{left_number}.{self.random_number(right_digits)}')\n if positive and result == 0:\n if right_digits:\n result = float('0.' + '0' * (right_digits - 1) + '1')\n else:\n result += sys.float_info.epsilon\n return result\n\n def _safe_random_int(self, min_value, max_value, positive):\n orig_min_value = min_value\n orig_max_value = max_value\n\n if min_value is None:\n min_value = max_value - self.random_int()\n if max_value is None:\n max_value = min_value + self.random_int()\n if positive:\n min_value = max(min_value, 0)\n\n if min_value == max_value:\n return self._safe_random_int(orig_min_value, orig_max_value, positive)\n else:\n return self.random_int(min_value, max_value - 1)\n\n def pyint(self, min_value=0, max_value=9999, step=1):\n return self.generator.random_int(min_value, max_value, step=step)\n\n def pydecimal(self, left_digits=None, right_digits=None, positive=False,\n min_value=None, max_value=None):\n\n float_ = self.pyfloat(\n left_digits, right_digits, positive, min_value, max_value)\n return Decimal(str(float_))\n\n def pytuple(self, nb_elements=10, variable_nb_elements=True, value_types=None, *allowed_types):\n return tuple(\n self._pyiterable(\n nb_elements,\n variable_nb_elements,\n value_types,\n *allowed_types))\n\n def pyset(self, nb_elements=10, variable_nb_elements=True, value_types=None, *allowed_types):\n return set(\n self._pyiterable(\n nb_elements,\n variable_nb_elements,\n value_types,\n *allowed_types))\n\n def pylist(self, nb_elements=10, variable_nb_elements=True, value_types=None, *allowed_types):\n return list(\n self._pyiterable(\n nb_elements,\n variable_nb_elements,\n value_types,\n *allowed_types))\n\n def pyiterable(\n self,\n nb_elements=10,\n variable_nb_elements=True,\n value_types=None,\n *allowed_types):\n value_types = self._check_signature(value_types, allowed_types)\n return self.random_element([self.pylist, self.pytuple, self.pyset])(\n nb_elements, variable_nb_elements, value_types, *allowed_types)\n\n def _random_type(self, type_list):\n value_type = self.random_element(type_list)\n\n method_name = f'py{value_type}'\n if hasattr(self, method_name):\n value_type = method_name\n\n return self.generator.format(value_type)\n\n def _pyiterable(\n self,\n nb_elements=10,\n variable_nb_elements=True,\n value_types=None,\n *allowed_types):\n\n value_types = self._check_signature(value_types, allowed_types)\n\n value_types = [t if isinstance(t, str) else getattr(t, '__name__', type(t).__name__).lower()\n for t in value_types\n # avoid recursion\n if t not in ['iterable', 'list', 'tuple', 'dict', 'set']]\n if not value_types:\n value_types = self.default_value_types\n\n if variable_nb_elements:\n nb_elements = self.randomize_nb_elements(nb_elements, min=1)\n\n for _ in range(nb_elements):\n yield self._random_type(value_types)\n\n def pydict(self, nb_elements=10, variable_nb_elements=True, value_types=None, *allowed_types):\n \"\"\"\n Returns a dictionary.\n\n :nb_elements: number of elements for dictionary\n :variable_nb_elements: is use variable number of elements for dictionary\n :value_types: type of dictionary values\n \"\"\"\n if variable_nb_elements:\n nb_elements = self.randomize_nb_elements(nb_elements, min=1)\n\n return dict(zip(\n 
self.generator.words(nb_elements, unique=True),\n self._pyiterable(nb_elements, False, value_types, *allowed_types),\n ))\n\n def pystruct(self, count=10, value_types=None, *allowed_types):\n value_types = self._check_signature(value_types, allowed_types)\n\n value_types = [t if isinstance(t, str) else getattr(t, '__name__', type(t).__name__).lower()\n for t in value_types\n # avoid recursion\n if t != 'struct']\n if not value_types:\n value_types = self.default_value_types\n\n types = []\n d = {}\n nd = {}\n for i in range(count):\n d[self.generator.word()] = self._random_type(value_types)\n types.append(self._random_type(value_types))\n nd[self.generator.word()] = {i: self._random_type(value_types),\n i + 1: [self._random_type(value_types),\n self._random_type(value_types),\n self._random_type(value_types)],\n i + 2: {i: self._random_type(value_types),\n i + 1: self._random_type(value_types),\n i + 2: [self._random_type(value_types),\n self._random_type(value_types)]}}\n return types, d, nd\n", "path": "faker/providers/python/__init__.py"}]}
| 3,113 | 115 |
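The failure described in this record comes down to set deduplication: `pytuple` built its tuple from `pyset`, so any repeated random values shrank the result. A tiny illustrative sketch, with values chosen by hand to collide:

```python
# Three generated values where two happen to collide:
values = [7, 7, 3]

via_set = tuple(set(values))   # duplicates collapse -> only 2 elements
via_list = tuple(values)       # duplicates preserved -> all 3 elements

assert len(via_set) == 2
assert len(via_list) == 3
```

This is why the golden diff builds the tuple directly from `_pyiterable`, a generator of values, instead of going through `pyset`.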
gh_patches_debug_20226
|
rasdani/github-patches
|
git_diff
|
frappe__frappe-6468
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Auto Email Report CSV Format Broken
When downloading (or sending) an auto email report with format "CSV" the following error occurs:
```
raceback (most recent call last):
File "/home/frappe/frappe-bench/apps/frappe/frappe/app.py", line 66, in application
response = frappe.api.handle()
File "/home/frappe/frappe-bench/apps/frappe/frappe/api.py", line 56, in handle
return frappe.handler.handle()
File "/home/frappe/frappe-bench/apps/frappe/frappe/handler.py", line 21, in handle
data = execute_cmd(cmd)
File "/home/frappe/frappe-bench/apps/frappe/frappe/handler.py", line 56, in execute_cmd
return frappe.call(method, **frappe.form_dict)
File "/home/frappe/frappe-bench/apps/frappe/frappe/__init__.py", line 1007, in call
return fn(*args, **newargs)
File "/home/frappe/frappe-bench/apps/frappe/frappe/email/doctype/auto_email_report/auto_email_report.py", line 153, in download
data = auto_email_report.get_report_content()
File "/home/frappe/frappe-bench/apps/frappe/frappe/email/doctype/auto_email_report/auto_email_report.py", line 61, in get_report_content
filters = self.filters, as_dict=True)
File "/home/frappe/frappe-bench/apps/frappe/frappe/core/doctype/report/report.py", line 152, in get_data
order_by = _format(self.ref_doctype, 'modified') + ' desc'
TypeError: _format() takes exactly 1 argument (2 given)
```
We're using the latest frappe 11.0.3-beta.21.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `frappe/core/doctype/report/report.py`
Content:
```
1 # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
2 # MIT License. See license.txt
3
4 from __future__ import unicode_literals
5 import frappe
6 import json
7 from frappe import _
8 import frappe.desk.query_report
9 from frappe.utils import cint
10 from frappe.model.document import Document
11 from frappe.modules.export_file import export_to_files
12 from frappe.modules import make_boilerplate
13 from frappe.core.doctype.page.page import delete_custom_role
14 from frappe.core.doctype.custom_role.custom_role import get_custom_allowed_roles
15 from six import iteritems
16
17
18 class Report(Document):
19 def validate(self):
20 """only administrator can save standard report"""
21 if not self.module:
22 self.module = frappe.db.get_value("DocType", self.ref_doctype, "module")
23
24 if not self.is_standard:
25 self.is_standard = "No"
26 if frappe.session.user=="Administrator" and getattr(frappe.local.conf, 'developer_mode',0)==1:
27 self.is_standard = "Yes"
28
29 if self.is_standard == "No" and frappe.db.get_value("Report", self.name, "is_standard") == "Yes":
30 frappe.throw(_("Cannot edit a standard report. Please duplicate and create a new report"))
31
32 if self.is_standard == "Yes" and frappe.session.user!="Administrator":
33 frappe.throw(_("Only Administrator can save a standard report. Please rename and save."))
34
35 if self.report_type in ("Query Report", "Script Report") \
36 and frappe.session.user!="Administrator":
37 frappe.throw(_("Only Administrator allowed to create Query / Script Reports"))
38
39 if self.report_type == "Report Builder":
40 self.update_report_json()
41
42 def before_insert(self):
43 self.set_doctype_roles()
44
45 def on_update(self):
46 self.export_doc()
47
48 def on_trash(self):
49 delete_custom_role('report', self.name)
50
51 def set_doctype_roles(self):
52 if not self.get('roles') and self.is_standard == 'No':
53 meta = frappe.get_meta(self.ref_doctype)
54 roles = [{'role': d.role} for d in meta.permissions if d.permlevel==0]
55 self.set('roles', roles)
56
57 def is_permitted(self):
58 """Returns true if Has Role is not set or the user is allowed."""
59 from frappe.utils import has_common
60
61 allowed = [d.role for d in frappe.get_all("Has Role", fields=["role"],
62 filters={"parent": self.name})]
63
64 custom_roles = get_custom_allowed_roles('report', self.name)
65 allowed.extend(custom_roles)
66
67 if not allowed:
68 return True
69
70 roles = frappe.get_roles()
71
72 if has_common(roles, allowed):
73 return True
74
75 def update_report_json(self):
76 if not self.json:
77 self.json = '{}'
78
79 if self.json:
80 data = json.loads(self.json)
81 data["add_total_row"] = self.add_total_row
82 self.json = json.dumps(data)
83
84 def export_doc(self):
85 if frappe.flags.in_import:
86 return
87
88 if self.is_standard == 'Yes' and (frappe.local.conf.get('developer_mode') or 0) == 1:
89 export_to_files(record_list=[['Report', self.name]],
90 record_module=self.module, create_init=True)
91
92 self.create_report_py()
93
94 def create_report_py(self):
95 if self.report_type == "Script Report":
96 make_boilerplate("controller.py", self, {"name": self.name})
97 make_boilerplate("controller.js", self, {"name": self.name})
98
99 def get_data(self, filters=None, limit=None, user=None, as_dict=False):
100 columns = []
101 out = []
102
103 if self.report_type in ('Query Report', 'Script Report'):
104 # query and script reports
105 data = frappe.desk.query_report.run(self.name, filters=filters, user=user)
106 for d in data.get('columns'):
107 if isinstance(d, dict):
108 col = frappe._dict(d)
109 if not col.fieldname:
110 col.fieldname = col.label
111 columns.append(col)
112 else:
113 fieldtype, options = "Data", None
114 parts = d.split(':')
115 if len(parts) > 1:
116 if parts[1]:
117 fieldtype, options = parts[1], None
118 if fieldtype and '/' in fieldtype:
119 fieldtype, options = fieldtype.split('/')
120
121 columns.append(frappe._dict(label=parts[0], fieldtype=fieldtype, fieldname=parts[0]))
122
123 out += data.get('result')
124 else:
125 # standard report
126 params = json.loads(self.json)
127
128 if params.get('columns'):
129 columns = params.get('columns')
130 else:
131 columns = [['name', self.ref_doctype]]
132 for df in frappe.get_meta(self.ref_doctype).fields:
133 if df.in_list_view:
134 columns.append([df.fieldname, self.ref_doctype])
135
136 _filters = params.get('filters') or []
137
138 if filters:
139 for key, value in iteritems(filters):
140 condition, _value = '=', value
141 if isinstance(value, (list, tuple)):
142 condition, _value = value
143 _filters.append([key, condition, _value])
144
145 def _format(parts):
146 # sort by is saved as DocType.fieldname, covert it to sql
147 return '`tab{0}`.`{1}`'.format(*parts)
148
149 if params.get('sort_by'):
150 order_by = _format(params.get('sort_by').split('.')) + ' ' + params.get('sort_order')
151 else:
152 order_by = _format(self.ref_doctype, 'modified') + ' desc'
153
154 if params.get('sort_by_next'):
155 order_by += ', ' + _format(params.get('sort_by_next').split('.')) + ' ' + params.get('sort_order_next')
156
157 result = frappe.get_list(self.ref_doctype,
158 fields = [_format([c[1], c[0]]) for c in columns],
159 filters=_filters,
160 order_by = order_by,
161 as_list=True,
162 limit=limit,
163 user=user)
164
165 _columns = []
166 for column in columns:
167 meta = frappe.get_meta(column[1])
168 field = [meta.get_field(column[0]) or frappe._dict(label=meta.get_label(column[0]), fieldname=column[0])]
169 _columns.extend(field)
170 columns = _columns
171
172 out = out + [list(d) for d in result]
173
174 if as_dict:
175 data = []
176 for row in out:
177 if isinstance(row, (list, tuple)):
178 _row = frappe._dict()
179 for i, val in enumerate(row):
180 _row[columns[i].get('fieldname')] = val
181 elif isinstance(row, dict):
182 # no need to convert from dict to dict
183 _row = frappe._dict(row)
184 data.append(_row)
185 else:
186 data = out
187 return columns, data
188
189
190 @Document.whitelist
191 def toggle_disable(self, disable):
192 self.db_set("disabled", cint(disable))
193
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/frappe/core/doctype/report/report.py b/frappe/core/doctype/report/report.py
--- a/frappe/core/doctype/report/report.py
+++ b/frappe/core/doctype/report/report.py
@@ -127,6 +127,8 @@
if params.get('columns'):
columns = params.get('columns')
+ elif params.get('fields'):
+ columns = params.get('fields')
else:
columns = [['name', self.ref_doctype]]
for df in frappe.get_meta(self.ref_doctype).fields:
@@ -149,7 +151,7 @@
if params.get('sort_by'):
order_by = _format(params.get('sort_by').split('.')) + ' ' + params.get('sort_order')
else:
- order_by = _format(self.ref_doctype, 'modified') + ' desc'
+ order_by = _format([self.ref_doctype, 'modified']) + ' desc'
if params.get('sort_by_next'):
order_by += ', ' + _format(params.get('sort_by_next').split('.')) + ' ' + params.get('sort_order_next')
|
{"golden_diff": "diff --git a/frappe/core/doctype/report/report.py b/frappe/core/doctype/report/report.py\n--- a/frappe/core/doctype/report/report.py\n+++ b/frappe/core/doctype/report/report.py\n@@ -127,6 +127,8 @@\n \n \t\t\tif params.get('columns'):\n \t\t\t\tcolumns = params.get('columns')\n+\t\t\telif params.get('fields'):\n+\t\t\t\tcolumns = params.get('fields')\n \t\t\telse:\n \t\t\t\tcolumns = [['name', self.ref_doctype]]\n \t\t\t\tfor df in frappe.get_meta(self.ref_doctype).fields:\n@@ -149,7 +151,7 @@\n \t\t\tif params.get('sort_by'):\n \t\t\t\torder_by = _format(params.get('sort_by').split('.')) + ' ' + params.get('sort_order')\n \t\t\telse:\n-\t\t\t\torder_by = _format(self.ref_doctype, 'modified') + ' desc'\n+\t\t\t\torder_by = _format([self.ref_doctype, 'modified']) + ' desc'\n \n \t\t\tif params.get('sort_by_next'):\n \t\t\t\torder_by += ', ' + _format(params.get('sort_by_next').split('.')) + ' ' + params.get('sort_order_next')\n", "issue": "Auto Email Report CSV Format Broken\nWhen downloading (or sending) an auto email report with format \"CSV\" the following error occurs:\r\n\r\n```\r\nraceback (most recent call last):\r\n File \"/home/frappe/frappe-bench/apps/frappe/frappe/app.py\", line 66, in application\r\n response = frappe.api.handle()\r\n File \"/home/frappe/frappe-bench/apps/frappe/frappe/api.py\", line 56, in handle\r\n return frappe.handler.handle()\r\n File \"/home/frappe/frappe-bench/apps/frappe/frappe/handler.py\", line 21, in handle\r\n data = execute_cmd(cmd)\r\n File \"/home/frappe/frappe-bench/apps/frappe/frappe/handler.py\", line 56, in execute_cmd\r\n return frappe.call(method, **frappe.form_dict)\r\n File \"/home/frappe/frappe-bench/apps/frappe/frappe/__init__.py\", line 1007, in call\r\n return fn(*args, **newargs)\r\n File \"/home/frappe/frappe-bench/apps/frappe/frappe/email/doctype/auto_email_report/auto_email_report.py\", line 153, in download\r\n data = auto_email_report.get_report_content()\r\n File \"/home/frappe/frappe-bench/apps/frappe/frappe/email/doctype/auto_email_report/auto_email_report.py\", line 61, in get_report_content\r\n filters = self.filters, as_dict=True)\r\n File \"/home/frappe/frappe-bench/apps/frappe/frappe/core/doctype/report/report.py\", line 152, in get_data\r\n order_by = _format(self.ref_doctype, 'modified') + ' desc'\r\nTypeError: _format() takes exactly 1 argument (2 given)\r\n```\r\n\r\nWe're using the latest frappe 11.0.3-beta.21.\n", "before_files": [{"content": "# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# MIT License. 
See license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nimport json\nfrom frappe import _\nimport frappe.desk.query_report\nfrom frappe.utils import cint\nfrom frappe.model.document import Document\nfrom frappe.modules.export_file import export_to_files\nfrom frappe.modules import make_boilerplate\nfrom frappe.core.doctype.page.page import delete_custom_role\nfrom frappe.core.doctype.custom_role.custom_role import get_custom_allowed_roles\nfrom six import iteritems\n\n\nclass Report(Document):\n\tdef validate(self):\n\t\t\"\"\"only administrator can save standard report\"\"\"\n\t\tif not self.module:\n\t\t\tself.module = frappe.db.get_value(\"DocType\", self.ref_doctype, \"module\")\n\n\t\tif not self.is_standard:\n\t\t\tself.is_standard = \"No\"\n\t\t\tif frappe.session.user==\"Administrator\" and getattr(frappe.local.conf, 'developer_mode',0)==1:\n\t\t\t\tself.is_standard = \"Yes\"\n\n\t\tif self.is_standard == \"No\" and frappe.db.get_value(\"Report\", self.name, \"is_standard\") == \"Yes\":\n\t\t\tfrappe.throw(_(\"Cannot edit a standard report. Please duplicate and create a new report\"))\n\n\t\tif self.is_standard == \"Yes\" and frappe.session.user!=\"Administrator\":\n\t\t\tfrappe.throw(_(\"Only Administrator can save a standard report. Please rename and save.\"))\n\n\t\tif self.report_type in (\"Query Report\", \"Script Report\") \\\n\t\t\tand frappe.session.user!=\"Administrator\":\n\t\t\tfrappe.throw(_(\"Only Administrator allowed to create Query / Script Reports\"))\n\n\t\tif self.report_type == \"Report Builder\":\n\t\t\tself.update_report_json()\n\n\tdef before_insert(self):\n\t\tself.set_doctype_roles()\n\n\tdef on_update(self):\n\t\tself.export_doc()\n\n\tdef on_trash(self):\n\t\tdelete_custom_role('report', self.name)\n\n\tdef set_doctype_roles(self):\n\t\tif not self.get('roles') and self.is_standard == 'No':\n\t\t\tmeta = frappe.get_meta(self.ref_doctype)\n\t\t\troles = [{'role': d.role} for d in meta.permissions if d.permlevel==0]\n\t\t\tself.set('roles', roles)\n\n\tdef is_permitted(self):\n\t\t\"\"\"Returns true if Has Role is not set or the user is allowed.\"\"\"\n\t\tfrom frappe.utils import has_common\n\n\t\tallowed = [d.role for d in frappe.get_all(\"Has Role\", fields=[\"role\"],\n\t\t\tfilters={\"parent\": self.name})]\n\n\t\tcustom_roles = get_custom_allowed_roles('report', self.name)\n\t\tallowed.extend(custom_roles)\n\n\t\tif not allowed:\n\t\t\treturn True\n\n\t\troles = frappe.get_roles()\n\n\t\tif has_common(roles, allowed):\n\t\t\treturn True\n\n\tdef update_report_json(self):\n\t\tif not self.json:\n\t\t\tself.json = '{}'\n\n\t\tif self.json:\n\t\t\tdata = json.loads(self.json)\n\t\t\tdata[\"add_total_row\"] = self.add_total_row\n\t\t\tself.json = json.dumps(data)\n\n\tdef export_doc(self):\n\t\tif frappe.flags.in_import:\n\t\t\treturn\n\n\t\tif self.is_standard == 'Yes' and (frappe.local.conf.get('developer_mode') or 0) == 1:\n\t\t\texport_to_files(record_list=[['Report', self.name]],\n\t\t\t\trecord_module=self.module, create_init=True)\n\n\t\t\tself.create_report_py()\n\n\tdef create_report_py(self):\n\t\tif self.report_type == \"Script Report\":\n\t\t\tmake_boilerplate(\"controller.py\", self, {\"name\": self.name})\n\t\t\tmake_boilerplate(\"controller.js\", self, {\"name\": self.name})\n\n\tdef get_data(self, filters=None, limit=None, user=None, as_dict=False):\n\t\tcolumns = []\n\t\tout = []\n\n\t\tif self.report_type in ('Query Report', 'Script Report'):\n\t\t\t# query and script reports\n\t\t\tdata = 
frappe.desk.query_report.run(self.name, filters=filters, user=user)\n\t\t\tfor d in data.get('columns'):\n\t\t\t\tif isinstance(d, dict):\n\t\t\t\t\tcol = frappe._dict(d)\n\t\t\t\t\tif not col.fieldname:\n\t\t\t\t\t\tcol.fieldname = col.label\n\t\t\t\t\tcolumns.append(col)\n\t\t\t\telse:\n\t\t\t\t\tfieldtype, options = \"Data\", None\n\t\t\t\t\tparts = d.split(':')\n\t\t\t\t\tif len(parts) > 1:\n\t\t\t\t\t\tif parts[1]:\n\t\t\t\t\t\t\tfieldtype, options = parts[1], None\n\t\t\t\t\t\t\tif fieldtype and '/' in fieldtype:\n\t\t\t\t\t\t\t\tfieldtype, options = fieldtype.split('/')\n\n\t\t\t\t\tcolumns.append(frappe._dict(label=parts[0], fieldtype=fieldtype, fieldname=parts[0]))\n\n\t\t\tout += data.get('result')\n\t\telse:\n\t\t\t# standard report\n\t\t\tparams = json.loads(self.json)\n\n\t\t\tif params.get('columns'):\n\t\t\t\tcolumns = params.get('columns')\n\t\t\telse:\n\t\t\t\tcolumns = [['name', self.ref_doctype]]\n\t\t\t\tfor df in frappe.get_meta(self.ref_doctype).fields:\n\t\t\t\t\tif df.in_list_view:\n\t\t\t\t\t\tcolumns.append([df.fieldname, self.ref_doctype])\n\n\t\t\t_filters = params.get('filters') or []\n\n\t\t\tif filters:\n\t\t\t\tfor key, value in iteritems(filters):\n\t\t\t\t\tcondition, _value = '=', value\n\t\t\t\t\tif isinstance(value, (list, tuple)):\n\t\t\t\t\t\tcondition, _value = value\n\t\t\t\t\t_filters.append([key, condition, _value])\n\n\t\t\tdef _format(parts):\n\t\t\t\t# sort by is saved as DocType.fieldname, covert it to sql\n\t\t\t\treturn '`tab{0}`.`{1}`'.format(*parts)\n\n\t\t\tif params.get('sort_by'):\n\t\t\t\torder_by = _format(params.get('sort_by').split('.')) + ' ' + params.get('sort_order')\n\t\t\telse:\n\t\t\t\torder_by = _format(self.ref_doctype, 'modified') + ' desc'\n\n\t\t\tif params.get('sort_by_next'):\n\t\t\t\torder_by += ', ' + _format(params.get('sort_by_next').split('.')) + ' ' + params.get('sort_order_next')\n\n\t\t\tresult = frappe.get_list(self.ref_doctype,\n\t\t\t\tfields = [_format([c[1], c[0]]) for c in columns],\n\t\t\t\tfilters=_filters,\n\t\t\t\torder_by = order_by,\n\t\t\t\tas_list=True,\n\t\t\t\tlimit=limit,\n\t\t\t\tuser=user)\n\n\t\t\t_columns = []\n\t\t\tfor column in columns:\n\t\t\t\tmeta = frappe.get_meta(column[1])\n\t\t\t\tfield = [meta.get_field(column[0]) or frappe._dict(label=meta.get_label(column[0]), fieldname=column[0])]\n\t\t\t\t_columns.extend(field)\n\t\t\tcolumns = _columns\n\n\t\t\tout = out + [list(d) for d in result]\n\n\t\tif as_dict:\n\t\t\tdata = []\n\t\t\tfor row in out:\n\t\t\t\tif isinstance(row, (list, tuple)):\n\t\t\t\t\t_row = frappe._dict()\n\t\t\t\t\tfor i, val in enumerate(row):\n\t\t\t\t\t\t_row[columns[i].get('fieldname')] = val\n\t\t\t\telif isinstance(row, dict):\n\t\t\t\t\t# no need to convert from dict to dict\n\t\t\t\t\t_row = frappe._dict(row)\n\t\t\t\tdata.append(_row)\n\t\telse:\n\t\t\tdata = out\n\t\treturn columns, data\n\n\n\[email protected]\n\tdef toggle_disable(self, disable):\n\t\tself.db_set(\"disabled\", cint(disable))\n", "path": "frappe/core/doctype/report/report.py"}], "after_files": [{"content": "# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# MIT License. 
See license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nimport json\nfrom frappe import _\nimport frappe.desk.query_report\nfrom frappe.utils import cint\nfrom frappe.model.document import Document\nfrom frappe.modules.export_file import export_to_files\nfrom frappe.modules import make_boilerplate\nfrom frappe.core.doctype.page.page import delete_custom_role\nfrom frappe.core.doctype.custom_role.custom_role import get_custom_allowed_roles\nfrom six import iteritems\n\n\nclass Report(Document):\n\tdef validate(self):\n\t\t\"\"\"only administrator can save standard report\"\"\"\n\t\tif not self.module:\n\t\t\tself.module = frappe.db.get_value(\"DocType\", self.ref_doctype, \"module\")\n\n\t\tif not self.is_standard:\n\t\t\tself.is_standard = \"No\"\n\t\t\tif frappe.session.user==\"Administrator\" and getattr(frappe.local.conf, 'developer_mode',0)==1:\n\t\t\t\tself.is_standard = \"Yes\"\n\n\t\tif self.is_standard == \"No\" and frappe.db.get_value(\"Report\", self.name, \"is_standard\") == \"Yes\":\n\t\t\tfrappe.throw(_(\"Cannot edit a standard report. Please duplicate and create a new report\"))\n\n\t\tif self.is_standard == \"Yes\" and frappe.session.user!=\"Administrator\":\n\t\t\tfrappe.throw(_(\"Only Administrator can save a standard report. Please rename and save.\"))\n\n\t\tif self.report_type in (\"Query Report\", \"Script Report\") \\\n\t\t\tand frappe.session.user!=\"Administrator\":\n\t\t\tfrappe.throw(_(\"Only Administrator allowed to create Query / Script Reports\"))\n\n\t\tif self.report_type == \"Report Builder\":\n\t\t\tself.update_report_json()\n\n\tdef before_insert(self):\n\t\tself.set_doctype_roles()\n\n\tdef on_update(self):\n\t\tself.export_doc()\n\n\tdef on_trash(self):\n\t\tdelete_custom_role('report', self.name)\n\n\tdef set_doctype_roles(self):\n\t\tif not self.get('roles') and self.is_standard == 'No':\n\t\t\tmeta = frappe.get_meta(self.ref_doctype)\n\t\t\troles = [{'role': d.role} for d in meta.permissions if d.permlevel==0]\n\t\t\tself.set('roles', roles)\n\n\tdef is_permitted(self):\n\t\t\"\"\"Returns true if Has Role is not set or the user is allowed.\"\"\"\n\t\tfrom frappe.utils import has_common\n\n\t\tallowed = [d.role for d in frappe.get_all(\"Has Role\", fields=[\"role\"],\n\t\t\tfilters={\"parent\": self.name})]\n\n\t\tcustom_roles = get_custom_allowed_roles('report', self.name)\n\t\tallowed.extend(custom_roles)\n\n\t\tif not allowed:\n\t\t\treturn True\n\n\t\troles = frappe.get_roles()\n\n\t\tif has_common(roles, allowed):\n\t\t\treturn True\n\n\tdef update_report_json(self):\n\t\tif not self.json:\n\t\t\tself.json = '{}'\n\n\t\tif self.json:\n\t\t\tdata = json.loads(self.json)\n\t\t\tdata[\"add_total_row\"] = self.add_total_row\n\t\t\tself.json = json.dumps(data)\n\n\tdef export_doc(self):\n\t\tif frappe.flags.in_import:\n\t\t\treturn\n\n\t\tif self.is_standard == 'Yes' and (frappe.local.conf.get('developer_mode') or 0) == 1:\n\t\t\texport_to_files(record_list=[['Report', self.name]],\n\t\t\t\trecord_module=self.module, create_init=True)\n\n\t\t\tself.create_report_py()\n\n\tdef create_report_py(self):\n\t\tif self.report_type == \"Script Report\":\n\t\t\tmake_boilerplate(\"controller.py\", self, {\"name\": self.name})\n\t\t\tmake_boilerplate(\"controller.js\", self, {\"name\": self.name})\n\n\tdef get_data(self, filters=None, limit=None, user=None, as_dict=False):\n\t\tcolumns = []\n\t\tout = []\n\n\t\tif self.report_type in ('Query Report', 'Script Report'):\n\t\t\t# query and script reports\n\t\t\tdata = 
frappe.desk.query_report.run(self.name, filters=filters, user=user)\n\t\t\tfor d in data.get('columns'):\n\t\t\t\tif isinstance(d, dict):\n\t\t\t\t\tcol = frappe._dict(d)\n\t\t\t\t\tif not col.fieldname:\n\t\t\t\t\t\tcol.fieldname = col.label\n\t\t\t\t\tcolumns.append(col)\n\t\t\t\telse:\n\t\t\t\t\tfieldtype, options = \"Data\", None\n\t\t\t\t\tparts = d.split(':')\n\t\t\t\t\tif len(parts) > 1:\n\t\t\t\t\t\tif parts[1]:\n\t\t\t\t\t\t\tfieldtype, options = parts[1], None\n\t\t\t\t\t\t\tif fieldtype and '/' in fieldtype:\n\t\t\t\t\t\t\t\tfieldtype, options = fieldtype.split('/')\n\n\t\t\t\t\tcolumns.append(frappe._dict(label=parts[0], fieldtype=fieldtype, fieldname=parts[0]))\n\n\t\t\tout += data.get('result')\n\t\telse:\n\t\t\t# standard report\n\t\t\tparams = json.loads(self.json)\n\n\t\t\tif params.get('columns'):\n\t\t\t\tcolumns = params.get('columns')\n\t\t\telif params.get('fields'):\n\t\t\t\tcolumns = params.get('fields')\n\t\t\telse:\n\t\t\t\tcolumns = [['name', self.ref_doctype]]\n\t\t\t\tfor df in frappe.get_meta(self.ref_doctype).fields:\n\t\t\t\t\tif df.in_list_view:\n\t\t\t\t\t\tcolumns.append([df.fieldname, self.ref_doctype])\n\n\t\t\t_filters = params.get('filters') or []\n\n\t\t\tif filters:\n\t\t\t\tfor key, value in iteritems(filters):\n\t\t\t\t\tcondition, _value = '=', value\n\t\t\t\t\tif isinstance(value, (list, tuple)):\n\t\t\t\t\t\tcondition, _value = value\n\t\t\t\t\t_filters.append([key, condition, _value])\n\n\t\t\tdef _format(parts):\n\t\t\t\t# sort by is saved as DocType.fieldname, covert it to sql\n\t\t\t\treturn '`tab{0}`.`{1}`'.format(*parts)\n\n\t\t\tif params.get('sort_by'):\n\t\t\t\torder_by = _format(params.get('sort_by').split('.')) + ' ' + params.get('sort_order')\n\t\t\telse:\n\t\t\t\torder_by = _format([self.ref_doctype, 'modified']) + ' desc'\n\n\t\t\tif params.get('sort_by_next'):\n\t\t\t\torder_by += ', ' + _format(params.get('sort_by_next').split('.')) + ' ' + params.get('sort_order_next')\n\n\t\t\tresult = frappe.get_list(self.ref_doctype,\n\t\t\t\tfields = [_format([c[1], c[0]]) for c in columns],\n\t\t\t\tfilters=_filters,\n\t\t\t\torder_by = order_by,\n\t\t\t\tas_list=True,\n\t\t\t\tlimit=limit,\n\t\t\t\tuser=user)\n\n\t\t\t_columns = []\n\t\t\tfor column in columns:\n\t\t\t\tmeta = frappe.get_meta(column[1])\n\t\t\t\tfield = [meta.get_field(column[0]) or frappe._dict(label=meta.get_label(column[0]), fieldname=column[0])]\n\t\t\t\t_columns.extend(field)\n\t\t\tcolumns = _columns\n\n\t\t\tout = out + [list(d) for d in result]\n\n\t\tif as_dict:\n\t\t\tdata = []\n\t\t\tfor row in out:\n\t\t\t\tif isinstance(row, (list, tuple)):\n\t\t\t\t\t_row = frappe._dict()\n\t\t\t\t\tfor i, val in enumerate(row):\n\t\t\t\t\t\t_row[columns[i].get('fieldname')] = val\n\t\t\t\telif isinstance(row, dict):\n\t\t\t\t\t# no need to convert from dict to dict\n\t\t\t\t\t_row = frappe._dict(row)\n\t\t\t\tdata.append(_row)\n\t\telse:\n\t\t\tdata = out\n\t\treturn columns, data\n\n\n\[email protected]\n\tdef toggle_disable(self, disable):\n\t\tself.db_set(\"disabled\", cint(disable))\n", "path": "frappe/core/doctype/report/report.py"}]}
| 2,714 | 252 |
gh_patches_debug_2845 | rasdani/github-patches | git_diff | mne-tools__mne-python-4664 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
UserWarning in decoding_rsa example
Running this example, I get the following warning
decoding_rsa.py:94: RuntimeWarning: More events than colors available. You should pass a list of unique colors.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/decoding/decoding_rsa.py`
Content:
```
1 """
2
3 .. _rsa_noplot:
4
5 ====================================
6 Representational Similarity Analysis
7 ====================================
8
9 Representational Similarity Analysis is used to perform summary statistics
10 on supervised classifications where the number of classes is relatively high.
11 It consists in characterizing the structure of the confusion matrix to infer
12 the similarity between brain responses and serves as a proxy for characterizing
13 the space of mental representations [1]_ [2]_ [3]_.
14
15 In this example, we perform RSA on responses to 24 object images (among
16 a list of 92 images). Subjects were presented with images of human, animal
17 and inanimate objects [4]_. Here we use the 24 unique images of faces
18 and body parts.
19
20 .. note:: this example will download a very large (~6GB) file, so we will not
21 build the images below.
22
23 References
24 ----------
25
26 .. [1] Shepard, R. "Multidimensional scaling, tree-fitting, and clustering."
27 Science 210.4468 (1980): 390-398.
28 .. [2] Laakso, A. & Cottrell, G.. "Content and cluster analysis:
29 assessing representational similarity in neural systems." Philosophical
30 psychology 13.1 (2000): 47-76.
31 .. [3] Kriegeskorte, N., Marieke, M., & Bandettini. P. "Representational
32 similarity analysis-connecting the branches of systems neuroscience."
33 Frontiers in systems neuroscience 2 (2008): 4.
34 .. [4] Cichy, R. M., Pantazis, D., & Oliva, A. "Resolving human object
35 recognition in space and time." Nature neuroscience (2014): 17(3),
36 455-462.
37 """
38
39 # Authors: Jean-Remi King <[email protected]>
40 # Jaakko Leppakangas <[email protected]>
41 # Alexandre Gramfort <[email protected]>
42 #
43 # License: BSD (3-clause)
44
45 import os.path as op
46 import numpy as np
47 from pandas import read_csv
48 import matplotlib.pyplot as plt
49
50 from sklearn.model_selection import StratifiedKFold
51 from sklearn.pipeline import make_pipeline
52 from sklearn.preprocessing import StandardScaler
53 from sklearn.linear_model import LogisticRegression
54 from sklearn.metrics import roc_auc_score
55 from sklearn.manifold import MDS
56
57 import mne
58 from mne.io import read_raw_fif, concatenate_raws
59 from mne.datasets import visual_92_categories
60
61 print(__doc__)
62
63 data_path = visual_92_categories.data_path()
64
65 # Define stimulus - trigger mapping
66 fname = op.join(data_path, 'visual_stimuli.csv')
67 conds = read_csv(fname)
68 print(conds.head(5))
69
70 ##############################################################################
71 # Let's restrict the number of conditions to speed up computation
72 max_trigger = 24
73 conds = conds[:max_trigger] # take only the first 24 rows
74
75 ##############################################################################
76 # Define stimulus - trigger mapping
77 conditions = []
78 for c in conds.values:
79 cond_tags = list(c[:2])
80 cond_tags += [('not-' if i == 0 else '') + conds.columns[k]
81 for k, i in enumerate(c[2:], 2)]
82 conditions.append('/'.join(map(str, cond_tags)))
83 print(conditions[:10])
84
85 ##############################################################################
86 # Let's make the event_id dictionary
87 event_id = dict(zip(conditions, conds.trigger + 1))
88 event_id['0/human bodypart/human/not-face/animal/natural']
89
90 ##############################################################################
91 # Read MEG data
92 n_runs = 4 # 4 for full data (use less to speed up computations)
93 fname = op.join(data_path, 'sample_subject_%i_tsss_mc.fif')
94 raws = [read_raw_fif(fname % block) for block in range(n_runs)]
95 raw = concatenate_raws(raws)
96
97 events = mne.find_events(raw, min_duration=.002)
98
99 events = events[events[:, 2] <= max_trigger]
100 mne.viz.plot_events(events, sfreq=raw.info['sfreq'])
101
102 ##############################################################################
103 # Epoch data
104 picks = mne.pick_types(raw.info, meg=True)
105 epochs = mne.Epochs(raw, events=events, event_id=event_id, baseline=None,
106 picks=picks, tmin=-.1, tmax=.500, preload=True)
107
108 ##############################################################################
109 # Let's plot some conditions
110 epochs['face'].average().plot()
111 epochs['not-face'].average().plot()
112
113 ##############################################################################
114 # Representational Similarity Analysis (RSA) is a neuroimaging-specific
115 # appelation to refer to statistics applied to the confusion matrix
116 # also referred to as the representational dissimilarity matrices (RDM).
117 #
118 # Compared to the approach from Cichy et al. we'll use a multiclass
119 # classifier (Multinomial Logistic Regression) while the paper uses
120 # all pairwise binary classification task to make the RDM.
121 # Also we use here the ROC-AUC as performance metric while the
122 # paper uses accuracy. Finally here for the sake of time we use
123 # RSA on a window of data while Cichy et al. did it for all time
124 # instants separately.
125
126 # Classify using the average signal in the window 50ms to 300ms
127 # to focus the classifier on the time interval with best SNR.
128 clf = make_pipeline(StandardScaler(),
129 LogisticRegression(C=1, solver='lbfgs'))
130 X = epochs.copy().crop(0.05, 0.3).get_data().mean(axis=2)
131 y = epochs.events[:, 2]
132
133 classes = set(y)
134 cv = StratifiedKFold(n_splits=5, random_state=0, shuffle=True)
135
136 # Compute confusion matrix for each cross-validation fold
137 y_pred = np.zeros((len(y), len(classes)))
138 for train, test in cv.split(X, y):
139 # Fit
140 clf.fit(X[train], y[train])
141 # Probabilistic prediction (necessary for ROC-AUC scoring metric)
142 y_pred[test] = clf.predict_proba(X[test])
143
144 ##############################################################################
145 # Compute confusion matrix using ROC-AUC
146 confusion = np.zeros((len(classes), len(classes)))
147 for ii, train_class in enumerate(classes):
148 for jj in range(ii, len(classes)):
149 confusion[ii, jj] = roc_auc_score(y == train_class, y_pred[:, jj])
150 confusion[jj, ii] = confusion[ii, jj]
151
152 ##############################################################################
153 # Plot
154 labels = [''] * 5 + ['face'] + [''] * 11 + ['bodypart'] + [''] * 6
155 fig, ax = plt.subplots(1)
156 im = ax.matshow(confusion, cmap='RdBu_r', clim=[0.3, 0.7])
157 ax.set_yticks(range(len(classes)))
158 ax.set_yticklabels(labels)
159 ax.set_xticks(range(len(classes)))
160 ax.set_xticklabels(labels, rotation=40, ha='left')
161 ax.axhline(11.5, color='k')
162 ax.axvline(11.5, color='k')
163 plt.colorbar(im)
164 plt.tight_layout()
165 plt.show()
166
167 ##############################################################################
168 # Confusion matrix related to mental representations have been historically
169 # summarized with dimensionality reduction using multi-dimensional scaling [1].
170 # See how the face samples cluster together.
171 fig, ax = plt.subplots(1)
172 mds = MDS(2, random_state=0, dissimilarity='precomputed')
173 chance = 0.5
174 summary = mds.fit_transform(chance - confusion)
175 cmap = plt.get_cmap('rainbow')
176 colors = ['r', 'b']
177 names = list(conds['condition'].values)
178 for color, name in zip(colors, set(names)):
179 sel = np.where([this_name == name for this_name in names])[0]
180 size = 500 if name == 'human face' else 100
181 ax.scatter(summary[sel, 0], summary[sel, 1], s=size,
182 facecolors=color, label=name, edgecolors='k')
183 ax.axis('off')
184 ax.legend(loc='lower right', scatterpoints=1, ncol=2)
185 plt.tight_layout()
186 plt.show()
187
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/examples/decoding/decoding_rsa.py b/examples/decoding/decoding_rsa.py
--- a/examples/decoding/decoding_rsa.py
+++ b/examples/decoding/decoding_rsa.py
@@ -97,7 +97,6 @@
events = mne.find_events(raw, min_duration=.002)
events = events[events[:, 2] <= max_trigger]
-mne.viz.plot_events(events, sfreq=raw.info['sfreq'])
##############################################################################
# Epoch data
|
{"golden_diff": "diff --git a/examples/decoding/decoding_rsa.py b/examples/decoding/decoding_rsa.py\n--- a/examples/decoding/decoding_rsa.py\n+++ b/examples/decoding/decoding_rsa.py\n@@ -97,7 +97,6 @@\n events = mne.find_events(raw, min_duration=.002)\n \n events = events[events[:, 2] <= max_trigger]\n-mne.viz.plot_events(events, sfreq=raw.info['sfreq'])\n \n ##############################################################################\n # Epoch data\n", "issue": "UserWarning in decoding_rsa example\nRunning this example, I get the following warning \r\n \r\n decoding_rsa.py:94: RuntimeWarning: More events than colors available. You should pass a list of unique colors.\n", "before_files": [{"content": "\"\"\"\n\n.. _rsa_noplot:\n\n====================================\nRepresentational Similarity Analysis\n====================================\n\nRepresentational Similarity Analysis is used to perform summary statistics\non supervised classifications where the number of classes is relatively high.\nIt consists in characterizing the structure of the confusion matrix to infer\nthe similarity between brain responses and serves as a proxy for characterizing\nthe space of mental representations [1]_ [2]_ [3]_.\n\nIn this example, we perform RSA on responses to 24 object images (among\na list of 92 images). Subjects were presented with images of human, animal\nand inanimate objects [4]_. Here we use the 24 unique images of faces\nand body parts.\n\n.. note:: this example will download a very large (~6GB) file, so we will not\n build the images below.\n\nReferences\n----------\n\n.. [1] Shepard, R. \"Multidimensional scaling, tree-fitting, and clustering.\"\n Science 210.4468 (1980): 390-398.\n.. [2] Laakso, A. & Cottrell, G.. \"Content and cluster analysis:\n assessing representational similarity in neural systems.\" Philosophical\n psychology 13.1 (2000): 47-76.\n.. [3] Kriegeskorte, N., Marieke, M., & Bandettini. P. \"Representational\n similarity analysis-connecting the branches of systems neuroscience.\"\n Frontiers in systems neuroscience 2 (2008): 4.\n.. [4] Cichy, R. M., Pantazis, D., & Oliva, A. 
\"Resolving human object\n recognition in space and time.\" Nature neuroscience (2014): 17(3),\n 455-462.\n\"\"\"\n\n# Authors: Jean-Remi King <[email protected]>\n# Jaakko Leppakangas <[email protected]>\n# Alexandre Gramfort <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport os.path as op\nimport numpy as np\nfrom pandas import read_csv\nimport matplotlib.pyplot as plt\n\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.manifold import MDS\n\nimport mne\nfrom mne.io import read_raw_fif, concatenate_raws\nfrom mne.datasets import visual_92_categories\n\nprint(__doc__)\n\ndata_path = visual_92_categories.data_path()\n\n# Define stimulus - trigger mapping\nfname = op.join(data_path, 'visual_stimuli.csv')\nconds = read_csv(fname)\nprint(conds.head(5))\n\n##############################################################################\n# Let's restrict the number of conditions to speed up computation\nmax_trigger = 24\nconds = conds[:max_trigger] # take only the first 24 rows\n\n##############################################################################\n# Define stimulus - trigger mapping\nconditions = []\nfor c in conds.values:\n cond_tags = list(c[:2])\n cond_tags += [('not-' if i == 0 else '') + conds.columns[k]\n for k, i in enumerate(c[2:], 2)]\n conditions.append('/'.join(map(str, cond_tags)))\nprint(conditions[:10])\n\n##############################################################################\n# Let's make the event_id dictionary\nevent_id = dict(zip(conditions, conds.trigger + 1))\nevent_id['0/human bodypart/human/not-face/animal/natural']\n\n##############################################################################\n# Read MEG data\nn_runs = 4 # 4 for full data (use less to speed up computations)\nfname = op.join(data_path, 'sample_subject_%i_tsss_mc.fif')\nraws = [read_raw_fif(fname % block) for block in range(n_runs)]\nraw = concatenate_raws(raws)\n\nevents = mne.find_events(raw, min_duration=.002)\n\nevents = events[events[:, 2] <= max_trigger]\nmne.viz.plot_events(events, sfreq=raw.info['sfreq'])\n\n##############################################################################\n# Epoch data\npicks = mne.pick_types(raw.info, meg=True)\nepochs = mne.Epochs(raw, events=events, event_id=event_id, baseline=None,\n picks=picks, tmin=-.1, tmax=.500, preload=True)\n\n##############################################################################\n# Let's plot some conditions\nepochs['face'].average().plot()\nepochs['not-face'].average().plot()\n\n##############################################################################\n# Representational Similarity Analysis (RSA) is a neuroimaging-specific\n# appelation to refer to statistics applied to the confusion matrix\n# also referred to as the representational dissimilarity matrices (RDM).\n#\n# Compared to the approach from Cichy et al. we'll use a multiclass\n# classifier (Multinomial Logistic Regression) while the paper uses\n# all pairwise binary classification task to make the RDM.\n# Also we use here the ROC-AUC as performance metric while the\n# paper uses accuracy. Finally here for the sake of time we use\n# RSA on a window of data while Cichy et al. 
did it for all time\n# instants separately.\n\n# Classify using the average signal in the window 50ms to 300ms\n# to focus the classifier on the time interval with best SNR.\nclf = make_pipeline(StandardScaler(),\n LogisticRegression(C=1, solver='lbfgs'))\nX = epochs.copy().crop(0.05, 0.3).get_data().mean(axis=2)\ny = epochs.events[:, 2]\n\nclasses = set(y)\ncv = StratifiedKFold(n_splits=5, random_state=0, shuffle=True)\n\n# Compute confusion matrix for each cross-validation fold\ny_pred = np.zeros((len(y), len(classes)))\nfor train, test in cv.split(X, y):\n # Fit\n clf.fit(X[train], y[train])\n # Probabilistic prediction (necessary for ROC-AUC scoring metric)\n y_pred[test] = clf.predict_proba(X[test])\n\n##############################################################################\n# Compute confusion matrix using ROC-AUC\nconfusion = np.zeros((len(classes), len(classes)))\nfor ii, train_class in enumerate(classes):\n for jj in range(ii, len(classes)):\n confusion[ii, jj] = roc_auc_score(y == train_class, y_pred[:, jj])\n confusion[jj, ii] = confusion[ii, jj]\n\n##############################################################################\n# Plot\nlabels = [''] * 5 + ['face'] + [''] * 11 + ['bodypart'] + [''] * 6\nfig, ax = plt.subplots(1)\nim = ax.matshow(confusion, cmap='RdBu_r', clim=[0.3, 0.7])\nax.set_yticks(range(len(classes)))\nax.set_yticklabels(labels)\nax.set_xticks(range(len(classes)))\nax.set_xticklabels(labels, rotation=40, ha='left')\nax.axhline(11.5, color='k')\nax.axvline(11.5, color='k')\nplt.colorbar(im)\nplt.tight_layout()\nplt.show()\n\n##############################################################################\n# Confusion matrix related to mental representations have been historically\n# summarized with dimensionality reduction using multi-dimensional scaling [1].\n# See how the face samples cluster together.\nfig, ax = plt.subplots(1)\nmds = MDS(2, random_state=0, dissimilarity='precomputed')\nchance = 0.5\nsummary = mds.fit_transform(chance - confusion)\ncmap = plt.get_cmap('rainbow')\ncolors = ['r', 'b']\nnames = list(conds['condition'].values)\nfor color, name in zip(colors, set(names)):\n sel = np.where([this_name == name for this_name in names])[0]\n size = 500 if name == 'human face' else 100\n ax.scatter(summary[sel, 0], summary[sel, 1], s=size,\n facecolors=color, label=name, edgecolors='k')\nax.axis('off')\nax.legend(loc='lower right', scatterpoints=1, ncol=2)\nplt.tight_layout()\nplt.show()\n", "path": "examples/decoding/decoding_rsa.py"}], "after_files": [{"content": "\"\"\"\n\n.. _rsa_noplot:\n\n====================================\nRepresentational Similarity Analysis\n====================================\n\nRepresentational Similarity Analysis is used to perform summary statistics\non supervised classifications where the number of classes is relatively high.\nIt consists in characterizing the structure of the confusion matrix to infer\nthe similarity between brain responses and serves as a proxy for characterizing\nthe space of mental representations [1]_ [2]_ [3]_.\n\nIn this example, we perform RSA on responses to 24 object images (among\na list of 92 images). Subjects were presented with images of human, animal\nand inanimate objects [4]_. Here we use the 24 unique images of faces\nand body parts.\n\n.. note:: this example will download a very large (~6GB) file, so we will not\n build the images below.\n\nReferences\n----------\n\n.. [1] Shepard, R. 
\"Multidimensional scaling, tree-fitting, and clustering.\"\n Science 210.4468 (1980): 390-398.\n.. [2] Laakso, A. & Cottrell, G.. \"Content and cluster analysis:\n assessing representational similarity in neural systems.\" Philosophical\n psychology 13.1 (2000): 47-76.\n.. [3] Kriegeskorte, N., Marieke, M., & Bandettini. P. \"Representational\n similarity analysis-connecting the branches of systems neuroscience.\"\n Frontiers in systems neuroscience 2 (2008): 4.\n.. [4] Cichy, R. M., Pantazis, D., & Oliva, A. \"Resolving human object\n recognition in space and time.\" Nature neuroscience (2014): 17(3),\n 455-462.\n\"\"\"\n\n# Authors: Jean-Remi King <[email protected]>\n# Jaakko Leppakangas <[email protected]>\n# Alexandre Gramfort <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport os.path as op\nimport numpy as np\nfrom pandas import read_csv\nimport matplotlib.pyplot as plt\n\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.manifold import MDS\n\nimport mne\nfrom mne.io import read_raw_fif, concatenate_raws\nfrom mne.datasets import visual_92_categories\n\nprint(__doc__)\n\ndata_path = visual_92_categories.data_path()\n\n# Define stimulus - trigger mapping\nfname = op.join(data_path, 'visual_stimuli.csv')\nconds = read_csv(fname)\nprint(conds.head(5))\n\n##############################################################################\n# Let's restrict the number of conditions to speed up computation\nmax_trigger = 24\nconds = conds[:max_trigger] # take only the first 24 rows\n\n##############################################################################\n# Define stimulus - trigger mapping\nconditions = []\nfor c in conds.values:\n cond_tags = list(c[:2])\n cond_tags += [('not-' if i == 0 else '') + conds.columns[k]\n for k, i in enumerate(c[2:], 2)]\n conditions.append('/'.join(map(str, cond_tags)))\nprint(conditions[:10])\n\n##############################################################################\n# Let's make the event_id dictionary\nevent_id = dict(zip(conditions, conds.trigger + 1))\nevent_id['0/human bodypart/human/not-face/animal/natural']\n\n##############################################################################\n# Read MEG data\nn_runs = 4 # 4 for full data (use less to speed up computations)\nfname = op.join(data_path, 'sample_subject_%i_tsss_mc.fif')\nraws = [read_raw_fif(fname % block) for block in range(n_runs)]\nraw = concatenate_raws(raws)\n\nevents = mne.find_events(raw, min_duration=.002)\n\nevents = events[events[:, 2] <= max_trigger]\n\n##############################################################################\n# Epoch data\npicks = mne.pick_types(raw.info, meg=True)\nepochs = mne.Epochs(raw, events=events, event_id=event_id, baseline=None,\n picks=picks, tmin=-.1, tmax=.500, preload=True)\n\n##############################################################################\n# Let's plot some conditions\nepochs['face'].average().plot()\nepochs['not-face'].average().plot()\n\n##############################################################################\n# Representational Similarity Analysis (RSA) is a neuroimaging-specific\n# appelation to refer to statistics applied to the confusion matrix\n# also referred to as the representational dissimilarity matrices (RDM).\n#\n# Compared to the approach from Cichy et al. 
we'll use a multiclass\n# classifier (Multinomial Logistic Regression) while the paper uses\n# all pairwise binary classification task to make the RDM.\n# Also we use here the ROC-AUC as performance metric while the\n# paper uses accuracy. Finally here for the sake of time we use\n# RSA on a window of data while Cichy et al. did it for all time\n# instants separately.\n\n# Classify using the average signal in the window 50ms to 300ms\n# to focus the classifier on the time interval with best SNR.\nclf = make_pipeline(StandardScaler(),\n LogisticRegression(C=1, solver='lbfgs'))\nX = epochs.copy().crop(0.05, 0.3).get_data().mean(axis=2)\ny = epochs.events[:, 2]\n\nclasses = set(y)\ncv = StratifiedKFold(n_splits=5, random_state=0, shuffle=True)\n\n# Compute confusion matrix for each cross-validation fold\ny_pred = np.zeros((len(y), len(classes)))\nfor train, test in cv.split(X, y):\n # Fit\n clf.fit(X[train], y[train])\n # Probabilistic prediction (necessary for ROC-AUC scoring metric)\n y_pred[test] = clf.predict_proba(X[test])\n\n##############################################################################\n# Compute confusion matrix using ROC-AUC\nconfusion = np.zeros((len(classes), len(classes)))\nfor ii, train_class in enumerate(classes):\n for jj in range(ii, len(classes)):\n confusion[ii, jj] = roc_auc_score(y == train_class, y_pred[:, jj])\n confusion[jj, ii] = confusion[ii, jj]\n\n##############################################################################\n# Plot\nlabels = [''] * 5 + ['face'] + [''] * 11 + ['bodypart'] + [''] * 6\nfig, ax = plt.subplots(1)\nim = ax.matshow(confusion, cmap='RdBu_r', clim=[0.3, 0.7])\nax.set_yticks(range(len(classes)))\nax.set_yticklabels(labels)\nax.set_xticks(range(len(classes)))\nax.set_xticklabels(labels, rotation=40, ha='left')\nax.axhline(11.5, color='k')\nax.axvline(11.5, color='k')\nplt.colorbar(im)\nplt.tight_layout()\nplt.show()\n\n##############################################################################\n# Confusion matrix related to mental representations have been historically\n# summarized with dimensionality reduction using multi-dimensional scaling [1].\n# See how the face samples cluster together.\nfig, ax = plt.subplots(1)\nmds = MDS(2, random_state=0, dissimilarity='precomputed')\nchance = 0.5\nsummary = mds.fit_transform(chance - confusion)\ncmap = plt.get_cmap('rainbow')\ncolors = ['r', 'b']\nnames = list(conds['condition'].values)\nfor color, name in zip(colors, set(names)):\n sel = np.where([this_name == name for this_name in names])[0]\n size = 500 if name == 'human face' else 100\n ax.scatter(summary[sel, 0], summary[sel, 1], s=size,\n facecolors=color, label=name, edgecolors='k')\nax.axis('off')\nax.legend(loc='lower right', scatterpoints=1, ncol=2)\nplt.tight_layout()\nplt.show()\n", "path": "examples/decoding/decoding_rsa.py"}]}
| 2,552 | 113 |
gh_patches_debug_2979 | rasdani/github-patches | git_diff | pypi__warehouse-6426 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Invalid HTML for select element
This html is generated by the Python form code.
template:
https://github.com/pypa/warehouse/blob/master/warehouse/templates/manage/roles.html
field:
`{{ form.role_name }}`
ERROR: The first child “option” element of a “select” element with a “required” attribute, and without a “multiple” attribute, and without a “size” attribute whose value is greater than “1”, must have either an empty “value” attribute, or must have no text content. Consider either adding a placeholder option label, or adding a “size” attribute with a value equal to the number of “option” elements. (433)
Reference:
https://maxdesign.com.au/articles/select-required/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `warehouse/manage/forms.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import json
14
15 import wtforms
16
17 import warehouse.utils.otp as otp
18 import warehouse.utils.webauthn as webauthn
19
20 from warehouse import forms
21 from warehouse.accounts.forms import (
22 NewEmailMixin,
23 NewPasswordMixin,
24 PasswordMixin,
25 TOTPValueMixin,
26 WebAuthnCredentialMixin,
27 )
28
29
30 class RoleNameMixin:
31
32 role_name = wtforms.SelectField(
33 "Select role",
34 choices=[("Maintainer", "Maintainer"), ("Owner", "Owner")],
35 validators=[wtforms.validators.DataRequired(message="Select role")],
36 )
37
38
39 class UsernameMixin:
40
41 username = wtforms.StringField(
42 validators=[wtforms.validators.DataRequired(message="Specify username")]
43 )
44
45 def validate_username(self, field):
46 userid = self.user_service.find_userid(field.data)
47
48 if userid is None:
49 raise wtforms.validators.ValidationError(
50 "No user found with that username. Try again."
51 )
52
53
54 class CreateRoleForm(RoleNameMixin, UsernameMixin, forms.Form):
55 def __init__(self, *args, user_service, **kwargs):
56 super().__init__(*args, **kwargs)
57 self.user_service = user_service
58
59
60 class ChangeRoleForm(RoleNameMixin, forms.Form):
61 pass
62
63
64 class SaveAccountForm(forms.Form):
65
66 __params__ = ["name"]
67
68 name = wtforms.StringField()
69
70
71 class AddEmailForm(NewEmailMixin, forms.Form):
72
73 __params__ = ["email"]
74
75 def __init__(self, *args, user_service, user_id, **kwargs):
76 super().__init__(*args, **kwargs)
77 self.user_service = user_service
78 self.user_id = user_id
79
80
81 class ChangePasswordForm(PasswordMixin, NewPasswordMixin, forms.Form):
82
83 __params__ = ["password", "new_password", "password_confirm"]
84
85 def __init__(self, *args, user_service, **kwargs):
86 super().__init__(*args, **kwargs)
87 self.user_service = user_service
88
89
90 class DeleteTOTPForm(UsernameMixin, forms.Form):
91
92 __params__ = ["confirm_username"]
93
94 def __init__(self, *args, user_service, **kwargs):
95 super().__init__(*args, **kwargs)
96 self.user_service = user_service
97
98
99 class ProvisionTOTPForm(TOTPValueMixin, forms.Form):
100
101 __params__ = ["totp_value"]
102
103 def __init__(self, *args, totp_secret, **kwargs):
104 super().__init__(*args, **kwargs)
105 self.totp_secret = totp_secret
106
107 def validate_totp_value(self, field):
108 totp_value = field.data.encode("utf8")
109 if not otp.verify_totp(self.totp_secret, totp_value):
110 raise wtforms.validators.ValidationError("Invalid TOTP code. Try again?")
111
112
113 class DeleteWebAuthnForm(forms.Form):
114 __params__ = ["confirm_device_name"]
115
116 label = wtforms.StringField(
117 validators=[
118 wtforms.validators.DataRequired(message="Specify a device name"),
119 wtforms.validators.Length(
120 max=64, message=("Label must be 64 characters or less")
121 ),
122 ]
123 )
124
125 def __init__(self, *args, user_service, user_id, **kwargs):
126 super().__init__(*args, **kwargs)
127 self.user_service = user_service
128 self.user_id = user_id
129
130 def validate_label(self, field):
131 label = field.data
132
133 webauthn = self.user_service.get_webauthn_by_label(self.user_id, label)
134 if webauthn is None:
135 raise wtforms.validators.ValidationError("No WebAuthn key with given label")
136 self.webauthn = webauthn
137
138
139 class ProvisionWebAuthnForm(WebAuthnCredentialMixin, forms.Form):
140 __params__ = ["label", "credential"]
141
142 label = wtforms.StringField(
143 validators=[
144 wtforms.validators.DataRequired(message="Specify a label"),
145 wtforms.validators.Length(
146 max=64, message=("Label must be 64 characters or less")
147 ),
148 ]
149 )
150
151 def __init__(
152 self, *args, user_service, user_id, challenge, rp_id, origin, **kwargs
153 ):
154 super().__init__(*args, **kwargs)
155 self.user_service = user_service
156 self.user_id = user_id
157 self.challenge = challenge
158 self.rp_id = rp_id
159 self.origin = origin
160
161 def validate_credential(self, field):
162 try:
163 credential_dict = json.loads(field.data.encode("utf8"))
164 except json.JSONDecodeError:
165 raise wtforms.validators.ValidationError(
166 "Invalid WebAuthn credential: Bad payload"
167 )
168
169 try:
170 validated_credential = self.user_service.verify_webauthn_credential(
171 credential_dict,
172 challenge=self.challenge,
173 rp_id=self.rp_id,
174 origin=self.origin,
175 )
176 except webauthn.RegistrationRejectedException as e:
177 raise wtforms.validators.ValidationError(str(e))
178
179 self.validated_credential = validated_credential
180
181 def validate_label(self, field):
182 label = field.data
183
184 if self.user_service.get_webauthn_by_label(self.user_id, label) is not None:
185 raise wtforms.validators.ValidationError(f"Label '{label}' already in use")
186
187
188 class CreateMacaroonForm(forms.Form):
189 __params__ = ["description", "token_scope"]
190
191 def __init__(self, *args, user_id, macaroon_service, project_names, **kwargs):
192 super().__init__(*args, **kwargs)
193 self.user_id = user_id
194 self.macaroon_service = macaroon_service
195 self.project_names = project_names
196
197 description = wtforms.StringField(
198 validators=[
199 wtforms.validators.DataRequired(message="Specify a token name"),
200 wtforms.validators.Length(
201 max=100, message="Description must be 100 characters or less"
202 ),
203 ]
204 )
205
206 token_scope = wtforms.StringField(
207 validators=[wtforms.validators.DataRequired(message="Specify the token scope")]
208 )
209
210 def validate_description(self, field):
211 description = field.data
212
213 if (
214 self.macaroon_service.get_macaroon_by_description(self.user_id, description)
215 is not None
216 ):
217 raise wtforms.validators.ValidationError("API token name already in use")
218
219 def validate_token_scope(self, field):
220 scope = field.data
221
222 try:
223 _, scope_kind = scope.split(":", 1)
224 except ValueError:
225 raise wtforms.ValidationError(f"Unknown token scope: {scope}")
226
227 if scope_kind == "unspecified":
228 raise wtforms.ValidationError(f"Specify the token scope")
229
230 if scope_kind == "user":
231 self.validated_scope = scope_kind
232 return
233
234 try:
235 scope_kind, scope_value = scope_kind.split(":", 1)
236 except ValueError:
237 raise wtforms.ValidationError(f"Unknown token scope: {scope}")
238
239 if scope_kind != "project":
240 raise wtforms.ValidationError(f"Unknown token scope: {scope}")
241 if scope_value not in self.project_names:
242 raise wtforms.ValidationError(
243 f"Unknown or invalid project name: {scope_value}"
244 )
245
246 self.validated_scope = {"projects": [scope_value]}
247
248
249 class DeleteMacaroonForm(forms.Form):
250 __params__ = ["macaroon_id"]
251
252 macaroon_id = wtforms.StringField(
253 validators=[wtforms.validators.DataRequired(message="Identifier required")]
254 )
255
256 def __init__(self, *args, macaroon_service, **kwargs):
257 super().__init__(*args, **kwargs)
258 self.macaroon_service = macaroon_service
259
260 def validate_macaroon_id(self, field):
261 macaroon_id = field.data
262 if self.macaroon_service.find_macaroon(macaroon_id) is None:
263 raise wtforms.validators.ValidationError("No such macaroon")
264
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/warehouse/manage/forms.py b/warehouse/manage/forms.py
--- a/warehouse/manage/forms.py
+++ b/warehouse/manage/forms.py
@@ -31,7 +31,7 @@
role_name = wtforms.SelectField(
"Select role",
- choices=[("Maintainer", "Maintainer"), ("Owner", "Owner")],
+ choices=[("", "Select role"), ("Maintainer", "Maintainer"), ("Owner", "Owner")],
validators=[wtforms.validators.DataRequired(message="Select role")],
)
|
{"golden_diff": "diff --git a/warehouse/manage/forms.py b/warehouse/manage/forms.py\n--- a/warehouse/manage/forms.py\n+++ b/warehouse/manage/forms.py\n@@ -31,7 +31,7 @@\n \n role_name = wtforms.SelectField(\n \"Select role\",\n- choices=[(\"Maintainer\", \"Maintainer\"), (\"Owner\", \"Owner\")],\n+ choices=[(\"\", \"Select role\"), (\"Maintainer\", \"Maintainer\"), (\"Owner\", \"Owner\")],\n validators=[wtforms.validators.DataRequired(message=\"Select role\")],\n )\n", "issue": "Invalid HTML for select element\nThis html is generated by the Python form code.\r\n\r\ntemplate:\r\nhttps://github.com/pypa/warehouse/blob/master/warehouse/templates/manage/roles.html\r\n\r\nfield:\r\n`{{ form.role_name }}`\r\n\r\nERROR: The first child \u201coption\u201d element of a \u201cselect\u201d element with a \u201crequired\u201d attribute, and without a \u201cmultiple\u201d attribute, and without a \u201csize\u201d attribute whose value is greater than \u201c1\u201d, must have either an empty \u201cvalue\u201d attribute, or must have no text content. Consider either adding a placeholder option label, or adding a \u201csize\u201d attribute with a value equal to the number of \u201coption\u201d elements. (433)\r\n\r\nReference:\r\nhttps://maxdesign.com.au/articles/select-required/\r\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\n\nimport wtforms\n\nimport warehouse.utils.otp as otp\nimport warehouse.utils.webauthn as webauthn\n\nfrom warehouse import forms\nfrom warehouse.accounts.forms import (\n NewEmailMixin,\n NewPasswordMixin,\n PasswordMixin,\n TOTPValueMixin,\n WebAuthnCredentialMixin,\n)\n\n\nclass RoleNameMixin:\n\n role_name = wtforms.SelectField(\n \"Select role\",\n choices=[(\"Maintainer\", \"Maintainer\"), (\"Owner\", \"Owner\")],\n validators=[wtforms.validators.DataRequired(message=\"Select role\")],\n )\n\n\nclass UsernameMixin:\n\n username = wtforms.StringField(\n validators=[wtforms.validators.DataRequired(message=\"Specify username\")]\n )\n\n def validate_username(self, field):\n userid = self.user_service.find_userid(field.data)\n\n if userid is None:\n raise wtforms.validators.ValidationError(\n \"No user found with that username. 
Try again.\"\n )\n\n\nclass CreateRoleForm(RoleNameMixin, UsernameMixin, forms.Form):\n def __init__(self, *args, user_service, **kwargs):\n super().__init__(*args, **kwargs)\n self.user_service = user_service\n\n\nclass ChangeRoleForm(RoleNameMixin, forms.Form):\n pass\n\n\nclass SaveAccountForm(forms.Form):\n\n __params__ = [\"name\"]\n\n name = wtforms.StringField()\n\n\nclass AddEmailForm(NewEmailMixin, forms.Form):\n\n __params__ = [\"email\"]\n\n def __init__(self, *args, user_service, user_id, **kwargs):\n super().__init__(*args, **kwargs)\n self.user_service = user_service\n self.user_id = user_id\n\n\nclass ChangePasswordForm(PasswordMixin, NewPasswordMixin, forms.Form):\n\n __params__ = [\"password\", \"new_password\", \"password_confirm\"]\n\n def __init__(self, *args, user_service, **kwargs):\n super().__init__(*args, **kwargs)\n self.user_service = user_service\n\n\nclass DeleteTOTPForm(UsernameMixin, forms.Form):\n\n __params__ = [\"confirm_username\"]\n\n def __init__(self, *args, user_service, **kwargs):\n super().__init__(*args, **kwargs)\n self.user_service = user_service\n\n\nclass ProvisionTOTPForm(TOTPValueMixin, forms.Form):\n\n __params__ = [\"totp_value\"]\n\n def __init__(self, *args, totp_secret, **kwargs):\n super().__init__(*args, **kwargs)\n self.totp_secret = totp_secret\n\n def validate_totp_value(self, field):\n totp_value = field.data.encode(\"utf8\")\n if not otp.verify_totp(self.totp_secret, totp_value):\n raise wtforms.validators.ValidationError(\"Invalid TOTP code. Try again?\")\n\n\nclass DeleteWebAuthnForm(forms.Form):\n __params__ = [\"confirm_device_name\"]\n\n label = wtforms.StringField(\n validators=[\n wtforms.validators.DataRequired(message=\"Specify a device name\"),\n wtforms.validators.Length(\n max=64, message=(\"Label must be 64 characters or less\")\n ),\n ]\n )\n\n def __init__(self, *args, user_service, user_id, **kwargs):\n super().__init__(*args, **kwargs)\n self.user_service = user_service\n self.user_id = user_id\n\n def validate_label(self, field):\n label = field.data\n\n webauthn = self.user_service.get_webauthn_by_label(self.user_id, label)\n if webauthn is None:\n raise wtforms.validators.ValidationError(\"No WebAuthn key with given label\")\n self.webauthn = webauthn\n\n\nclass ProvisionWebAuthnForm(WebAuthnCredentialMixin, forms.Form):\n __params__ = [\"label\", \"credential\"]\n\n label = wtforms.StringField(\n validators=[\n wtforms.validators.DataRequired(message=\"Specify a label\"),\n wtforms.validators.Length(\n max=64, message=(\"Label must be 64 characters or less\")\n ),\n ]\n )\n\n def __init__(\n self, *args, user_service, user_id, challenge, rp_id, origin, **kwargs\n ):\n super().__init__(*args, **kwargs)\n self.user_service = user_service\n self.user_id = user_id\n self.challenge = challenge\n self.rp_id = rp_id\n self.origin = origin\n\n def validate_credential(self, field):\n try:\n credential_dict = json.loads(field.data.encode(\"utf8\"))\n except json.JSONDecodeError:\n raise wtforms.validators.ValidationError(\n \"Invalid WebAuthn credential: Bad payload\"\n )\n\n try:\n validated_credential = self.user_service.verify_webauthn_credential(\n credential_dict,\n challenge=self.challenge,\n rp_id=self.rp_id,\n origin=self.origin,\n )\n except webauthn.RegistrationRejectedException as e:\n raise wtforms.validators.ValidationError(str(e))\n\n self.validated_credential = validated_credential\n\n def validate_label(self, field):\n label = field.data\n\n if self.user_service.get_webauthn_by_label(self.user_id, label) 
is not None:\n raise wtforms.validators.ValidationError(f\"Label '{label}' already in use\")\n\n\nclass CreateMacaroonForm(forms.Form):\n __params__ = [\"description\", \"token_scope\"]\n\n def __init__(self, *args, user_id, macaroon_service, project_names, **kwargs):\n super().__init__(*args, **kwargs)\n self.user_id = user_id\n self.macaroon_service = macaroon_service\n self.project_names = project_names\n\n description = wtforms.StringField(\n validators=[\n wtforms.validators.DataRequired(message=\"Specify a token name\"),\n wtforms.validators.Length(\n max=100, message=\"Description must be 100 characters or less\"\n ),\n ]\n )\n\n token_scope = wtforms.StringField(\n validators=[wtforms.validators.DataRequired(message=\"Specify the token scope\")]\n )\n\n def validate_description(self, field):\n description = field.data\n\n if (\n self.macaroon_service.get_macaroon_by_description(self.user_id, description)\n is not None\n ):\n raise wtforms.validators.ValidationError(\"API token name already in use\")\n\n def validate_token_scope(self, field):\n scope = field.data\n\n try:\n _, scope_kind = scope.split(\":\", 1)\n except ValueError:\n raise wtforms.ValidationError(f\"Unknown token scope: {scope}\")\n\n if scope_kind == \"unspecified\":\n raise wtforms.ValidationError(f\"Specify the token scope\")\n\n if scope_kind == \"user\":\n self.validated_scope = scope_kind\n return\n\n try:\n scope_kind, scope_value = scope_kind.split(\":\", 1)\n except ValueError:\n raise wtforms.ValidationError(f\"Unknown token scope: {scope}\")\n\n if scope_kind != \"project\":\n raise wtforms.ValidationError(f\"Unknown token scope: {scope}\")\n if scope_value not in self.project_names:\n raise wtforms.ValidationError(\n f\"Unknown or invalid project name: {scope_value}\"\n )\n\n self.validated_scope = {\"projects\": [scope_value]}\n\n\nclass DeleteMacaroonForm(forms.Form):\n __params__ = [\"macaroon_id\"]\n\n macaroon_id = wtforms.StringField(\n validators=[wtforms.validators.DataRequired(message=\"Identifier required\")]\n )\n\n def __init__(self, *args, macaroon_service, **kwargs):\n super().__init__(*args, **kwargs)\n self.macaroon_service = macaroon_service\n\n def validate_macaroon_id(self, field):\n macaroon_id = field.data\n if self.macaroon_service.find_macaroon(macaroon_id) is None:\n raise wtforms.validators.ValidationError(\"No such macaroon\")\n", "path": "warehouse/manage/forms.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\n\nimport wtforms\n\nimport warehouse.utils.otp as otp\nimport warehouse.utils.webauthn as webauthn\n\nfrom warehouse import forms\nfrom warehouse.accounts.forms import (\n NewEmailMixin,\n NewPasswordMixin,\n PasswordMixin,\n TOTPValueMixin,\n WebAuthnCredentialMixin,\n)\n\n\nclass RoleNameMixin:\n\n role_name = wtforms.SelectField(\n \"Select role\",\n choices=[(\"\", \"Select role\"), (\"Maintainer\", \"Maintainer\"), (\"Owner\", \"Owner\")],\n validators=[wtforms.validators.DataRequired(message=\"Select role\")],\n 
)\n\n\nclass UsernameMixin:\n\n username = wtforms.StringField(\n validators=[wtforms.validators.DataRequired(message=\"Specify username\")]\n )\n\n def validate_username(self, field):\n userid = self.user_service.find_userid(field.data)\n\n if userid is None:\n raise wtforms.validators.ValidationError(\n \"No user found with that username. Try again.\"\n )\n\n\nclass CreateRoleForm(RoleNameMixin, UsernameMixin, forms.Form):\n def __init__(self, *args, user_service, **kwargs):\n super().__init__(*args, **kwargs)\n self.user_service = user_service\n\n\nclass ChangeRoleForm(RoleNameMixin, forms.Form):\n pass\n\n\nclass SaveAccountForm(forms.Form):\n\n __params__ = [\"name\"]\n\n name = wtforms.StringField()\n\n\nclass AddEmailForm(NewEmailMixin, forms.Form):\n\n __params__ = [\"email\"]\n\n def __init__(self, *args, user_service, user_id, **kwargs):\n super().__init__(*args, **kwargs)\n self.user_service = user_service\n self.user_id = user_id\n\n\nclass ChangePasswordForm(PasswordMixin, NewPasswordMixin, forms.Form):\n\n __params__ = [\"password\", \"new_password\", \"password_confirm\"]\n\n def __init__(self, *args, user_service, **kwargs):\n super().__init__(*args, **kwargs)\n self.user_service = user_service\n\n\nclass DeleteTOTPForm(UsernameMixin, forms.Form):\n\n __params__ = [\"confirm_username\"]\n\n def __init__(self, *args, user_service, **kwargs):\n super().__init__(*args, **kwargs)\n self.user_service = user_service\n\n\nclass ProvisionTOTPForm(TOTPValueMixin, forms.Form):\n\n __params__ = [\"totp_value\"]\n\n def __init__(self, *args, totp_secret, **kwargs):\n super().__init__(*args, **kwargs)\n self.totp_secret = totp_secret\n\n def validate_totp_value(self, field):\n totp_value = field.data.encode(\"utf8\")\n if not otp.verify_totp(self.totp_secret, totp_value):\n raise wtforms.validators.ValidationError(\"Invalid TOTP code. 
Try again?\")\n\n\nclass DeleteWebAuthnForm(forms.Form):\n __params__ = [\"confirm_device_name\"]\n\n label = wtforms.StringField(\n validators=[\n wtforms.validators.DataRequired(message=\"Specify a device name\"),\n wtforms.validators.Length(\n max=64, message=(\"Label must be 64 characters or less\")\n ),\n ]\n )\n\n def __init__(self, *args, user_service, user_id, **kwargs):\n super().__init__(*args, **kwargs)\n self.user_service = user_service\n self.user_id = user_id\n\n def validate_label(self, field):\n label = field.data\n\n webauthn = self.user_service.get_webauthn_by_label(self.user_id, label)\n if webauthn is None:\n raise wtforms.validators.ValidationError(\"No WebAuthn key with given label\")\n self.webauthn = webauthn\n\n\nclass ProvisionWebAuthnForm(WebAuthnCredentialMixin, forms.Form):\n __params__ = [\"label\", \"credential\"]\n\n label = wtforms.StringField(\n validators=[\n wtforms.validators.DataRequired(message=\"Specify a label\"),\n wtforms.validators.Length(\n max=64, message=(\"Label must be 64 characters or less\")\n ),\n ]\n )\n\n def __init__(\n self, *args, user_service, user_id, challenge, rp_id, origin, **kwargs\n ):\n super().__init__(*args, **kwargs)\n self.user_service = user_service\n self.user_id = user_id\n self.challenge = challenge\n self.rp_id = rp_id\n self.origin = origin\n\n def validate_credential(self, field):\n try:\n credential_dict = json.loads(field.data.encode(\"utf8\"))\n except json.JSONDecodeError:\n raise wtforms.validators.ValidationError(\n \"Invalid WebAuthn credential: Bad payload\"\n )\n\n try:\n validated_credential = self.user_service.verify_webauthn_credential(\n credential_dict,\n challenge=self.challenge,\n rp_id=self.rp_id,\n origin=self.origin,\n )\n except webauthn.RegistrationRejectedException as e:\n raise wtforms.validators.ValidationError(str(e))\n\n self.validated_credential = validated_credential\n\n def validate_label(self, field):\n label = field.data\n\n if self.user_service.get_webauthn_by_label(self.user_id, label) is not None:\n raise wtforms.validators.ValidationError(f\"Label '{label}' already in use\")\n\n\nclass CreateMacaroonForm(forms.Form):\n __params__ = [\"description\", \"token_scope\"]\n\n def __init__(self, *args, user_id, macaroon_service, project_names, **kwargs):\n super().__init__(*args, **kwargs)\n self.user_id = user_id\n self.macaroon_service = macaroon_service\n self.project_names = project_names\n\n description = wtforms.StringField(\n validators=[\n wtforms.validators.DataRequired(message=\"Specify a token name\"),\n wtforms.validators.Length(\n max=100, message=\"Description must be 100 characters or less\"\n ),\n ]\n )\n\n token_scope = wtforms.StringField(\n validators=[wtforms.validators.DataRequired(message=\"Specify the token scope\")]\n )\n\n def validate_description(self, field):\n description = field.data\n\n if (\n self.macaroon_service.get_macaroon_by_description(self.user_id, description)\n is not None\n ):\n raise wtforms.validators.ValidationError(\"API token name already in use\")\n\n def validate_token_scope(self, field):\n scope = field.data\n\n try:\n _, scope_kind = scope.split(\":\", 1)\n except ValueError:\n raise wtforms.ValidationError(f\"Unknown token scope: {scope}\")\n\n if scope_kind == \"unspecified\":\n raise wtforms.ValidationError(f\"Specify the token scope\")\n\n if scope_kind == \"user\":\n self.validated_scope = scope_kind\n return\n\n try:\n scope_kind, scope_value = scope_kind.split(\":\", 1)\n except ValueError:\n raise 
wtforms.ValidationError(f\"Unknown token scope: {scope}\")\n\n if scope_kind != \"project\":\n raise wtforms.ValidationError(f\"Unknown token scope: {scope}\")\n if scope_value not in self.project_names:\n raise wtforms.ValidationError(\n f\"Unknown or invalid project name: {scope_value}\"\n )\n\n self.validated_scope = {\"projects\": [scope_value]}\n\n\nclass DeleteMacaroonForm(forms.Form):\n __params__ = [\"macaroon_id\"]\n\n macaroon_id = wtforms.StringField(\n validators=[wtforms.validators.DataRequired(message=\"Identifier required\")]\n )\n\n def __init__(self, *args, macaroon_service, **kwargs):\n super().__init__(*args, **kwargs)\n self.macaroon_service = macaroon_service\n\n def validate_macaroon_id(self, field):\n macaroon_id = field.data\n if self.macaroon_service.find_macaroon(macaroon_id) is None:\n raise wtforms.validators.ValidationError(\"No such macaroon\")\n", "path": "warehouse/manage/forms.py"}]}
| 2,930 | 118 |
gh_patches_debug_4344 | rasdani/github-patches | git_diff | google__turbinia-743 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update google-cloud-logging package
We need to use version >= 2 because of dftimewolf dependencies, and we need to use <=2.0.2 for the google cloud error package, but when trying to use that version I get the following:
```
$ turbiniactl -d server
Traceback (most recent call last):
File "/home/aaronpeterson/.local/share/virtualenvs/turbinia-aeSTftCa/bin/turbiniactl", line 11, in <module>
load_entry_point('turbinia', 'console_scripts', 'turbiniactl')()
File "/home/aaronpeterson/src/turbinia/turbinia/turbiniactl.py", line 428, in main
from turbinia.lib import google_cloud
File "/home/aaronpeterson/src/turbinia/turbinia/lib/google_cloud.py", line 33, in <module>
from google.cloud.logging import _helpers
ImportError: cannot import name '_helpers' from 'google.cloud.logging' (/home/aaronpeterson/.local/share/virtualenvs/turbinia-aeSTftCa/lib/python3.8/site-packages/google/cloud/logging/__init__.py)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `turbinia/lib/google_cloud.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright 2017 Google Inc.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """Google Cloud resources library."""
16
17 from __future__ import unicode_literals
18
19 import datetime
20 from datetime import timedelta
21 from turbinia.config import DATETIME_FORMAT
22 import logging
23 import os
24 import json
25
26 from google.cloud import logging as cloud_logging
27 from google.cloud import error_reporting
28 from google.cloud import exceptions
29 from google.api_core import exceptions as google_api_exceptions
30 from googleapiclient.errors import HttpError
31
32 from turbinia import TurbiniaException
33 from google.cloud.logging import _helpers
34 from google.cloud.logging.handlers.transports.background_thread import _Worker
35
36 logger = logging.getLogger('turbinia')
37
38
39 def setup_stackdriver_handler(project_id, origin):
40 """Set up Google Cloud Stackdriver Logging
41
42 The Google Cloud Logging library will attach itself as a
43 handler to the default Python logging module.
44
45 Attributes:
46 project_id: The name of the Google Cloud project.
47 origin: Where the log is originating from.(i.e. server, worker)
48 Raises:
49 TurbiniaException: When an error occurs enabling GCP Stackdriver Logging.
50 """
51
52 # Patching cloud logging to allow custom fields
53 def my_enqueue(
54 self, record, message, resource=None, labels=None, trace=None,
55 span_id=None):
56 queue_entry = {
57 "info": {
58 "message": message,
59 "python_logger": record.name,
60 "origin": origin
61 },
62 "severity": _helpers._normalize_severity(record.levelno),
63 "resource": resource,
64 "labels": labels,
65 "trace": trace,
66 "span_id": span_id,
67 "timestamp": datetime.datetime.utcfromtimestamp(record.created),
68 }
69
70 self._queue.put_nowait(queue_entry)
71
72 _Worker.enqueue = my_enqueue
73
74 try:
75 client = cloud_logging.Client(project=project_id)
76 cloud_handler = cloud_logging.handlers.CloudLoggingHandler(client)
77 logger.addHandler(cloud_handler)
78
79 except exceptions.GoogleCloudError as exception:
80 msg = 'Error enabling Stackdriver Logging: {0:s}'.format(str(exception))
81 raise TurbiniaException(msg)
82
83
84 def setup_stackdriver_traceback(project_id):
85 """Set up Google Cloud Error Reporting
86
87 This method will enable Google Cloud Error Reporting.
88 All exceptions that occur within a Turbinia Task will be logged.
89
90 Attributes:
91 project_id: The name of the Google Cloud project.
92 Raises:
93 TurbiniaException: When an error occurs enabling GCP Error Reporting.
94 """
95 try:
96 client = error_reporting.Client(project=project_id)
97 except exceptions.GoogleCloudError as exception:
98 msg = 'Error enabling GCP Error Reporting: {0:s}'.format(str(exception))
99 raise TurbiniaException(msg)
100 return client
101
102
103 def get_logs(project_id, output_dir=None, days=1, query=None):
104 """Copies stackdriver logs to a local directory.
105
106 Attributes:
107 project_id: The name of the Google Cloud project.
108 output_dir: The directory where logs are stored.
109 query: Query to use to pull stackdriver logs.
110 days: number of days we want history for.
111 Raises:
112 TurbiniaException: When an error happens pulling the logs.
113 """
114 if not query:
115 query = 'jsonPayload.python_logger="turbinia"'
116 start_time = datetime.datetime.now() - timedelta(days=days)
117 start_string = start_time.strftime(DATETIME_FORMAT)
118 complete_query = '{0:s} timestamp>="{1:s}"'.format(query, start_string)
119 if output_dir:
120 file_path = os.path.join(
121 output_dir, 'turbinia_stackdriver_logs_{0:s}.jsonl'.format(
122 datetime.datetime.now().strftime('%s')))
123 output_file = open(file_path, 'w')
124 logger.info('Writing the logs to {0:s}'.format(file_path))
125 try:
126 client = cloud_logging.Client(project=project_id)
127 logger.info(
128 'Collecting the stackdriver logs with the following query: {0:s}'
129 .format(complete_query))
130
131 for entry in client.list_entries(order_by=cloud_logging.DESCENDING,
132 filter_=complete_query):
133 if not output_dir:
134 logger.info(json.dumps(entry.to_api_repr()))
135 else:
136 output_file.write(json.dumps(entry.to_api_repr()))
137 output_file.write('\n')
138 if output_dir:
139 output_file.close()
140 except google_api_exceptions.InvalidArgument as exception:
141 msg = 'Unable to parse query {0!s} with error {1!s}'.format(
142 query, exception)
143 raise TurbiniaException(msg)
144 except HttpError as exception:
145 msg = 'HTTP error querying logs. Make sure you have the right access on the project.{0!s}'.format(
146 exception)
147 raise TurbiniaException(msg)
148 except google_api_exceptions.GoogleAPIError as exception:
149 msg = 'Something went wrong with the API. {0!s}'.format(exception)
150 raise TurbiniaException(msg)
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/turbinia/lib/google_cloud.py b/turbinia/lib/google_cloud.py
--- a/turbinia/lib/google_cloud.py
+++ b/turbinia/lib/google_cloud.py
@@ -30,8 +30,8 @@
from googleapiclient.errors import HttpError
from turbinia import TurbiniaException
-from google.cloud.logging import _helpers
-from google.cloud.logging.handlers.transports.background_thread import _Worker
+from google.cloud.logging_v2 import _helpers
+from google.cloud.logging_v2.handlers.transports.background_thread import _Worker
logger = logging.getLogger('turbinia')
|
{"golden_diff": "diff --git a/turbinia/lib/google_cloud.py b/turbinia/lib/google_cloud.py\n--- a/turbinia/lib/google_cloud.py\n+++ b/turbinia/lib/google_cloud.py\n@@ -30,8 +30,8 @@\n from googleapiclient.errors import HttpError\n \n from turbinia import TurbiniaException\n-from google.cloud.logging import _helpers\n-from google.cloud.logging.handlers.transports.background_thread import _Worker\n+from google.cloud.logging_v2 import _helpers\n+from google.cloud.logging_v2.handlers.transports.background_thread import _Worker\n \n logger = logging.getLogger('turbinia')\n", "issue": "Update google-cloud-logging package\nWe need to use version >= 2 because of dftimewolf dependencies, and we need to use <=2.0.2 for the google cloud error package, but when trying to use that version I get the following:\r\n\r\n```\r\n$ turbiniactl -d server\r\nTraceback (most recent call last):\r\n File \"/home/aaronpeterson/.local/share/virtualenvs/turbinia-aeSTftCa/bin/turbiniactl\", line 11, in <module>\r\n load_entry_point('turbinia', 'console_scripts', 'turbiniactl')()\r\n File \"/home/aaronpeterson/src/turbinia/turbinia/turbiniactl.py\", line 428, in main\r\n from turbinia.lib import google_cloud\r\n File \"/home/aaronpeterson/src/turbinia/turbinia/lib/google_cloud.py\", line 33, in <module>\r\n from google.cloud.logging import _helpers\r\nImportError: cannot import name '_helpers' from 'google.cloud.logging' (/home/aaronpeterson/.local/share/virtualenvs/turbinia-aeSTftCa/lib/python3.8/site-packages/google/cloud/logging/__init__.py)\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Google Cloud resources library.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport datetime\nfrom datetime import timedelta\nfrom turbinia.config import DATETIME_FORMAT\nimport logging\nimport os\nimport json\n\nfrom google.cloud import logging as cloud_logging\nfrom google.cloud import error_reporting\nfrom google.cloud import exceptions\nfrom google.api_core import exceptions as google_api_exceptions\nfrom googleapiclient.errors import HttpError\n\nfrom turbinia import TurbiniaException\nfrom google.cloud.logging import _helpers\nfrom google.cloud.logging.handlers.transports.background_thread import _Worker\n\nlogger = logging.getLogger('turbinia')\n\n\ndef setup_stackdriver_handler(project_id, origin):\n \"\"\"Set up Google Cloud Stackdriver Logging\n\n The Google Cloud Logging library will attach itself as a\n handler to the default Python logging module.\n\n Attributes:\n project_id: The name of the Google Cloud project.\n origin: Where the log is originating from.(i.e. 
server, worker)\n Raises:\n TurbiniaException: When an error occurs enabling GCP Stackdriver Logging.\n \"\"\"\n\n # Patching cloud logging to allow custom fields\n def my_enqueue(\n self, record, message, resource=None, labels=None, trace=None,\n span_id=None):\n queue_entry = {\n \"info\": {\n \"message\": message,\n \"python_logger\": record.name,\n \"origin\": origin\n },\n \"severity\": _helpers._normalize_severity(record.levelno),\n \"resource\": resource,\n \"labels\": labels,\n \"trace\": trace,\n \"span_id\": span_id,\n \"timestamp\": datetime.datetime.utcfromtimestamp(record.created),\n }\n\n self._queue.put_nowait(queue_entry)\n\n _Worker.enqueue = my_enqueue\n\n try:\n client = cloud_logging.Client(project=project_id)\n cloud_handler = cloud_logging.handlers.CloudLoggingHandler(client)\n logger.addHandler(cloud_handler)\n\n except exceptions.GoogleCloudError as exception:\n msg = 'Error enabling Stackdriver Logging: {0:s}'.format(str(exception))\n raise TurbiniaException(msg)\n\n\ndef setup_stackdriver_traceback(project_id):\n \"\"\"Set up Google Cloud Error Reporting\n\n This method will enable Google Cloud Error Reporting.\n All exceptions that occur within a Turbinia Task will be logged.\n\n Attributes:\n project_id: The name of the Google Cloud project.\n Raises:\n TurbiniaException: When an error occurs enabling GCP Error Reporting.\n \"\"\"\n try:\n client = error_reporting.Client(project=project_id)\n except exceptions.GoogleCloudError as exception:\n msg = 'Error enabling GCP Error Reporting: {0:s}'.format(str(exception))\n raise TurbiniaException(msg)\n return client\n\n\ndef get_logs(project_id, output_dir=None, days=1, query=None):\n \"\"\"Copies stackdriver logs to a local directory.\n\n Attributes:\n project_id: The name of the Google Cloud project.\n output_dir: The directory where logs are stored.\n query: Query to use to pull stackdriver logs. \n days: number of days we want history for.\n Raises:\n TurbiniaException: When an error happens pulling the logs.\n \"\"\"\n if not query:\n query = 'jsonPayload.python_logger=\"turbinia\"'\n start_time = datetime.datetime.now() - timedelta(days=days)\n start_string = start_time.strftime(DATETIME_FORMAT)\n complete_query = '{0:s} timestamp>=\"{1:s}\"'.format(query, start_string)\n if output_dir:\n file_path = os.path.join(\n output_dir, 'turbinia_stackdriver_logs_{0:s}.jsonl'.format(\n datetime.datetime.now().strftime('%s')))\n output_file = open(file_path, 'w')\n logger.info('Writing the logs to {0:s}'.format(file_path))\n try:\n client = cloud_logging.Client(project=project_id)\n logger.info(\n 'Collecting the stackdriver logs with the following query: {0:s}'\n .format(complete_query))\n\n for entry in client.list_entries(order_by=cloud_logging.DESCENDING,\n filter_=complete_query):\n if not output_dir:\n logger.info(json.dumps(entry.to_api_repr()))\n else:\n output_file.write(json.dumps(entry.to_api_repr()))\n output_file.write('\\n')\n if output_dir:\n output_file.close()\n except google_api_exceptions.InvalidArgument as exception:\n msg = 'Unable to parse query {0!s} with error {1!s}'.format(\n query, exception)\n raise TurbiniaException(msg)\n except HttpError as exception:\n msg = 'HTTP error querying logs. Make sure you have the right access on the project.{0!s}'.format(\n exception)\n raise TurbiniaException(msg)\n except google_api_exceptions.GoogleAPIError as exception:\n msg = 'Something went wrong with the API. 
{0!s}'.format(exception)\n raise TurbiniaException(msg)\n", "path": "turbinia/lib/google_cloud.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Google Cloud resources library.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport datetime\nfrom datetime import timedelta\nfrom turbinia.config import DATETIME_FORMAT\nimport logging\nimport os\nimport json\n\nfrom google.cloud import logging as cloud_logging\nfrom google.cloud import error_reporting\nfrom google.cloud import exceptions\nfrom google.api_core import exceptions as google_api_exceptions\nfrom googleapiclient.errors import HttpError\n\nfrom turbinia import TurbiniaException\nfrom google.cloud.logging_v2 import _helpers\nfrom google.cloud.logging_v2.handlers.transports.background_thread import _Worker\n\nlogger = logging.getLogger('turbinia')\n\n\ndef setup_stackdriver_handler(project_id, origin):\n \"\"\"Set up Google Cloud Stackdriver Logging\n\n The Google Cloud Logging library will attach itself as a\n handler to the default Python logging module.\n\n Attributes:\n project_id: The name of the Google Cloud project.\n origin: Where the log is originating from.(i.e. server, worker)\n Raises:\n TurbiniaException: When an error occurs enabling GCP Stackdriver Logging.\n \"\"\"\n\n # Patching cloud logging to allow custom fields\n def my_enqueue(\n self, record, message, resource=None, labels=None, trace=None,\n span_id=None):\n queue_entry = {\n \"info\": {\n \"message\": message,\n \"python_logger\": record.name,\n \"origin\": origin\n },\n \"severity\": _helpers._normalize_severity(record.levelno),\n \"resource\": resource,\n \"labels\": labels,\n \"trace\": trace,\n \"span_id\": span_id,\n \"timestamp\": datetime.datetime.utcfromtimestamp(record.created),\n }\n\n self._queue.put_nowait(queue_entry)\n\n _Worker.enqueue = my_enqueue\n\n try:\n client = cloud_logging.Client(project=project_id)\n cloud_handler = cloud_logging.handlers.CloudLoggingHandler(client)\n logger.addHandler(cloud_handler)\n\n except exceptions.GoogleCloudError as exception:\n msg = 'Error enabling Stackdriver Logging: {0:s}'.format(str(exception))\n raise TurbiniaException(msg)\n\n\ndef setup_stackdriver_traceback(project_id):\n \"\"\"Set up Google Cloud Error Reporting\n\n This method will enable Google Cloud Error Reporting.\n All exceptions that occur within a Turbinia Task will be logged.\n\n Attributes:\n project_id: The name of the Google Cloud project.\n Raises:\n TurbiniaException: When an error occurs enabling GCP Error Reporting.\n \"\"\"\n try:\n client = error_reporting.Client(project=project_id)\n except exceptions.GoogleCloudError as exception:\n msg = 'Error enabling GCP Error Reporting: {0:s}'.format(str(exception))\n raise TurbiniaException(msg)\n return client\n\n\ndef get_logs(project_id, output_dir=None, days=1, query=None):\n \"\"\"Copies stackdriver logs to a local directory.\n\n Attributes:\n project_id: The name of the Google Cloud 
project.\n output_dir: The directory where logs are stored.\n query: Query to use to pull stackdriver logs. \n days: number of days we want history for.\n Raises:\n TurbiniaException: When an error happens pulling the logs.\n \"\"\"\n if not query:\n query = 'jsonPayload.python_logger=\"turbinia\"'\n start_time = datetime.datetime.now() - timedelta(days=days)\n start_string = start_time.strftime(DATETIME_FORMAT)\n complete_query = '{0:s} timestamp>=\"{1:s}\"'.format(query, start_string)\n if output_dir:\n file_path = os.path.join(\n output_dir, 'turbinia_stackdriver_logs_{0:s}.jsonl'.format(\n datetime.datetime.now().strftime('%s')))\n output_file = open(file_path, 'w')\n logger.info('Writing the logs to {0:s}'.format(file_path))\n try:\n client = cloud_logging.Client(project=project_id)\n logger.info(\n 'Collecting the stackdriver logs with the following query: {0:s}'\n .format(complete_query))\n\n for entry in client.list_entries(order_by=cloud_logging.DESCENDING,\n filter_=complete_query):\n if not output_dir:\n logger.info(json.dumps(entry.to_api_repr()))\n else:\n output_file.write(json.dumps(entry.to_api_repr()))\n output_file.write('\\n')\n if output_dir:\n output_file.close()\n except google_api_exceptions.InvalidArgument as exception:\n msg = 'Unable to parse query {0!s} with error {1!s}'.format(\n query, exception)\n raise TurbiniaException(msg)\n except HttpError as exception:\n msg = 'HTTP error querying logs. Make sure you have the right access on the project.{0!s}'.format(\n exception)\n raise TurbiniaException(msg)\n except google_api_exceptions.GoogleAPIError as exception:\n msg = 'Something went wrong with the API. {0!s}'.format(exception)\n raise TurbiniaException(msg)\n", "path": "turbinia/lib/google_cloud.py"}]}
| 2,066 | 133 |
gh_patches_debug_3352
|
rasdani/github-patches
|
git_diff
|
streamlink__streamlink-3395
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Change `author_email` in setup.py
https://github.com/streamlink/streamlink/blob/08e582580f3411b2de2c368f8b0cc7108264f990/setup.py#L83
@gravyboat
you've registered `[email protected]` a couple of years ago, right? Can this be used instead?
What's the email address of the `streamlink` account on pypi?
https://pypi.org/user/streamlink/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 import codecs
3 from os import environ, path
4 from sys import argv, path as sys_path
5
6 from setuptools import find_packages, setup
7
8 import versioneer
9
10
11 deps = [
12 "requests>=2.21.0,<3.0",
13 "isodate",
14 "websocket-client",
15 # Support for SOCKS proxies
16 "PySocks!=1.5.7,>=1.5.6",
17 ]
18
19 # for encrypted streams
20 if environ.get("STREAMLINK_USE_PYCRYPTO"):
21 deps.append("pycrypto")
22 else:
23 # this version of pycryptodome is known to work and has a Windows wheel for py2.7, py3.3-3.6
24 deps.append("pycryptodome>=3.4.3,<4")
25
26 # for localization
27 if environ.get("STREAMLINK_USE_PYCOUNTRY"):
28 deps.append("pycountry")
29 else:
30 deps.append("iso-639")
31 deps.append("iso3166")
32
33 # When we build an egg for the Win32 bootstrap we don"t want dependency
34 # information built into it.
35 if environ.get("NO_DEPS"):
36 deps = []
37
38 this_directory = path.abspath(path.dirname(__file__))
39 srcdir = path.join(this_directory, "src/")
40 sys_path.insert(0, srcdir)
41
42 with codecs.open(path.join(this_directory, "README.md"), 'r', "utf8") as f:
43 long_description = f.read()
44
45
46 def is_wheel_for_windows():
47 if "bdist_wheel" in argv:
48 names = ["win32", "win-amd64", "cygwin"]
49 length = len(argv)
50 for pos in range(argv.index("bdist_wheel") + 1, length):
51 if argv[pos] == "--plat-name" and pos + 1 < length:
52 return argv[pos + 1] in names
53 elif argv[pos][:12] == "--plat-name=":
54 return argv[pos][12:] in names
55 return False
56
57
58 entry_points = {
59 "console_scripts": ["streamlink=streamlink_cli.main:main"]
60 }
61
62 if is_wheel_for_windows():
63 entry_points["gui_scripts"] = ["streamlinkw=streamlink_cli.main:main"]
64
65
66 setup(name="streamlink",
67 version=versioneer.get_version(),
68 cmdclass=versioneer.get_cmdclass(),
69 description="Streamlink is a command-line utility that extracts streams "
70 "from various services and pipes them into a video player of "
71 "choice.",
72 long_description=long_description,
73 long_description_content_type="text/markdown",
74 url="https://github.com/streamlink/streamlink",
75 project_urls={
76 "Documentation": "https://streamlink.github.io/",
77 "Tracker": "https://github.com/streamlink/streamlink/issues",
78 "Source": "https://github.com/streamlink/streamlink",
79 "Funding": "https://opencollective.com/streamlink"
80 },
81 author="Streamlink",
82 # temp until we have a mailing list / global email
83 author_email="[email protected]",
84 license="Simplified BSD",
85 packages=find_packages("src"),
86 package_dir={"": "src"},
87 entry_points=entry_points,
88 install_requires=deps,
89 test_suite="tests",
90 python_requires=">=3.6, <4",
91 classifiers=["Development Status :: 5 - Production/Stable",
92 "License :: OSI Approved :: BSD License",
93 "Environment :: Console",
94 "Intended Audience :: End Users/Desktop",
95 "Operating System :: POSIX",
96 "Operating System :: Microsoft :: Windows",
97 "Operating System :: MacOS",
98 "Programming Language :: Python :: 3",
99 "Programming Language :: Python :: 3 :: Only",
100 "Programming Language :: Python :: 3.6",
101 "Programming Language :: Python :: 3.7",
102 "Programming Language :: Python :: 3.8",
103 "Programming Language :: Python :: 3.9",
104 "Topic :: Internet :: WWW/HTTP",
105 "Topic :: Multimedia :: Sound/Audio",
106 "Topic :: Multimedia :: Video",
107 "Topic :: Utilities"])
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -80,7 +80,7 @@
},
author="Streamlink",
# temp until we have a mailing list / global email
- author_email="[email protected]",
+ author_email="[email protected]",
license="Simplified BSD",
packages=find_packages("src"),
package_dir={"": "src"},
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -80,7 +80,7 @@\n },\n author=\"Streamlink\",\n # temp until we have a mailing list / global email\n- author_email=\"[email protected]\",\n+ author_email=\"[email protected]\",\n license=\"Simplified BSD\",\n packages=find_packages(\"src\"),\n package_dir={\"\": \"src\"},\n", "issue": "Change `author_email` in setup.py\nhttps://github.com/streamlink/streamlink/blob/08e582580f3411b2de2c368f8b0cc7108264f990/setup.py#L83\r\n\r\n@gravyboat \r\nyou've registered `[email protected]` a couple of years ago, right? Can this be used instead?\r\n\r\nWhat's the email address of the `streamlink` account on pypi?\r\nhttps://pypi.org/user/streamlink/\n", "before_files": [{"content": "#!/usr/bin/env python\nimport codecs\nfrom os import environ, path\nfrom sys import argv, path as sys_path\n\nfrom setuptools import find_packages, setup\n\nimport versioneer\n\n\ndeps = [\n \"requests>=2.21.0,<3.0\",\n \"isodate\",\n \"websocket-client\",\n # Support for SOCKS proxies\n \"PySocks!=1.5.7,>=1.5.6\",\n]\n\n# for encrypted streams\nif environ.get(\"STREAMLINK_USE_PYCRYPTO\"):\n deps.append(\"pycrypto\")\nelse:\n # this version of pycryptodome is known to work and has a Windows wheel for py2.7, py3.3-3.6\n deps.append(\"pycryptodome>=3.4.3,<4\")\n\n# for localization\nif environ.get(\"STREAMLINK_USE_PYCOUNTRY\"):\n deps.append(\"pycountry\")\nelse:\n deps.append(\"iso-639\")\n deps.append(\"iso3166\")\n\n# When we build an egg for the Win32 bootstrap we don\"t want dependency\n# information built into it.\nif environ.get(\"NO_DEPS\"):\n deps = []\n\nthis_directory = path.abspath(path.dirname(__file__))\nsrcdir = path.join(this_directory, \"src/\")\nsys_path.insert(0, srcdir)\n\nwith codecs.open(path.join(this_directory, \"README.md\"), 'r', \"utf8\") as f:\n long_description = f.read()\n\n\ndef is_wheel_for_windows():\n if \"bdist_wheel\" in argv:\n names = [\"win32\", \"win-amd64\", \"cygwin\"]\n length = len(argv)\n for pos in range(argv.index(\"bdist_wheel\") + 1, length):\n if argv[pos] == \"--plat-name\" and pos + 1 < length:\n return argv[pos + 1] in names\n elif argv[pos][:12] == \"--plat-name=\":\n return argv[pos][12:] in names\n return False\n\n\nentry_points = {\n \"console_scripts\": [\"streamlink=streamlink_cli.main:main\"]\n}\n\nif is_wheel_for_windows():\n entry_points[\"gui_scripts\"] = [\"streamlinkw=streamlink_cli.main:main\"]\n\n\nsetup(name=\"streamlink\",\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n description=\"Streamlink is a command-line utility that extracts streams \"\n \"from various services and pipes them into a video player of \"\n \"choice.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/streamlink/streamlink\",\n project_urls={\n \"Documentation\": \"https://streamlink.github.io/\",\n \"Tracker\": \"https://github.com/streamlink/streamlink/issues\",\n \"Source\": \"https://github.com/streamlink/streamlink\",\n \"Funding\": \"https://opencollective.com/streamlink\"\n },\n author=\"Streamlink\",\n # temp until we have a mailing list / global email\n author_email=\"[email protected]\",\n license=\"Simplified BSD\",\n packages=find_packages(\"src\"),\n package_dir={\"\": \"src\"},\n entry_points=entry_points,\n install_requires=deps,\n test_suite=\"tests\",\n python_requires=\">=3.6, <4\",\n classifiers=[\"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: BSD License\",\n 
\"Environment :: Console\",\n \"Intended Audience :: End Users/Desktop\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Multimedia :: Sound/Audio\",\n \"Topic :: Multimedia :: Video\",\n \"Topic :: Utilities\"])\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport codecs\nfrom os import environ, path\nfrom sys import argv, path as sys_path\n\nfrom setuptools import find_packages, setup\n\nimport versioneer\n\n\ndeps = [\n \"requests>=2.21.0,<3.0\",\n \"isodate\",\n \"websocket-client\",\n # Support for SOCKS proxies\n \"PySocks!=1.5.7,>=1.5.6\",\n]\n\n# for encrypted streams\nif environ.get(\"STREAMLINK_USE_PYCRYPTO\"):\n deps.append(\"pycrypto\")\nelse:\n # this version of pycryptodome is known to work and has a Windows wheel for py2.7, py3.3-3.6\n deps.append(\"pycryptodome>=3.4.3,<4\")\n\n# for localization\nif environ.get(\"STREAMLINK_USE_PYCOUNTRY\"):\n deps.append(\"pycountry\")\nelse:\n deps.append(\"iso-639\")\n deps.append(\"iso3166\")\n\n# When we build an egg for the Win32 bootstrap we don\"t want dependency\n# information built into it.\nif environ.get(\"NO_DEPS\"):\n deps = []\n\nthis_directory = path.abspath(path.dirname(__file__))\nsrcdir = path.join(this_directory, \"src/\")\nsys_path.insert(0, srcdir)\n\nwith codecs.open(path.join(this_directory, \"README.md\"), 'r', \"utf8\") as f:\n long_description = f.read()\n\n\ndef is_wheel_for_windows():\n if \"bdist_wheel\" in argv:\n names = [\"win32\", \"win-amd64\", \"cygwin\"]\n length = len(argv)\n for pos in range(argv.index(\"bdist_wheel\") + 1, length):\n if argv[pos] == \"--plat-name\" and pos + 1 < length:\n return argv[pos + 1] in names\n elif argv[pos][:12] == \"--plat-name=\":\n return argv[pos][12:] in names\n return False\n\n\nentry_points = {\n \"console_scripts\": [\"streamlink=streamlink_cli.main:main\"]\n}\n\nif is_wheel_for_windows():\n entry_points[\"gui_scripts\"] = [\"streamlinkw=streamlink_cli.main:main\"]\n\n\nsetup(name=\"streamlink\",\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n description=\"Streamlink is a command-line utility that extracts streams \"\n \"from various services and pipes them into a video player of \"\n \"choice.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/streamlink/streamlink\",\n project_urls={\n \"Documentation\": \"https://streamlink.github.io/\",\n \"Tracker\": \"https://github.com/streamlink/streamlink/issues\",\n \"Source\": \"https://github.com/streamlink/streamlink\",\n \"Funding\": \"https://opencollective.com/streamlink\"\n },\n author=\"Streamlink\",\n # temp until we have a mailing list / global email\n author_email=\"[email protected]\",\n license=\"Simplified BSD\",\n packages=find_packages(\"src\"),\n package_dir={\"\": \"src\"},\n entry_points=entry_points,\n install_requires=deps,\n test_suite=\"tests\",\n python_requires=\">=3.6, <4\",\n classifiers=[\"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: BSD License\",\n \"Environment :: Console\",\n \"Intended Audience :: End Users/Desktop\",\n \"Operating System :: 
POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Multimedia :: Sound/Audio\",\n \"Topic :: Multimedia :: Video\",\n \"Topic :: Utilities\"])\n", "path": "setup.py"}]}
| 1,484 | 102 |
gh_patches_debug_30635
|
rasdani/github-patches
|
git_diff
|
cocotb__cocotb-1848
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Document cocotb module variables
The `cocotb` module has some important variables which aren't documented:
- [ ] SIM_NAME
- [ ] SIM_VERSION
- [ ] RANDOM_SEED
- [ ] log
- [X] scheduler
- [ ] regression_manager
- [X] plusargs
- [ ] argv/argc
- [ ] LANGUAGE
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cocotb/__init__.py`
Content:
```
1 # Copyright (c) 2013 Potential Ventures Ltd
2 # Copyright (c) 2013 SolarFlare Communications Inc
3 # All rights reserved.
4
5 # Redistribution and use in source and binary forms, with or without
6 # modification, are permitted provided that the following conditions are met:
7 # * Redistributions of source code must retain the above copyright
8 # notice, this list of conditions and the following disclaimer.
9 # * Redistributions in binary form must reproduce the above copyright
10 # notice, this list of conditions and the following disclaimer in the
11 # documentation and/or other materials provided with the distribution.
12 # * Neither the name of Potential Ventures Ltd,
13 # SolarFlare Communications Inc nor the
14 # names of its contributors may be used to endorse or promote products
15 # derived from this software without specific prior written permission.
16
17 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
18 # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 # DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
21 # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22 # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23 # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
24 # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
26 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 """
29 Cocotb is a coroutine, cosimulation framework for writing testbenches in Python.
30
31 See https://docs.cocotb.org for full documentation
32 """
33 import os
34 import sys
35 import logging
36 import threading
37 import random
38 import time
39 import warnings
40
41 import cocotb._os_compat # must appear first, before the first import of cocotb.simulator
42 import cocotb.handle
43 import cocotb.log
44 from cocotb.scheduler import Scheduler
45 from cocotb.regression import RegressionManager
46
47
48 # Things we want in the cocotb namespace
49 from cocotb.decorators import test, coroutine, hook, function, external # noqa: F401
50
51 from ._version import __version__
52
53
54 def _setup_logging():
55 global log
56
57 def _reopen_stream_with_buffering(stream_name):
58 try:
59 if not getattr(sys, stream_name).isatty():
60 setattr(sys, stream_name, os.fdopen(getattr(sys, stream_name).fileno(), 'w', 1))
61 return True
62 return False
63 except Exception as e:
64 return e
65
66 # If stdout/stderr are not TTYs, Python may not have opened them with line
67 # buffering. In that case, try to reopen them with line buffering
68 # explicitly enabled. This ensures that prints such as stack traces always
69 # appear. Continue silently if this fails.
70 _stdout_buffer_result = _reopen_stream_with_buffering('stdout')
71 _stderr_buffer_result = _reopen_stream_with_buffering('stderr')
72
73 # Don't set the logging up until we've attempted to fix the standard IO,
74 # otherwise it will end up connected to the unfixed IO.
75 cocotb.log.default_config()
76 log = logging.getLogger(__name__)
77
78 # we can't log these things until the logging is set up!
79 if _stderr_buffer_result is True:
80 log.debug("Reopened stderr with line buffering")
81 if _stdout_buffer_result is True:
82 log.debug("Reopened stdout with line buffering")
83 if isinstance(_stdout_buffer_result, Exception) or isinstance(_stderr_buffer_result, Exception):
84 if isinstance(_stdout_buffer_result, Exception):
85 log.warning("Failed to ensure that stdout is line buffered", exc_info=_stdout_buffer_result)
86 if isinstance(_stderr_buffer_result, Exception):
87 log.warning("Failed to ensure that stderr is line buffered", exc_info=_stderr_buffer_result)
88 log.warning("Some stack traces may not appear because of this.")
89
90 del _stderr_buffer_result, _stdout_buffer_result
91
92
93 # Singleton scheduler instance
94 # NB this cheekily ensures a singleton since we're replacing the reference
95 # so that cocotb.scheduler gives you the singleton instance and not the
96 # scheduler package
97
98 scheduler = None
99 """The global scheduler instance."""
100
101 regression_manager = None
102
103 plusargs = {}
104 """A dictionary of "plusargs" handed to the simulation."""
105
106
107 def fork(coro):
108 """ Schedule a coroutine to be run concurrently. See :ref:`coroutines` for details on it's use. """
109 return scheduler.add(coro)
110
111
112 # FIXME is this really required?
113 _rlock = threading.RLock()
114
115 LANGUAGE = os.getenv("TOPLEVEL_LANG")
116
117
118 def mem_debug(port):
119 import cocotb.memdebug
120 cocotb.memdebug.start(port)
121
122
123 def _initialise_testbench(argv_):
124 """Initialize testbench.
125
126 This function is called after the simulator has elaborated all
127 entities and is ready to run the test.
128
129 The test must be defined by the environment variables
130 :envvar:`MODULE` and :envvar:`TESTCASE`.
131
132 The environment variable :envvar:`COCOTB_HOOKS`, if present, contains a
133 comma-separated list of modules to be executed before the first test.
134 """
135 _rlock.acquire()
136
137 global argc, argv
138 argv = argv_
139 argc = len(argv)
140
141 root_name = os.getenv("TOPLEVEL")
142 if root_name is not None:
143 if root_name == "":
144 root_name = None
145 elif '.' in root_name:
146 # Skip any library component of the toplevel
147 root_name = root_name.split(".", 1)[1]
148
149 # sys.path normally includes "" (the current directory), but does not appear to when python is embedded.
150 # Add it back because users expect to be able to import files in their test directory.
151 # TODO: move this to gpi_embed.cpp
152 sys.path.insert(0, "")
153
154 _setup_logging()
155
156 # From https://www.python.org/dev/peps/pep-0565/#recommended-filter-settings-for-test-runners
157 # If the user doesn't want to see these, they can always change the global
158 # warning settings in their test module.
159 if not sys.warnoptions:
160 warnings.simplefilter("default")
161
162 from cocotb import simulator
163
164 global SIM_NAME, SIM_VERSION
165 SIM_NAME = simulator.get_simulator_product()
166 SIM_VERSION = simulator.get_simulator_version()
167
168 cocotb.log.info("Running on {} version {}".format(SIM_NAME, SIM_VERSION))
169
170 memcheck_port = os.getenv('MEMCHECK')
171 if memcheck_port is not None:
172 mem_debug(int(memcheck_port))
173
174 log.info("Running tests with cocotb v%s from %s" %
175 (__version__, os.path.dirname(__file__)))
176
177 # Create the base handle type
178
179 process_plusargs()
180
181 global scheduler
182 scheduler = Scheduler()
183
184 # Seed the Python random number generator to make this repeatable
185 global RANDOM_SEED
186 RANDOM_SEED = os.getenv('RANDOM_SEED')
187
188 if RANDOM_SEED is None:
189 if 'ntb_random_seed' in plusargs:
190 RANDOM_SEED = eval(plusargs['ntb_random_seed'])
191 elif 'seed' in plusargs:
192 RANDOM_SEED = eval(plusargs['seed'])
193 else:
194 RANDOM_SEED = int(time.time())
195 log.info("Seeding Python random module with %d" % (RANDOM_SEED))
196 else:
197 RANDOM_SEED = int(RANDOM_SEED)
198 log.info("Seeding Python random module with supplied seed %d" % (RANDOM_SEED))
199 random.seed(RANDOM_SEED)
200
201 # Setup DUT object
202 from cocotb import simulator
203
204 handle = simulator.get_root_handle(root_name)
205 if not handle:
206 raise RuntimeError("Can not find root handle ({})".format(root_name))
207
208 dut = cocotb.handle.SimHandle(handle)
209
210 # start Regression Manager
211 global regression_manager
212 regression_manager = RegressionManager.from_discovery(dut)
213 regression_manager.execute()
214
215 _rlock.release()
216 return True
217
218
219 def _sim_event(level, message):
220 """Function that can be called externally to signal an event."""
221 SIM_INFO = 0
222 SIM_TEST_FAIL = 1
223 SIM_FAIL = 2
224 from cocotb.result import TestFailure, SimFailure
225
226 if level is SIM_TEST_FAIL:
227 scheduler.log.error("Failing test at simulator request")
228 scheduler.finish_test(TestFailure("Failure from external source: %s" %
229 message))
230 elif level is SIM_FAIL:
231 # We simply return here as the simulator will exit
232 # so no cleanup is needed
233 msg = ("Failing test at simulator request before test run completion: "
234 "%s" % message)
235 scheduler.log.error(msg)
236 scheduler.finish_scheduler(SimFailure(msg))
237 else:
238 scheduler.log.error("Unsupported sim event")
239
240 return True
241
242
243 def process_plusargs():
244
245 global plusargs
246
247 plusargs = {}
248
249 for option in cocotb.argv:
250 if option.startswith('+'):
251 if option.find('=') != -1:
252 (name, value) = option[1:].split('=')
253 plusargs[name] = value
254 else:
255 plusargs[option[1:]] = True
256
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/cocotb/__init__.py b/cocotb/__init__.py
--- a/cocotb/__init__.py
+++ b/cocotb/__init__.py
@@ -37,6 +37,7 @@
import random
import time
import warnings
+from typing import Dict, List, Union
import cocotb._os_compat # must appear first, before the first import of cocotb.simulator
import cocotb.handle
@@ -95,13 +96,35 @@
# so that cocotb.scheduler gives you the singleton instance and not the
# scheduler package
-scheduler = None
+scheduler = None # type: cocotb.scheduler.Scheduler
"""The global scheduler instance."""
-regression_manager = None
+regression_manager = None # type: cocotb.regression.RegressionManager
+"""The global regression manager instance."""
-plusargs = {}
-"""A dictionary of "plusargs" handed to the simulation."""
+argv = None # type: List[str]
+"""The argument list as seen by the simulator"""
+
+argc = None # type: int
+"""The length of :data:`cocotb.argv`"""
+
+plusargs = None # type: Dict[str, Union[bool, str]]
+"""A dictionary of "plusargs" handed to the simulation. See :make:var:`PLUSARGS` for details."""
+
+LANGUAGE = os.getenv("TOPLEVEL_LANG") # type: str
+"""The value of :make:var:`TOPLEVEL_LANG`"""
+
+SIM_NAME = None # type: str
+"""The running simulator product information. ``None`` if :mod:`cocotb` was not loaded from a simulator"""
+
+SIM_VERSION = None # type: str
+"""The version of the running simulator. ``None`` if :mod:`cocotb` was not loaded from a simulator"""
+
+RANDOM_SEED = None # type: int
+"""
+The value passed to the Python default random number generator.
+See :envvar:`RANDOM_SEED` for details on how the value is computed.
+"""
def fork(coro):
@@ -112,8 +135,6 @@
# FIXME is this really required?
_rlock = threading.RLock()
-LANGUAGE = os.getenv("TOPLEVEL_LANG")
-
def mem_debug(port):
import cocotb.memdebug
|
{"golden_diff": "diff --git a/cocotb/__init__.py b/cocotb/__init__.py\n--- a/cocotb/__init__.py\n+++ b/cocotb/__init__.py\n@@ -37,6 +37,7 @@\n import random\n import time\n import warnings\n+from typing import Dict, List, Union\n \n import cocotb._os_compat # must appear first, before the first import of cocotb.simulator\n import cocotb.handle\n@@ -95,13 +96,35 @@\n # so that cocotb.scheduler gives you the singleton instance and not the\n # scheduler package\n \n-scheduler = None\n+scheduler = None # type: cocotb.scheduler.Scheduler\n \"\"\"The global scheduler instance.\"\"\"\n \n-regression_manager = None\n+regression_manager = None # type: cocotb.regression.RegressionManager\n+\"\"\"The global regression manager instance.\"\"\"\n \n-plusargs = {}\n-\"\"\"A dictionary of \"plusargs\" handed to the simulation.\"\"\"\n+argv = None # type: List[str]\n+\"\"\"The argument list as seen by the simulator\"\"\"\n+\n+argc = None # type: int\n+\"\"\"The length of :data:`cocotb.argv`\"\"\"\n+\n+plusargs = None # type: Dict[str, Union[bool, str]]\n+\"\"\"A dictionary of \"plusargs\" handed to the simulation. See :make:var:`PLUSARGS` for details.\"\"\"\n+\n+LANGUAGE = os.getenv(\"TOPLEVEL_LANG\") # type: str\n+\"\"\"The value of :make:var:`TOPLEVEL_LANG`\"\"\"\n+\n+SIM_NAME = None # type: str\n+\"\"\"The running simulator product information. ``None`` if :mod:`cocotb` was not loaded from a simulator\"\"\"\n+\n+SIM_VERSION = None # type: str\n+\"\"\"The version of the running simulator. ``None`` if :mod:`cocotb` was not loaded from a simulator\"\"\"\n+\n+RANDOM_SEED = None # type: int\n+\"\"\"\n+The value passed to the Python default random number generator.\n+See :envvar:`RANDOM_SEED` for details on how the value is computed.\n+\"\"\"\n \n \n def fork(coro):\n@@ -112,8 +135,6 @@\n # FIXME is this really required?\n _rlock = threading.RLock()\n \n-LANGUAGE = os.getenv(\"TOPLEVEL_LANG\")\n-\n \n def mem_debug(port):\n import cocotb.memdebug\n", "issue": "Document cocotb module variables\nThe `cocotb` module has some important variables which aren't documented:\r\n\r\n- [ ] SIM_NAME\r\n- [ ] SIM_VERSION\r\n- [ ] RANDOM_SEED\r\n- [ ] log\r\n- [X] scheduler\r\n- [ ] regression_manager\r\n- [X] plusargs\r\n- [ ] argv/argc\r\n- [ ] LANGUAGE\n", "before_files": [{"content": "# Copyright (c) 2013 Potential Ventures Ltd\n# Copyright (c) 2013 SolarFlare Communications Inc\n# All rights reserved.\n\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd,\n# SolarFlare Communications Inc nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"\nCocotb is a coroutine, cosimulation framework for writing testbenches in Python.\n\nSee https://docs.cocotb.org for full documentation\n\"\"\"\nimport os\nimport sys\nimport logging\nimport threading\nimport random\nimport time\nimport warnings\n\nimport cocotb._os_compat # must appear first, before the first import of cocotb.simulator\nimport cocotb.handle\nimport cocotb.log\nfrom cocotb.scheduler import Scheduler\nfrom cocotb.regression import RegressionManager\n\n\n# Things we want in the cocotb namespace\nfrom cocotb.decorators import test, coroutine, hook, function, external # noqa: F401\n\nfrom ._version import __version__\n\n\ndef _setup_logging():\n global log\n\n def _reopen_stream_with_buffering(stream_name):\n try:\n if not getattr(sys, stream_name).isatty():\n setattr(sys, stream_name, os.fdopen(getattr(sys, stream_name).fileno(), 'w', 1))\n return True\n return False\n except Exception as e:\n return e\n\n # If stdout/stderr are not TTYs, Python may not have opened them with line\n # buffering. In that case, try to reopen them with line buffering\n # explicitly enabled. This ensures that prints such as stack traces always\n # appear. Continue silently if this fails.\n _stdout_buffer_result = _reopen_stream_with_buffering('stdout')\n _stderr_buffer_result = _reopen_stream_with_buffering('stderr')\n\n # Don't set the logging up until we've attempted to fix the standard IO,\n # otherwise it will end up connected to the unfixed IO.\n cocotb.log.default_config()\n log = logging.getLogger(__name__)\n\n # we can't log these things until the logging is set up!\n if _stderr_buffer_result is True:\n log.debug(\"Reopened stderr with line buffering\")\n if _stdout_buffer_result is True:\n log.debug(\"Reopened stdout with line buffering\")\n if isinstance(_stdout_buffer_result, Exception) or isinstance(_stderr_buffer_result, Exception):\n if isinstance(_stdout_buffer_result, Exception):\n log.warning(\"Failed to ensure that stdout is line buffered\", exc_info=_stdout_buffer_result)\n if isinstance(_stderr_buffer_result, Exception):\n log.warning(\"Failed to ensure that stderr is line buffered\", exc_info=_stderr_buffer_result)\n log.warning(\"Some stack traces may not appear because of this.\")\n\n del _stderr_buffer_result, _stdout_buffer_result\n\n\n# Singleton scheduler instance\n# NB this cheekily ensures a singleton since we're replacing the reference\n# so that cocotb.scheduler gives you the singleton instance and not the\n# scheduler package\n\nscheduler = None\n\"\"\"The global scheduler instance.\"\"\"\n\nregression_manager = None\n\nplusargs = {}\n\"\"\"A dictionary of \"plusargs\" handed to the simulation.\"\"\"\n\n\ndef fork(coro):\n \"\"\" Schedule a coroutine to be run concurrently. See :ref:`coroutines` for details on it's use. 
\"\"\"\n return scheduler.add(coro)\n\n\n# FIXME is this really required?\n_rlock = threading.RLock()\n\nLANGUAGE = os.getenv(\"TOPLEVEL_LANG\")\n\n\ndef mem_debug(port):\n import cocotb.memdebug\n cocotb.memdebug.start(port)\n\n\ndef _initialise_testbench(argv_):\n \"\"\"Initialize testbench.\n\n This function is called after the simulator has elaborated all\n entities and is ready to run the test.\n\n The test must be defined by the environment variables\n :envvar:`MODULE` and :envvar:`TESTCASE`.\n\n The environment variable :envvar:`COCOTB_HOOKS`, if present, contains a\n comma-separated list of modules to be executed before the first test.\n \"\"\"\n _rlock.acquire()\n\n global argc, argv\n argv = argv_\n argc = len(argv)\n\n root_name = os.getenv(\"TOPLEVEL\")\n if root_name is not None:\n if root_name == \"\":\n root_name = None\n elif '.' in root_name:\n # Skip any library component of the toplevel\n root_name = root_name.split(\".\", 1)[1]\n\n # sys.path normally includes \"\" (the current directory), but does not appear to when python is embedded.\n # Add it back because users expect to be able to import files in their test directory.\n # TODO: move this to gpi_embed.cpp\n sys.path.insert(0, \"\")\n\n _setup_logging()\n\n # From https://www.python.org/dev/peps/pep-0565/#recommended-filter-settings-for-test-runners\n # If the user doesn't want to see these, they can always change the global\n # warning settings in their test module.\n if not sys.warnoptions:\n warnings.simplefilter(\"default\")\n\n from cocotb import simulator\n\n global SIM_NAME, SIM_VERSION\n SIM_NAME = simulator.get_simulator_product()\n SIM_VERSION = simulator.get_simulator_version()\n\n cocotb.log.info(\"Running on {} version {}\".format(SIM_NAME, SIM_VERSION))\n\n memcheck_port = os.getenv('MEMCHECK')\n if memcheck_port is not None:\n mem_debug(int(memcheck_port))\n\n log.info(\"Running tests with cocotb v%s from %s\" %\n (__version__, os.path.dirname(__file__)))\n\n # Create the base handle type\n\n process_plusargs()\n\n global scheduler\n scheduler = Scheduler()\n\n # Seed the Python random number generator to make this repeatable\n global RANDOM_SEED\n RANDOM_SEED = os.getenv('RANDOM_SEED')\n\n if RANDOM_SEED is None:\n if 'ntb_random_seed' in plusargs:\n RANDOM_SEED = eval(plusargs['ntb_random_seed'])\n elif 'seed' in plusargs:\n RANDOM_SEED = eval(plusargs['seed'])\n else:\n RANDOM_SEED = int(time.time())\n log.info(\"Seeding Python random module with %d\" % (RANDOM_SEED))\n else:\n RANDOM_SEED = int(RANDOM_SEED)\n log.info(\"Seeding Python random module with supplied seed %d\" % (RANDOM_SEED))\n random.seed(RANDOM_SEED)\n\n # Setup DUT object\n from cocotb import simulator\n\n handle = simulator.get_root_handle(root_name)\n if not handle:\n raise RuntimeError(\"Can not find root handle ({})\".format(root_name))\n\n dut = cocotb.handle.SimHandle(handle)\n\n # start Regression Manager\n global regression_manager\n regression_manager = RegressionManager.from_discovery(dut)\n regression_manager.execute()\n\n _rlock.release()\n return True\n\n\ndef _sim_event(level, message):\n \"\"\"Function that can be called externally to signal an event.\"\"\"\n SIM_INFO = 0\n SIM_TEST_FAIL = 1\n SIM_FAIL = 2\n from cocotb.result import TestFailure, SimFailure\n\n if level is SIM_TEST_FAIL:\n scheduler.log.error(\"Failing test at simulator request\")\n scheduler.finish_test(TestFailure(\"Failure from external source: %s\" %\n message))\n elif level is SIM_FAIL:\n # We simply return here as the simulator will exit\n # so 
no cleanup is needed\n msg = (\"Failing test at simulator request before test run completion: \"\n \"%s\" % message)\n scheduler.log.error(msg)\n scheduler.finish_scheduler(SimFailure(msg))\n else:\n scheduler.log.error(\"Unsupported sim event\")\n\n return True\n\n\ndef process_plusargs():\n\n global plusargs\n\n plusargs = {}\n\n for option in cocotb.argv:\n if option.startswith('+'):\n if option.find('=') != -1:\n (name, value) = option[1:].split('=')\n plusargs[name] = value\n else:\n plusargs[option[1:]] = True\n", "path": "cocotb/__init__.py"}], "after_files": [{"content": "# Copyright (c) 2013 Potential Ventures Ltd\n# Copyright (c) 2013 SolarFlare Communications Inc\n# All rights reserved.\n\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd,\n# SolarFlare Communications Inc nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"\nCocotb is a coroutine, cosimulation framework for writing testbenches in Python.\n\nSee https://docs.cocotb.org for full documentation\n\"\"\"\nimport os\nimport sys\nimport logging\nimport threading\nimport random\nimport time\nimport warnings\nfrom typing import Dict, List, Union\n\nimport cocotb._os_compat # must appear first, before the first import of cocotb.simulator\nimport cocotb.handle\nimport cocotb.log\nfrom cocotb.scheduler import Scheduler\nfrom cocotb.regression import RegressionManager\n\n\n# Things we want in the cocotb namespace\nfrom cocotb.decorators import test, coroutine, hook, function, external # noqa: F401\n\nfrom ._version import __version__\n\n\ndef _setup_logging():\n global log\n\n def _reopen_stream_with_buffering(stream_name):\n try:\n if not getattr(sys, stream_name).isatty():\n setattr(sys, stream_name, os.fdopen(getattr(sys, stream_name).fileno(), 'w', 1))\n return True\n return False\n except Exception as e:\n return e\n\n # If stdout/stderr are not TTYs, Python may not have opened them with line\n # buffering. In that case, try to reopen them with line buffering\n # explicitly enabled. This ensures that prints such as stack traces always\n # appear. 
Continue silently if this fails.\n _stdout_buffer_result = _reopen_stream_with_buffering('stdout')\n _stderr_buffer_result = _reopen_stream_with_buffering('stderr')\n\n # Don't set the logging up until we've attempted to fix the standard IO,\n # otherwise it will end up connected to the unfixed IO.\n cocotb.log.default_config()\n log = logging.getLogger(__name__)\n\n # we can't log these things until the logging is set up!\n if _stderr_buffer_result is True:\n log.debug(\"Reopened stderr with line buffering\")\n if _stdout_buffer_result is True:\n log.debug(\"Reopened stdout with line buffering\")\n if isinstance(_stdout_buffer_result, Exception) or isinstance(_stderr_buffer_result, Exception):\n if isinstance(_stdout_buffer_result, Exception):\n log.warning(\"Failed to ensure that stdout is line buffered\", exc_info=_stdout_buffer_result)\n if isinstance(_stderr_buffer_result, Exception):\n log.warning(\"Failed to ensure that stderr is line buffered\", exc_info=_stderr_buffer_result)\n log.warning(\"Some stack traces may not appear because of this.\")\n\n del _stderr_buffer_result, _stdout_buffer_result\n\n\n# Singleton scheduler instance\n# NB this cheekily ensures a singleton since we're replacing the reference\n# so that cocotb.scheduler gives you the singleton instance and not the\n# scheduler package\n\nscheduler = None # type: cocotb.scheduler.Scheduler\n\"\"\"The global scheduler instance.\"\"\"\n\nregression_manager = None # type: cocotb.regression.RegressionManager\n\"\"\"The global regression manager instance.\"\"\"\n\nargv = None # type: List[str]\n\"\"\"The argument list as seen by the simulator\"\"\"\n\nargc = None # type: int\n\"\"\"The length of :data:`cocotb.argv`\"\"\"\n\nplusargs = None # type: Dict[str, Union[bool, str]]\n\"\"\"A dictionary of \"plusargs\" handed to the simulation. See :make:var:`PLUSARGS` for details.\"\"\"\n\nLANGUAGE = os.getenv(\"TOPLEVEL_LANG\") # type: str\n\"\"\"The value of :make:var:`TOPLEVEL_LANG`\"\"\"\n\nSIM_NAME = None # type: str\n\"\"\"The running simulator product information. ``None`` if :mod:`cocotb` was not loaded from a simulator\"\"\"\n\nSIM_VERSION = None # type: str\n\"\"\"The version of the running simulator. ``None`` if :mod:`cocotb` was not loaded from a simulator\"\"\"\n\nRANDOM_SEED = None # type: int\n\"\"\"\nThe value passed to the Python default random number generator.\nSee :envvar:`RANDOM_SEED` for details on how the value is computed.\n\"\"\"\n\n\ndef fork(coro):\n \"\"\" Schedule a coroutine to be run concurrently. See :ref:`coroutines` for details on it's use. \"\"\"\n return scheduler.add(coro)\n\n\n# FIXME is this really required?\n_rlock = threading.RLock()\n\n\ndef mem_debug(port):\n import cocotb.memdebug\n cocotb.memdebug.start(port)\n\n\ndef _initialise_testbench(argv_):\n \"\"\"Initialize testbench.\n\n This function is called after the simulator has elaborated all\n entities and is ready to run the test.\n\n The test must be defined by the environment variables\n :envvar:`MODULE` and :envvar:`TESTCASE`.\n\n The environment variable :envvar:`COCOTB_HOOKS`, if present, contains a\n comma-separated list of modules to be executed before the first test.\n \"\"\"\n _rlock.acquire()\n\n global argc, argv\n argv = argv_\n argc = len(argv)\n\n root_name = os.getenv(\"TOPLEVEL\")\n if root_name is not None:\n if root_name == \"\":\n root_name = None\n elif '.' 
in root_name:\n # Skip any library component of the toplevel\n root_name = root_name.split(\".\", 1)[1]\n\n # sys.path normally includes \"\" (the current directory), but does not appear to when python is embedded.\n # Add it back because users expect to be able to import files in their test directory.\n # TODO: move this to gpi_embed.cpp\n sys.path.insert(0, \"\")\n\n _setup_logging()\n\n # From https://www.python.org/dev/peps/pep-0565/#recommended-filter-settings-for-test-runners\n # If the user doesn't want to see these, they can always change the global\n # warning settings in their test module.\n if not sys.warnoptions:\n warnings.simplefilter(\"default\")\n\n from cocotb import simulator\n\n global SIM_NAME, SIM_VERSION\n SIM_NAME = simulator.get_simulator_product()\n SIM_VERSION = simulator.get_simulator_version()\n\n cocotb.log.info(\"Running on {} version {}\".format(SIM_NAME, SIM_VERSION))\n\n memcheck_port = os.getenv('MEMCHECK')\n if memcheck_port is not None:\n mem_debug(int(memcheck_port))\n\n log.info(\"Running tests with cocotb v%s from %s\" %\n (__version__, os.path.dirname(__file__)))\n\n # Create the base handle type\n\n process_plusargs()\n\n global scheduler\n scheduler = Scheduler()\n\n # Seed the Python random number generator to make this repeatable\n global RANDOM_SEED\n RANDOM_SEED = os.getenv('RANDOM_SEED')\n\n if RANDOM_SEED is None:\n if 'ntb_random_seed' in plusargs:\n RANDOM_SEED = eval(plusargs['ntb_random_seed'])\n elif 'seed' in plusargs:\n RANDOM_SEED = eval(plusargs['seed'])\n else:\n RANDOM_SEED = int(time.time())\n log.info(\"Seeding Python random module with %d\" % (RANDOM_SEED))\n else:\n RANDOM_SEED = int(RANDOM_SEED)\n log.info(\"Seeding Python random module with supplied seed %d\" % (RANDOM_SEED))\n random.seed(RANDOM_SEED)\n\n # Setup DUT object\n from cocotb import simulator\n\n handle = simulator.get_root_handle(root_name)\n if not handle:\n raise RuntimeError(\"Can not find root handle ({})\".format(root_name))\n\n dut = cocotb.handle.SimHandle(handle)\n\n # start Regression Manager\n global regression_manager\n regression_manager = RegressionManager.from_discovery(dut)\n regression_manager.execute()\n\n _rlock.release()\n return True\n\n\ndef _sim_event(level, message):\n \"\"\"Function that can be called externally to signal an event.\"\"\"\n SIM_INFO = 0\n SIM_TEST_FAIL = 1\n SIM_FAIL = 2\n from cocotb.result import TestFailure, SimFailure\n\n if level is SIM_TEST_FAIL:\n scheduler.log.error(\"Failing test at simulator request\")\n scheduler.finish_test(TestFailure(\"Failure from external source: %s\" %\n message))\n elif level is SIM_FAIL:\n # We simply return here as the simulator will exit\n # so no cleanup is needed\n msg = (\"Failing test at simulator request before test run completion: \"\n \"%s\" % message)\n scheduler.log.error(msg)\n scheduler.finish_scheduler(SimFailure(msg))\n else:\n scheduler.log.error(\"Unsupported sim event\")\n\n return True\n\n\ndef process_plusargs():\n\n global plusargs\n\n plusargs = {}\n\n for option in cocotb.argv:\n if option.startswith('+'):\n if option.find('=') != -1:\n (name, value) = option[1:].split('=')\n plusargs[name] = value\n else:\n plusargs[option[1:]] = True\n", "path": "cocotb/__init__.py"}]}
| 3,048 | 528 |
gh_patches_debug_35919 | rasdani/github-patches | git_diff | lutris__lutris-1424 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Lutris takes extremely long time to parse very long string keys in registry
If user.reg contains a very long (we are talking megabytes long here) string key, it takes a very long time to start anything in this wineprefix with lutris. With a ~15MB key lutris was taking ~30 minutes. It happens before starting wine, in the "preparing to start" phase.
P.S. If you ask how this happens - Battletech by HBS writes some analytics into a string key in the registry. And it only adds to it, never cleans it up.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lutris/util/wine/registry.py`
Content:
```
1 import os
2 import re
3 from collections import OrderedDict
4 from datetime import datetime
5 from lutris.util.log import logger
6 from lutris.util import system
7 from lutris.util.wine.wine import WINE_DEFAULT_ARCH
8
9 (
10 REG_NONE,
11 REG_SZ,
12 REG_EXPAND_SZ,
13 REG_BINARY,
14 REG_DWORD,
15 REG_DWORD_BIG_ENDIAN,
16 REG_LINK,
17 REG_MULTI_SZ,
18 ) = range(8)
19
20 DATA_TYPES = {
21 '"': REG_SZ,
22 'str:"': REG_SZ,
23 'str(2):"': REG_EXPAND_SZ,
24 'str(7):"': REG_MULTI_SZ,
25 "hex": REG_BINARY,
26 "dword": REG_DWORD,
27 }
28
29
30 class WindowsFileTime:
31 """Utility class to deal with Windows FILETIME structures.
32
33 See: https://msdn.microsoft.com/en-us/library/ms724284(v=vs.85).aspx
34 """
35
36 ticks_per_seconds = 10000000 # 1 tick every 100 nanoseconds
37 epoch_delta = 11644473600 # 3600 * 24 * ((1970 - 1601) * 365 + 89)
38
39 def __init__(self, timestamp=None):
40 self.timestamp = timestamp
41
42 def __repr__(self):
43 return "<{}>: {}".format(self.__class__.__name__, self.timestamp)
44
45 @classmethod
46 def from_hex(cls, hexvalue):
47 timestamp = int(hexvalue, 16)
48 return WindowsFileTime(timestamp)
49
50 def to_hex(self):
51 return "{:x}".format(self.timestamp)
52
53 @classmethod
54 def from_unix_timestamp(cls, timestamp):
55 timestamp = timestamp + cls.epoch_delta
56 timestamp = int(timestamp * cls.ticks_per_seconds)
57 return WindowsFileTime(timestamp)
58
59 def to_unix_timestamp(self):
60 if not self.timestamp:
61 raise ValueError("No timestamp set")
62 unix_ts = self.timestamp / self.ticks_per_seconds
63 unix_ts = unix_ts - self.epoch_delta
64 return unix_ts
65
66 def to_date_time(self):
67 return datetime.fromtimestamp(self.to_unix_timestamp())
68
69
70 class WineRegistry:
71 version_header = "WINE REGISTRY Version "
72 relative_to_header = ";; All keys relative to "
73
74 def __init__(self, reg_filename=None):
75 self.arch = WINE_DEFAULT_ARCH
76 self.version = 2
77 self.relative_to = "\\\\User\\\\S-1-5-21-0-0-0-1000"
78 self.keys = OrderedDict()
79 self.reg_filename = reg_filename
80 if reg_filename:
81 if not system.path_exists(reg_filename):
82 logger.error("Unexisting registry %s", reg_filename)
83 self.parse_reg_file(reg_filename)
84
85 @property
86 def prefix_path(self):
87 """Return the Wine prefix path (where the .reg files are located)"""
88 if self.reg_filename:
89 return os.path.dirname(self.reg_filename)
90
91 @staticmethod
92 def get_raw_registry(reg_filename):
93 """Return an array of the unprocessed contents of a registry file"""
94 if not system.path_exists(reg_filename):
95 return []
96 with open(reg_filename, "r") as reg_file:
97
98 try:
99 registry_content = reg_file.readlines()
100 except Exception: # pylint: disable=broad-except
101 logger.exception(
102 "Failed to registry read %s, please send attach this file in a bug report",
103 reg_filename
104 )
105 registry_content = []
106 return registry_content
107
108 def parse_reg_file(self, reg_filename):
109 registry_lines = self.get_raw_registry(reg_filename)
110 current_key = None
111 add_next_to_value = False
112 for line in registry_lines:
113 line = line.rstrip("\n") # Remove trailing newlines
114
115 if line.startswith(self.version_header):
116 self.version = int(line[len(self.version_header):])
117 continue
118
119 if line.startswith(self.relative_to_header):
120 self.relative_to = line[len(self.relative_to_header):]
121 continue
122
123 if line.startswith("#arch"):
124 self.arch = line.split("=")[1]
125 continue
126
127 if line.startswith("["):
128 current_key = WineRegistryKey(key_def=line)
129 self.keys[current_key.name] = current_key
130 continue
131
132 if current_key:
133 if add_next_to_value:
134 current_key.add_to_last(line)
135 else:
136 current_key.parse(line)
137 add_next_to_value = line.endswith("\\")
138
139 def render(self):
140 content = "{}{}\n".format(self.version_header, self.version)
141 content += "{}{}\n\n".format(self.relative_to_header, self.relative_to)
142 content += "#arch={}\n".format(self.arch)
143 for key in self.keys:
144 content += "\n"
145 content += self.keys[key].render()
146 return content
147
148 def save(self, path=None):
149 """Write the registry to a file"""
150 if not path:
151 path = self.reg_filename
152 if not path:
153 raise OSError("No filename provided")
154 with open(path, "w") as registry_file:
155 registry_file.write(self.render())
156
157 def query(self, path, subkey):
158 key = self.keys.get(path)
159 if key:
160 return key.get_subkey(subkey)
161
162 def set_value(self, path, subkey, value):
163 key = self.keys.get(path)
164 if not key:
165 key = WineRegistryKey(path=path)
166 self.keys[key.name] = key
167 key.set_subkey(subkey, value)
168
169 def clear_key(self, path):
170 """Removes all subkeys from a key"""
171 key = self.keys.get(path)
172 if not key:
173 return
174 key.subkeys.clear()
175
176 def clear_subkeys(self, path, keys):
177 """Remove some subkeys from a key"""
178 key = self.keys.get(path)
179 if not key:
180 return
181 for subkey in list(key.subkeys.keys()):
182 if subkey not in keys:
183 continue
184 key.subkeys.pop(subkey)
185
186 def get_unix_path(self, windows_path):
187 windows_path = windows_path.replace("\\\\", "/")
188 if not self.prefix_path:
189 return
190 drives_path = os.path.join(self.prefix_path, "dosdevices")
191 if not system.path_exists(drives_path):
192 return
193 letter, relpath = windows_path.split(":", 1)
194 relpath = relpath.strip("/")
195 drive_link = os.path.join(drives_path, letter.lower() + ":")
196 try:
197 drive_path = os.readlink(drive_link)
198 except FileNotFoundError:
199 logger.error("Unable to read link for %s", drive_link)
200 return
201
202 if not os.path.isabs(drive_path):
203 drive_path = os.path.join(drives_path, drive_path)
204 return os.path.join(drive_path, relpath)
205
206
207 class WineRegistryKey:
208 def __init__(self, key_def=None, path=None):
209
210 self.subkeys = OrderedDict()
211 self.metas = OrderedDict()
212
213 if path:
214 # Key is created by path, it's a new key
215 timestamp = datetime.now().timestamp()
216 self.name = path
217 self.raw_name = "[{}]".format(path.replace("/", "\\\\"))
218 self.raw_timestamp = " ".join(str(timestamp).split("."))
219
220 windows_timestamp = WindowsFileTime.from_unix_timestamp(timestamp)
221 self.metas["time"] = windows_timestamp.to_hex()
222 else:
223 # Existing key loaded from file
224 self.raw_name, self.raw_timestamp = re.split(
225 re.compile(r"(?<=[^\\]\]) "), key_def, maxsplit=1
226 )
227 self.name = self.raw_name.replace("\\\\", "/").strip("[]")
228
229 # Parse timestamp either as int or float
230 ts_parts = self.raw_timestamp.strip().split()
231 if len(ts_parts) == 1:
232 self.timestamp = int(ts_parts[0])
233 else:
234 self.timestamp = float("{}.{}".format(ts_parts[0], ts_parts[1]))
235
236 def __str__(self):
237 return "{0} {1}".format(self.raw_name, self.raw_timestamp)
238
239 def parse(self, line):
240 """Parse a registry line, populating meta and subkeys"""
241 if len(line) < 4:
242 # Line is too short, nothing to parse
243 return
244
245 if line.startswith("#"):
246 self.add_meta(line)
247 elif line.startswith('"'):
248 try:
249 key, value = re.split(re.compile(r"(?<![^\\]\\\")="), line, maxsplit=1)
250 except ValueError as ex:
251 logger.error("Unable to parse line %s", line)
252 logger.exception(ex)
253 return
254 key = key[1:-1]
255 self.subkeys[key] = value
256 elif line.startswith("@"):
257 key, value = line.split("=", 1)
258 self.subkeys["default"] = value
259
260 def add_to_last(self, line):
261 last_subkey = list(self.subkeys.keys())[-1]
262 self.subkeys[last_subkey] += "\n{}".format(line)
263
264 def render(self):
265 """Return the content of the key in the wine .reg format"""
266 content = self.raw_name + " " + self.raw_timestamp + "\n"
267 for key, value in self.metas.items():
268 if value is None:
269 content += "#{}\n".format(key)
270 else:
271 content += "#{}={}\n".format(key, value)
272 for key, value in self.subkeys.items():
273 if key == "default":
274 key = "@"
275 else:
276 key = '"{}"'.format(key)
277 content += "{}={}\n".format(key, value)
278 return content
279
280 def render_value(self, value):
281 if isinstance(value, int):
282 return "dword:{:08x}".format(value)
283 if isinstance(value, str):
284 return '"{}"'.format(value)
285 raise NotImplementedError("TODO")
286
287 def add_meta(self, meta_line):
288 if not meta_line.startswith("#"):
289 raise ValueError("Key metas should start with '#'")
290 meta_line = meta_line[1:]
291 parts = meta_line.split("=")
292 if len(parts) == 2:
293 key = parts[0]
294 value = parts[1]
295 elif len(parts) == 1:
296 key = parts[0]
297 value = None
298 else:
299 raise ValueError("Invalid meta line '{}'".format(meta_line))
300 self.metas[key] = value
301
302 def get_meta(self, name):
303 return self.metas.get(name)
304
305 def set_subkey(self, name, value):
306 self.subkeys[name] = self.render_value(value)
307
308 def get_subkey(self, name):
309 if name not in self.subkeys:
310 return None
311 value = self.subkeys[name]
312 if value.startswith('"') and value.endswith('"'):
313 return value[1:-1]
314 if value.startswith("dword:"):
315 return int(value[6:], 16)
316 raise ValueError("Handle %s" % value)
317
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lutris/util/wine/registry.py b/lutris/util/wine/registry.py
--- a/lutris/util/wine/registry.py
+++ b/lutris/util/wine/registry.py
@@ -109,32 +109,29 @@
registry_lines = self.get_raw_registry(reg_filename)
current_key = None
add_next_to_value = False
+ additional_values = []
for line in registry_lines:
- line = line.rstrip("\n") # Remove trailing newlines
-
- if line.startswith(self.version_header):
- self.version = int(line[len(self.version_header):])
- continue
-
- if line.startswith(self.relative_to_header):
- self.relative_to = line[len(self.relative_to_header):]
- continue
-
- if line.startswith("#arch"):
- self.arch = line.split("=")[1]
- continue
-
- if line.startswith("["):
- current_key = WineRegistryKey(key_def=line)
- self.keys[current_key.name] = current_key
- continue
+ line = line.rstrip("\n")
if current_key:
if add_next_to_value:
- current_key.add_to_last(line)
- else:
+ additional_values.append(line)
+ elif not add_next_to_value:
+ if additional_values:
+ additional_values = '\n'.join(additional_values)
+ current_key.add_to_last(additional_values)
+ additional_values = []
current_key.parse(line)
add_next_to_value = line.endswith("\\")
+ elif line.startswith("["):
+ current_key = WineRegistryKey(key_def=line)
+ self.keys[current_key.name] = current_key
+ elif line.startswith(self.version_header):
+ self.version = int(line[len(self.version_header):])
+ elif line.startswith(self.relative_to_header):
+ self.relative_to = line[len(self.relative_to_header):]
+ elif line.startswith("#arch"):
+ self.arch = line.split("=")[1]
def render(self):
content = "{}{}\n".format(self.version_header, self.version)
@@ -258,7 +255,7 @@
self.subkeys["default"] = value
def add_to_last(self, line):
- last_subkey = list(self.subkeys.keys())[-1]
+ last_subkey = next(reversed(self.subkeys))
self.subkeys[last_subkey] += "\n{}".format(line)
def render(self):
|
{"golden_diff": "diff --git a/lutris/util/wine/registry.py b/lutris/util/wine/registry.py\n--- a/lutris/util/wine/registry.py\n+++ b/lutris/util/wine/registry.py\n@@ -109,32 +109,29 @@\n registry_lines = self.get_raw_registry(reg_filename)\n current_key = None\n add_next_to_value = False\n+ additional_values = []\n for line in registry_lines:\n- line = line.rstrip(\"\\n\") # Remove trailing newlines\n-\n- if line.startswith(self.version_header):\n- self.version = int(line[len(self.version_header):])\n- continue\n-\n- if line.startswith(self.relative_to_header):\n- self.relative_to = line[len(self.relative_to_header):]\n- continue\n-\n- if line.startswith(\"#arch\"):\n- self.arch = line.split(\"=\")[1]\n- continue\n-\n- if line.startswith(\"[\"):\n- current_key = WineRegistryKey(key_def=line)\n- self.keys[current_key.name] = current_key\n- continue\n+ line = line.rstrip(\"\\n\")\n \n if current_key:\n if add_next_to_value:\n- current_key.add_to_last(line)\n- else:\n+ additional_values.append(line)\n+ elif not add_next_to_value:\n+ if additional_values:\n+ additional_values = '\\n'.join(additional_values)\n+ current_key.add_to_last(additional_values)\n+ additional_values = []\n current_key.parse(line)\n add_next_to_value = line.endswith(\"\\\\\")\n+ elif line.startswith(\"[\"):\n+ current_key = WineRegistryKey(key_def=line)\n+ self.keys[current_key.name] = current_key\n+ elif line.startswith(self.version_header):\n+ self.version = int(line[len(self.version_header):])\n+ elif line.startswith(self.relative_to_header):\n+ self.relative_to = line[len(self.relative_to_header):]\n+ elif line.startswith(\"#arch\"):\n+ self.arch = line.split(\"=\")[1]\n \n def render(self):\n content = \"{}{}\\n\".format(self.version_header, self.version)\n@@ -258,7 +255,7 @@\n self.subkeys[\"default\"] = value\n \n def add_to_last(self, line):\n- last_subkey = list(self.subkeys.keys())[-1]\n+ last_subkey = next(reversed(self.subkeys))\n self.subkeys[last_subkey] += \"\\n{}\".format(line)\n \n def render(self):\n", "issue": "Lutris takes extremely long time to parse very long string keys in registry\nIf user.reg contains very long (we a talking megabytes long here) string key it takes a very long time to start anything in this wineprefix with lutris. With ~15MB key lutris was taking ~30 minutes. It happens before starting wine, in \"preparing to start\" phase.\r\n\r\nP.S. If you ask how does this happens - Battletech ny HBS writes some analytics into string key in registry. 
And it only adds to it, never cleans.\n", "before_files": [{"content": "import os\nimport re\nfrom collections import OrderedDict\nfrom datetime import datetime\nfrom lutris.util.log import logger\nfrom lutris.util import system\nfrom lutris.util.wine.wine import WINE_DEFAULT_ARCH\n\n(\n REG_NONE,\n REG_SZ,\n REG_EXPAND_SZ,\n REG_BINARY,\n REG_DWORD,\n REG_DWORD_BIG_ENDIAN,\n REG_LINK,\n REG_MULTI_SZ,\n) = range(8)\n\nDATA_TYPES = {\n '\"': REG_SZ,\n 'str:\"': REG_SZ,\n 'str(2):\"': REG_EXPAND_SZ,\n 'str(7):\"': REG_MULTI_SZ,\n \"hex\": REG_BINARY,\n \"dword\": REG_DWORD,\n}\n\n\nclass WindowsFileTime:\n \"\"\"Utility class to deal with Windows FILETIME structures.\n\n See: https://msdn.microsoft.com/en-us/library/ms724284(v=vs.85).aspx\n \"\"\"\n\n ticks_per_seconds = 10000000 # 1 tick every 100 nanoseconds\n epoch_delta = 11644473600 # 3600 * 24 * ((1970 - 1601) * 365 + 89)\n\n def __init__(self, timestamp=None):\n self.timestamp = timestamp\n\n def __repr__(self):\n return \"<{}>: {}\".format(self.__class__.__name__, self.timestamp)\n\n @classmethod\n def from_hex(cls, hexvalue):\n timestamp = int(hexvalue, 16)\n return WindowsFileTime(timestamp)\n\n def to_hex(self):\n return \"{:x}\".format(self.timestamp)\n\n @classmethod\n def from_unix_timestamp(cls, timestamp):\n timestamp = timestamp + cls.epoch_delta\n timestamp = int(timestamp * cls.ticks_per_seconds)\n return WindowsFileTime(timestamp)\n\n def to_unix_timestamp(self):\n if not self.timestamp:\n raise ValueError(\"No timestamp set\")\n unix_ts = self.timestamp / self.ticks_per_seconds\n unix_ts = unix_ts - self.epoch_delta\n return unix_ts\n\n def to_date_time(self):\n return datetime.fromtimestamp(self.to_unix_timestamp())\n\n\nclass WineRegistry:\n version_header = \"WINE REGISTRY Version \"\n relative_to_header = \";; All keys relative to \"\n\n def __init__(self, reg_filename=None):\n self.arch = WINE_DEFAULT_ARCH\n self.version = 2\n self.relative_to = \"\\\\\\\\User\\\\\\\\S-1-5-21-0-0-0-1000\"\n self.keys = OrderedDict()\n self.reg_filename = reg_filename\n if reg_filename:\n if not system.path_exists(reg_filename):\n logger.error(\"Unexisting registry %s\", reg_filename)\n self.parse_reg_file(reg_filename)\n\n @property\n def prefix_path(self):\n \"\"\"Return the Wine prefix path (where the .reg files are located)\"\"\"\n if self.reg_filename:\n return os.path.dirname(self.reg_filename)\n\n @staticmethod\n def get_raw_registry(reg_filename):\n \"\"\"Return an array of the unprocessed contents of a registry file\"\"\"\n if not system.path_exists(reg_filename):\n return []\n with open(reg_filename, \"r\") as reg_file:\n\n try:\n registry_content = reg_file.readlines()\n except Exception: # pylint: disable=broad-except\n logger.exception(\n \"Failed to registry read %s, please send attach this file in a bug report\",\n reg_filename\n )\n registry_content = []\n return registry_content\n\n def parse_reg_file(self, reg_filename):\n registry_lines = self.get_raw_registry(reg_filename)\n current_key = None\n add_next_to_value = False\n for line in registry_lines:\n line = line.rstrip(\"\\n\") # Remove trailing newlines\n\n if line.startswith(self.version_header):\n self.version = int(line[len(self.version_header):])\n continue\n\n if line.startswith(self.relative_to_header):\n self.relative_to = line[len(self.relative_to_header):]\n continue\n\n if line.startswith(\"#arch\"):\n self.arch = line.split(\"=\")[1]\n continue\n\n if line.startswith(\"[\"):\n current_key = WineRegistryKey(key_def=line)\n self.keys[current_key.name] 
= current_key\n continue\n\n if current_key:\n if add_next_to_value:\n current_key.add_to_last(line)\n else:\n current_key.parse(line)\n add_next_to_value = line.endswith(\"\\\\\")\n\n def render(self):\n content = \"{}{}\\n\".format(self.version_header, self.version)\n content += \"{}{}\\n\\n\".format(self.relative_to_header, self.relative_to)\n content += \"#arch={}\\n\".format(self.arch)\n for key in self.keys:\n content += \"\\n\"\n content += self.keys[key].render()\n return content\n\n def save(self, path=None):\n \"\"\"Write the registry to a file\"\"\"\n if not path:\n path = self.reg_filename\n if not path:\n raise OSError(\"No filename provided\")\n with open(path, \"w\") as registry_file:\n registry_file.write(self.render())\n\n def query(self, path, subkey):\n key = self.keys.get(path)\n if key:\n return key.get_subkey(subkey)\n\n def set_value(self, path, subkey, value):\n key = self.keys.get(path)\n if not key:\n key = WineRegistryKey(path=path)\n self.keys[key.name] = key\n key.set_subkey(subkey, value)\n\n def clear_key(self, path):\n \"\"\"Removes all subkeys from a key\"\"\"\n key = self.keys.get(path)\n if not key:\n return\n key.subkeys.clear()\n\n def clear_subkeys(self, path, keys):\n \"\"\"Remove some subkeys from a key\"\"\"\n key = self.keys.get(path)\n if not key:\n return\n for subkey in list(key.subkeys.keys()):\n if subkey not in keys:\n continue\n key.subkeys.pop(subkey)\n\n def get_unix_path(self, windows_path):\n windows_path = windows_path.replace(\"\\\\\\\\\", \"/\")\n if not self.prefix_path:\n return\n drives_path = os.path.join(self.prefix_path, \"dosdevices\")\n if not system.path_exists(drives_path):\n return\n letter, relpath = windows_path.split(\":\", 1)\n relpath = relpath.strip(\"/\")\n drive_link = os.path.join(drives_path, letter.lower() + \":\")\n try:\n drive_path = os.readlink(drive_link)\n except FileNotFoundError:\n logger.error(\"Unable to read link for %s\", drive_link)\n return\n\n if not os.path.isabs(drive_path):\n drive_path = os.path.join(drives_path, drive_path)\n return os.path.join(drive_path, relpath)\n\n\nclass WineRegistryKey:\n def __init__(self, key_def=None, path=None):\n\n self.subkeys = OrderedDict()\n self.metas = OrderedDict()\n\n if path:\n # Key is created by path, it's a new key\n timestamp = datetime.now().timestamp()\n self.name = path\n self.raw_name = \"[{}]\".format(path.replace(\"/\", \"\\\\\\\\\"))\n self.raw_timestamp = \" \".join(str(timestamp).split(\".\"))\n\n windows_timestamp = WindowsFileTime.from_unix_timestamp(timestamp)\n self.metas[\"time\"] = windows_timestamp.to_hex()\n else:\n # Existing key loaded from file\n self.raw_name, self.raw_timestamp = re.split(\n re.compile(r\"(?<=[^\\\\]\\]) \"), key_def, maxsplit=1\n )\n self.name = self.raw_name.replace(\"\\\\\\\\\", \"/\").strip(\"[]\")\n\n # Parse timestamp either as int or float\n ts_parts = self.raw_timestamp.strip().split()\n if len(ts_parts) == 1:\n self.timestamp = int(ts_parts[0])\n else:\n self.timestamp = float(\"{}.{}\".format(ts_parts[0], ts_parts[1]))\n\n def __str__(self):\n return \"{0} {1}\".format(self.raw_name, self.raw_timestamp)\n\n def parse(self, line):\n \"\"\"Parse a registry line, populating meta and subkeys\"\"\"\n if len(line) < 4:\n # Line is too short, nothing to parse\n return\n\n if line.startswith(\"#\"):\n self.add_meta(line)\n elif line.startswith('\"'):\n try:\n key, value = re.split(re.compile(r\"(?<![^\\\\]\\\\\\\")=\"), line, maxsplit=1)\n except ValueError as ex:\n logger.error(\"Unable to parse line %s\", 
line)\n logger.exception(ex)\n return\n key = key[1:-1]\n self.subkeys[key] = value\n elif line.startswith(\"@\"):\n key, value = line.split(\"=\", 1)\n self.subkeys[\"default\"] = value\n\n def add_to_last(self, line):\n last_subkey = list(self.subkeys.keys())[-1]\n self.subkeys[last_subkey] += \"\\n{}\".format(line)\n\n def render(self):\n \"\"\"Return the content of the key in the wine .reg format\"\"\"\n content = self.raw_name + \" \" + self.raw_timestamp + \"\\n\"\n for key, value in self.metas.items():\n if value is None:\n content += \"#{}\\n\".format(key)\n else:\n content += \"#{}={}\\n\".format(key, value)\n for key, value in self.subkeys.items():\n if key == \"default\":\n key = \"@\"\n else:\n key = '\"{}\"'.format(key)\n content += \"{}={}\\n\".format(key, value)\n return content\n\n def render_value(self, value):\n if isinstance(value, int):\n return \"dword:{:08x}\".format(value)\n if isinstance(value, str):\n return '\"{}\"'.format(value)\n raise NotImplementedError(\"TODO\")\n\n def add_meta(self, meta_line):\n if not meta_line.startswith(\"#\"):\n raise ValueError(\"Key metas should start with '#'\")\n meta_line = meta_line[1:]\n parts = meta_line.split(\"=\")\n if len(parts) == 2:\n key = parts[0]\n value = parts[1]\n elif len(parts) == 1:\n key = parts[0]\n value = None\n else:\n raise ValueError(\"Invalid meta line '{}'\".format(meta_line))\n self.metas[key] = value\n\n def get_meta(self, name):\n return self.metas.get(name)\n\n def set_subkey(self, name, value):\n self.subkeys[name] = self.render_value(value)\n\n def get_subkey(self, name):\n if name not in self.subkeys:\n return None\n value = self.subkeys[name]\n if value.startswith('\"') and value.endswith('\"'):\n return value[1:-1]\n if value.startswith(\"dword:\"):\n return int(value[6:], 16)\n raise ValueError(\"Handle %s\" % value)\n", "path": "lutris/util/wine/registry.py"}], "after_files": [{"content": "import os\nimport re\nfrom collections import OrderedDict\nfrom datetime import datetime\nfrom lutris.util.log import logger\nfrom lutris.util import system\nfrom lutris.util.wine.wine import WINE_DEFAULT_ARCH\n\n(\n REG_NONE,\n REG_SZ,\n REG_EXPAND_SZ,\n REG_BINARY,\n REG_DWORD,\n REG_DWORD_BIG_ENDIAN,\n REG_LINK,\n REG_MULTI_SZ,\n) = range(8)\n\nDATA_TYPES = {\n '\"': REG_SZ,\n 'str:\"': REG_SZ,\n 'str(2):\"': REG_EXPAND_SZ,\n 'str(7):\"': REG_MULTI_SZ,\n \"hex\": REG_BINARY,\n \"dword\": REG_DWORD,\n}\n\n\nclass WindowsFileTime:\n \"\"\"Utility class to deal with Windows FILETIME structures.\n\n See: https://msdn.microsoft.com/en-us/library/ms724284(v=vs.85).aspx\n \"\"\"\n\n ticks_per_seconds = 10000000 # 1 tick every 100 nanoseconds\n epoch_delta = 11644473600 # 3600 * 24 * ((1970 - 1601) * 365 + 89)\n\n def __init__(self, timestamp=None):\n self.timestamp = timestamp\n\n def __repr__(self):\n return \"<{}>: {}\".format(self.__class__.__name__, self.timestamp)\n\n @classmethod\n def from_hex(cls, hexvalue):\n timestamp = int(hexvalue, 16)\n return WindowsFileTime(timestamp)\n\n def to_hex(self):\n return \"{:x}\".format(self.timestamp)\n\n @classmethod\n def from_unix_timestamp(cls, timestamp):\n timestamp = timestamp + cls.epoch_delta\n timestamp = int(timestamp * cls.ticks_per_seconds)\n return WindowsFileTime(timestamp)\n\n def to_unix_timestamp(self):\n if not self.timestamp:\n raise ValueError(\"No timestamp set\")\n unix_ts = self.timestamp / self.ticks_per_seconds\n unix_ts = unix_ts - self.epoch_delta\n return unix_ts\n\n def to_date_time(self):\n return 
datetime.fromtimestamp(self.to_unix_timestamp())\n\n\nclass WineRegistry:\n version_header = \"WINE REGISTRY Version \"\n relative_to_header = \";; All keys relative to \"\n\n def __init__(self, reg_filename=None):\n self.arch = WINE_DEFAULT_ARCH\n self.version = 2\n self.relative_to = \"\\\\\\\\User\\\\\\\\S-1-5-21-0-0-0-1000\"\n self.keys = OrderedDict()\n self.reg_filename = reg_filename\n if reg_filename:\n if not system.path_exists(reg_filename):\n logger.error(\"Unexisting registry %s\", reg_filename)\n self.parse_reg_file(reg_filename)\n\n @property\n def prefix_path(self):\n \"\"\"Return the Wine prefix path (where the .reg files are located)\"\"\"\n if self.reg_filename:\n return os.path.dirname(self.reg_filename)\n\n @staticmethod\n def get_raw_registry(reg_filename):\n \"\"\"Return an array of the unprocessed contents of a registry file\"\"\"\n if not system.path_exists(reg_filename):\n return []\n with open(reg_filename, \"r\") as reg_file:\n\n try:\n registry_content = reg_file.readlines()\n except Exception: # pylint: disable=broad-except\n logger.exception(\n \"Failed to registry read %s, please send attach this file in a bug report\",\n reg_filename\n )\n registry_content = []\n return registry_content\n\n def parse_reg_file(self, reg_filename):\n registry_lines = self.get_raw_registry(reg_filename)\n current_key = None\n add_next_to_value = False\n additional_values = []\n for line in registry_lines:\n line = line.rstrip(\"\\n\")\n\n if current_key:\n if add_next_to_value:\n additional_values.append(line)\n elif not add_next_to_value:\n if additional_values:\n additional_values = '\\n'.join(additional_values)\n current_key.add_to_last(additional_values)\n additional_values = []\n current_key.parse(line)\n add_next_to_value = line.endswith(\"\\\\\")\n elif line.startswith(\"[\"):\n current_key = WineRegistryKey(key_def=line)\n self.keys[current_key.name] = current_key\n elif line.startswith(self.version_header):\n self.version = int(line[len(self.version_header):])\n elif line.startswith(self.relative_to_header):\n self.relative_to = line[len(self.relative_to_header):]\n elif line.startswith(\"#arch\"):\n self.arch = line.split(\"=\")[1]\n\n def render(self):\n content = \"{}{}\\n\".format(self.version_header, self.version)\n content += \"{}{}\\n\\n\".format(self.relative_to_header, self.relative_to)\n content += \"#arch={}\\n\".format(self.arch)\n for key in self.keys:\n content += \"\\n\"\n content += self.keys[key].render()\n return content\n\n def save(self, path=None):\n \"\"\"Write the registry to a file\"\"\"\n if not path:\n path = self.reg_filename\n if not path:\n raise OSError(\"No filename provided\")\n with open(path, \"w\") as registry_file:\n registry_file.write(self.render())\n\n def query(self, path, subkey):\n key = self.keys.get(path)\n if key:\n return key.get_subkey(subkey)\n\n def set_value(self, path, subkey, value):\n key = self.keys.get(path)\n if not key:\n key = WineRegistryKey(path=path)\n self.keys[key.name] = key\n key.set_subkey(subkey, value)\n\n def clear_key(self, path):\n \"\"\"Removes all subkeys from a key\"\"\"\n key = self.keys.get(path)\n if not key:\n return\n key.subkeys.clear()\n\n def clear_subkeys(self, path, keys):\n \"\"\"Remove some subkeys from a key\"\"\"\n key = self.keys.get(path)\n if not key:\n return\n for subkey in list(key.subkeys.keys()):\n if subkey not in keys:\n continue\n key.subkeys.pop(subkey)\n\n def get_unix_path(self, windows_path):\n windows_path = windows_path.replace(\"\\\\\\\\\", \"/\")\n if not 
self.prefix_path:\n return\n drives_path = os.path.join(self.prefix_path, \"dosdevices\")\n if not system.path_exists(drives_path):\n return\n letter, relpath = windows_path.split(\":\", 1)\n relpath = relpath.strip(\"/\")\n drive_link = os.path.join(drives_path, letter.lower() + \":\")\n try:\n drive_path = os.readlink(drive_link)\n except FileNotFoundError:\n logger.error(\"Unable to read link for %s\", drive_link)\n return\n\n if not os.path.isabs(drive_path):\n drive_path = os.path.join(drives_path, drive_path)\n return os.path.join(drive_path, relpath)\n\n\nclass WineRegistryKey:\n def __init__(self, key_def=None, path=None):\n\n self.subkeys = OrderedDict()\n self.metas = OrderedDict()\n\n if path:\n # Key is created by path, it's a new key\n timestamp = datetime.now().timestamp()\n self.name = path\n self.raw_name = \"[{}]\".format(path.replace(\"/\", \"\\\\\\\\\"))\n self.raw_timestamp = \" \".join(str(timestamp).split(\".\"))\n\n windows_timestamp = WindowsFileTime.from_unix_timestamp(timestamp)\n self.metas[\"time\"] = windows_timestamp.to_hex()\n else:\n # Existing key loaded from file\n self.raw_name, self.raw_timestamp = re.split(\n re.compile(r\"(?<=[^\\\\]\\]) \"), key_def, maxsplit=1\n )\n self.name = self.raw_name.replace(\"\\\\\\\\\", \"/\").strip(\"[]\")\n\n # Parse timestamp either as int or float\n ts_parts = self.raw_timestamp.strip().split()\n if len(ts_parts) == 1:\n self.timestamp = int(ts_parts[0])\n else:\n self.timestamp = float(\"{}.{}\".format(ts_parts[0], ts_parts[1]))\n\n def __str__(self):\n return \"{0} {1}\".format(self.raw_name, self.raw_timestamp)\n\n def parse(self, line):\n \"\"\"Parse a registry line, populating meta and subkeys\"\"\"\n if len(line) < 4:\n # Line is too short, nothing to parse\n return\n\n if line.startswith(\"#\"):\n self.add_meta(line)\n elif line.startswith('\"'):\n try:\n key, value = re.split(re.compile(r\"(?<![^\\\\]\\\\\\\")=\"), line, maxsplit=1)\n except ValueError as ex:\n logger.error(\"Unable to parse line %s\", line)\n logger.exception(ex)\n return\n key = key[1:-1]\n self.subkeys[key] = value\n elif line.startswith(\"@\"):\n key, value = line.split(\"=\", 1)\n self.subkeys[\"default\"] = value\n\n def add_to_last(self, line):\n last_subkey = next(reversed(self.subkeys))\n self.subkeys[last_subkey] += \"\\n{}\".format(line)\n\n def render(self):\n \"\"\"Return the content of the key in the wine .reg format\"\"\"\n content = self.raw_name + \" \" + self.raw_timestamp + \"\\n\"\n for key, value in self.metas.items():\n if value is None:\n content += \"#{}\\n\".format(key)\n else:\n content += \"#{}={}\\n\".format(key, value)\n for key, value in self.subkeys.items():\n if key == \"default\":\n key = \"@\"\n else:\n key = '\"{}\"'.format(key)\n content += \"{}={}\\n\".format(key, value)\n return content\n\n def render_value(self, value):\n if isinstance(value, int):\n return \"dword:{:08x}\".format(value)\n if isinstance(value, str):\n return '\"{}\"'.format(value)\n raise NotImplementedError(\"TODO\")\n\n def add_meta(self, meta_line):\n if not meta_line.startswith(\"#\"):\n raise ValueError(\"Key metas should start with '#'\")\n meta_line = meta_line[1:]\n parts = meta_line.split(\"=\")\n if len(parts) == 2:\n key = parts[0]\n value = parts[1]\n elif len(parts) == 1:\n key = parts[0]\n value = None\n else:\n raise ValueError(\"Invalid meta line '{}'\".format(meta_line))\n self.metas[key] = value\n\n def get_meta(self, name):\n return self.metas.get(name)\n\n def set_subkey(self, name, value):\n self.subkeys[name] = 
self.render_value(value)\n\n def get_subkey(self, name):\n if name not in self.subkeys:\n return None\n value = self.subkeys[name]\n if value.startswith('\"') and value.endswith('\"'):\n return value[1:-1]\n if value.startswith(\"dword:\"):\n return int(value[6:], 16)\n raise ValueError(\"Handle %s\" % value)\n", "path": "lutris/util/wine/registry.py"}]}
| 3,617 | 536 |
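The core of the fix above is replacing per-line string concatenation (each `+=` on the stored value copies the whole value again, which becomes quadratic for megabyte-sized keys) with buffering continuation lines and joining them once. A minimal standalone sketch of the same idea — the helper names and timing harness here are illustrative, not Lutris code:

```python
from collections import OrderedDict
import time

def append_lines_slow(subkeys, lines):
    # One += per continuation line: every append copies the whole stored value,
    # so an N-line value costs O(N^2) character copies (plus a rebuilt key list per line).
    for line in lines:
        last = list(subkeys.keys())[-1]
        subkeys[last] += "\n{}".format(line)

def append_lines_fast(subkeys, lines):
    # Buffer the continuation lines and join them once, as in the patch.
    last = next(reversed(subkeys))
    subkeys[last] += "\n" + "\n".join(lines)

if __name__ == "__main__":
    lines = ["x" * 100] * 10_000  # roughly a 1 MB value, same shape as the report
    for fn in (append_lines_fast, append_lines_slow):
        subkeys = OrderedDict({"analytics": '"payload"'})
        start = time.time()
        fn(subkeys, lines)
        print(fn.__name__, "%.2fs" % (time.time() - start))
```

The gap between the two grows quadratically with the value size, which is consistent with the ~30-minute startup reported for a ~15 MB key.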
gh_patches_debug_33078 | rasdani/github-patches | git_diff | getsentry__sentry-python-1641 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Django Signals integration breaks on partial objects for python <3.10
### How do you use Sentry?
Self-hosted/on-premise
### Version
1.9.9
### Steps to Reproduce
1. Use python older than 3.10.
2. Register a partial function as a signal handler.
### Expected Result
Signal is traced correctly.
### Actual Result
Exception is raised from `_get_receiver_name` function as `partial` objects don't have `__module__` before python 3.10 (and even there it's undocumented from what I can see).
It fails in our tests where we don't even register any signals so either Django itself or some kind of integration (Sentry?) registers such signals by default.
The whole signals integration is missing a `capture_internal_exceptions` context too I believe.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sentry_sdk/integrations/django/signals_handlers.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from __future__ import absolute_import
3
4 from django.dispatch import Signal
5
6 from sentry_sdk import Hub
7 from sentry_sdk._types import MYPY
8
9
10 if MYPY:
11 from typing import Any
12 from typing import Callable
13 from typing import List
14
15
16 def patch_signals():
17 # type: () -> None
18 """Patch django signal receivers to create a span"""
19
20 old_live_receivers = Signal._live_receivers
21
22 def _get_receiver_name(receiver):
23 # type: (Callable[..., Any]) -> str
24 name = receiver.__module__ + "."
25 if hasattr(receiver, "__name__"):
26 return name + receiver.__name__
27 return name + str(receiver)
28
29 def _sentry_live_receivers(self, sender):
30 # type: (Signal, Any) -> List[Callable[..., Any]]
31 hub = Hub.current
32 receivers = old_live_receivers(self, sender)
33
34 def sentry_receiver_wrapper(receiver):
35 # type: (Callable[..., Any]) -> Callable[..., Any]
36 def wrapper(*args, **kwargs):
37 # type: (Any, Any) -> Any
38 with hub.start_span(
39 op="django.signals",
40 description=_get_receiver_name(receiver),
41 ) as span:
42 span.set_data("signal", _get_receiver_name(receiver))
43 return receiver(*args, **kwargs)
44
45 return wrapper
46
47 for idx, receiver in enumerate(receivers):
48 receivers[idx] = sentry_receiver_wrapper(receiver)
49
50 return receivers
51
52 Signal._live_receivers = _sentry_live_receivers
53
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sentry_sdk/integrations/django/signals_handlers.py b/sentry_sdk/integrations/django/signals_handlers.py
--- a/sentry_sdk/integrations/django/signals_handlers.py
+++ b/sentry_sdk/integrations/django/signals_handlers.py
@@ -13,19 +13,32 @@
from typing import List
+def _get_receiver_name(receiver):
+ # type: (Callable[..., Any]) -> str
+ name = ""
+
+ if hasattr(receiver, "__qualname__"):
+ name += receiver.__qualname__
+ elif hasattr(receiver, "__name__"): # Python 2.7 has no __qualname__
+ name += receiver.__name__
+
+ if (
+ name == ""
+ ): # certain functions (like partials) dont have a name so return the string representation
+ return str(receiver)
+
+ if hasattr(receiver, "__module__"): # prepend with module, if there is one
+ name = receiver.__module__ + "." + name
+
+ return name
+
+
def patch_signals():
# type: () -> None
"""Patch django signal receivers to create a span"""
old_live_receivers = Signal._live_receivers
- def _get_receiver_name(receiver):
- # type: (Callable[..., Any]) -> str
- name = receiver.__module__ + "."
- if hasattr(receiver, "__name__"):
- return name + receiver.__name__
- return name + str(receiver)
-
def _sentry_live_receivers(self, sender):
# type: (Signal, Any) -> List[Callable[..., Any]]
hub = Hub.current
@@ -35,11 +48,12 @@
# type: (Callable[..., Any]) -> Callable[..., Any]
def wrapper(*args, **kwargs):
# type: (Any, Any) -> Any
+ signal_name = _get_receiver_name(receiver)
with hub.start_span(
op="django.signals",
- description=_get_receiver_name(receiver),
+ description=signal_name,
) as span:
- span.set_data("signal", _get_receiver_name(receiver))
+ span.set_data("signal", signal_name)
return receiver(*args, **kwargs)
return wrapper
|
{"golden_diff": "diff --git a/sentry_sdk/integrations/django/signals_handlers.py b/sentry_sdk/integrations/django/signals_handlers.py\n--- a/sentry_sdk/integrations/django/signals_handlers.py\n+++ b/sentry_sdk/integrations/django/signals_handlers.py\n@@ -13,19 +13,32 @@\n from typing import List\n \n \n+def _get_receiver_name(receiver):\n+ # type: (Callable[..., Any]) -> str\n+ name = \"\"\n+\n+ if hasattr(receiver, \"__qualname__\"):\n+ name += receiver.__qualname__\n+ elif hasattr(receiver, \"__name__\"): # Python 2.7 has no __qualname__\n+ name += receiver.__name__\n+\n+ if (\n+ name == \"\"\n+ ): # certain functions (like partials) dont have a name so return the string representation\n+ return str(receiver)\n+\n+ if hasattr(receiver, \"__module__\"): # prepend with module, if there is one\n+ name = receiver.__module__ + \".\" + name\n+\n+ return name\n+\n+\n def patch_signals():\n # type: () -> None\n \"\"\"Patch django signal receivers to create a span\"\"\"\n \n old_live_receivers = Signal._live_receivers\n \n- def _get_receiver_name(receiver):\n- # type: (Callable[..., Any]) -> str\n- name = receiver.__module__ + \".\"\n- if hasattr(receiver, \"__name__\"):\n- return name + receiver.__name__\n- return name + str(receiver)\n-\n def _sentry_live_receivers(self, sender):\n # type: (Signal, Any) -> List[Callable[..., Any]]\n hub = Hub.current\n@@ -35,11 +48,12 @@\n # type: (Callable[..., Any]) -> Callable[..., Any]\n def wrapper(*args, **kwargs):\n # type: (Any, Any) -> Any\n+ signal_name = _get_receiver_name(receiver)\n with hub.start_span(\n op=\"django.signals\",\n- description=_get_receiver_name(receiver),\n+ description=signal_name,\n ) as span:\n- span.set_data(\"signal\", _get_receiver_name(receiver))\n+ span.set_data(\"signal\", signal_name)\n return receiver(*args, **kwargs)\n \n return wrapper\n", "issue": "Django Signals integration breaks on partial objects for python <3.10\n### How do you use Sentry?\n\nSelf-hosted/on-premise\n\n### Version\n\n1.9.9\n\n### Steps to Reproduce\n\n1. Use python older than 3.10.\r\n2. Register a partial function as a signal handler.\n\n### Expected Result\n\nSignal is traced correctly.\n\n### Actual Result\n\nException is raised from `_get_receiver_name` function as `partial` objects don't have `__module__` before python 3.10 (and even there it's undocumented from what I can see).\r\n\r\nIt fails in our tests where we don't even register any signals so either Django itself or some kind of integration (Sentry?) 
registers such signals by default.\r\n\r\nThe whole signals integration is missing a `capture_internal_exceptions` context too I believe.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\n\nfrom django.dispatch import Signal\n\nfrom sentry_sdk import Hub\nfrom sentry_sdk._types import MYPY\n\n\nif MYPY:\n from typing import Any\n from typing import Callable\n from typing import List\n\n\ndef patch_signals():\n # type: () -> None\n \"\"\"Patch django signal receivers to create a span\"\"\"\n\n old_live_receivers = Signal._live_receivers\n\n def _get_receiver_name(receiver):\n # type: (Callable[..., Any]) -> str\n name = receiver.__module__ + \".\"\n if hasattr(receiver, \"__name__\"):\n return name + receiver.__name__\n return name + str(receiver)\n\n def _sentry_live_receivers(self, sender):\n # type: (Signal, Any) -> List[Callable[..., Any]]\n hub = Hub.current\n receivers = old_live_receivers(self, sender)\n\n def sentry_receiver_wrapper(receiver):\n # type: (Callable[..., Any]) -> Callable[..., Any]\n def wrapper(*args, **kwargs):\n # type: (Any, Any) -> Any\n with hub.start_span(\n op=\"django.signals\",\n description=_get_receiver_name(receiver),\n ) as span:\n span.set_data(\"signal\", _get_receiver_name(receiver))\n return receiver(*args, **kwargs)\n\n return wrapper\n\n for idx, receiver in enumerate(receivers):\n receivers[idx] = sentry_receiver_wrapper(receiver)\n\n return receivers\n\n Signal._live_receivers = _sentry_live_receivers\n", "path": "sentry_sdk/integrations/django/signals_handlers.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\n\nfrom django.dispatch import Signal\n\nfrom sentry_sdk import Hub\nfrom sentry_sdk._types import MYPY\n\n\nif MYPY:\n from typing import Any\n from typing import Callable\n from typing import List\n\n\ndef _get_receiver_name(receiver):\n # type: (Callable[..., Any]) -> str\n name = \"\"\n\n if hasattr(receiver, \"__qualname__\"):\n name += receiver.__qualname__\n elif hasattr(receiver, \"__name__\"): # Python 2.7 has no __qualname__\n name += receiver.__name__\n\n if (\n name == \"\"\n ): # certain functions (like partials) dont have a name so return the string representation\n return str(receiver)\n\n if hasattr(receiver, \"__module__\"): # prepend with module, if there is one\n name = receiver.__module__ + \".\" + name\n\n return name\n\n\ndef patch_signals():\n # type: () -> None\n \"\"\"Patch django signal receivers to create a span\"\"\"\n\n old_live_receivers = Signal._live_receivers\n\n def _sentry_live_receivers(self, sender):\n # type: (Signal, Any) -> List[Callable[..., Any]]\n hub = Hub.current\n receivers = old_live_receivers(self, sender)\n\n def sentry_receiver_wrapper(receiver):\n # type: (Callable[..., Any]) -> Callable[..., Any]\n def wrapper(*args, **kwargs):\n # type: (Any, Any) -> Any\n signal_name = _get_receiver_name(receiver)\n with hub.start_span(\n op=\"django.signals\",\n description=signal_name,\n ) as span:\n span.set_data(\"signal\", signal_name)\n return receiver(*args, **kwargs)\n\n return wrapper\n\n for idx, receiver in enumerate(receivers):\n receivers[idx] = sentry_receiver_wrapper(receiver)\n\n return receivers\n\n Signal._live_receivers = _sentry_live_receivers\n", "path": "sentry_sdk/integrations/django/signals_handlers.py"}]}
| 888 | 506 |
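The patched `_get_receiver_name` above avoids touching `receiver.__module__` unconditionally: it builds the name from `__qualname__`/`__name__` when available, falls back to `str(receiver)` otherwise, and only prepends the module when the attribute exists. A small sketch of the failure mode and the fallback — simplified to Python 3, and a stand-in for the sentry-sdk function rather than a copy of it:

```python
import functools

def handler(sender, **kwargs):
    pass

receiver = functools.partial(handler, sender=None)

# partial objects never carry __name__/__qualname__, and (per the report)
# lack __module__ before Python 3.10, so `receiver.__module__ + "."` raised.
print(hasattr(receiver, "__qualname__"), hasattr(receiver, "__name__"))

def get_receiver_name(receiver):
    name = getattr(receiver, "__qualname__", "") or getattr(receiver, "__name__", "")
    if not name:
        # e.g. "functools.partial(<function handler at 0x...>, sender=None)"
        return str(receiver)
    module = getattr(receiver, "__module__", None)
    return "{}.{}".format(module, name) if module else name

print(get_receiver_name(receiver))  # repr fallback, no AttributeError
print(get_receiver_name(handler))   # "__main__.handler"
```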
gh_patches_debug_40637 | rasdani/github-patches | git_diff | pantsbuild__pants-20505 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Deleted files cause `pants tailor` with `--changed-since` to fail
**Describe the bug**
I use the following command in CI to validate the monorepo, as recommended by the docs:
```shell
> pants \
--changed-since=origin/main \
tailor --check \
update-build-files --check \
lint
```
However, if I delete a package, including its `BUILD` file in a PR, the `--changed-since` flag causes `tailor` to try to run on those files, which `pants` blows up on:
```shell
16:40:57.91 [ERROR] 1 Exception encountered:
Engine traceback:
in `tailor` goal
IntrinsicError: Unmatched glob from `--changed-since`: "aws/projects/my_project_name/*"
Do the file(s) exist? If so, check if the file(s) are in your `.gitignore` or the global `pants_ignore` option, which may result in Pants not being able to see the file(s) even though they exist on disk. Refer to https://www.pantsbuild.org/v2.19/docs/troubleshooting#pants-cannot-find-a-file-in-your-project.
Exited with code exit status 1
```
If files are deleted, yes, they are changed, but they shouldn't throw an error.
**Pants version**
2.19.0
**OS**
Linux (CircleCI Ubuntu executor)
**Additional info**
N/A
--- END ISSUE ---
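Before looking at the files, note what produces the unmatched glob: `git diff --name-only` reports deleted paths just like modified ones, and specs built from paths that no longer exist on disk cannot match anything. A standalone illustration of that — plain git plus Python, not Pants code, with made-up helper names:

```python
import os
import subprocess

def changed_files(since="origin/main"):
    # Names only, merge-base "..." syntax, mirroring how the changed-since set is computed.
    out = subprocess.run(
        ["git", "diff", "--name-only", "{}...HEAD".format(since)],
        check=True, capture_output=True, text=True,
    ).stdout
    return [line for line in out.splitlines() if line]

def partition_existing(paths):
    # Deleted files are still listed in the diff but are gone from the worktree,
    # so a literal spec built from them can never match a file.
    existing = [p for p in paths if os.path.exists(p)]
    deleted = [p for p in paths if not os.path.exists(p)]
    return existing, deleted

if __name__ == "__main__":
    existing, deleted = partition_existing(changed_files())
    print("still present:", existing)
    print("deleted (would become unmatched globs):", deleted)
```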
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/python/pants/init/specs_calculator.py`
Content:
```
1 # Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 import logging
5 from typing import cast
6
7 from pants.base.specs import AddressLiteralSpec, FileLiteralSpec, RawSpecs, Specs
8 from pants.base.specs_parser import SpecsParser
9 from pants.core.util_rules.environments import determine_bootstrap_environment
10 from pants.core.util_rules.system_binaries import GitBinary
11 from pants.engine.addresses import AddressInput
12 from pants.engine.environment import EnvironmentName
13 from pants.engine.internals.scheduler import SchedulerSession
14 from pants.engine.internals.selectors import Params
15 from pants.engine.rules import QueryRule
16 from pants.option.options import Options
17 from pants.option.options_bootstrapper import OptionsBootstrapper
18 from pants.util.frozendict import FrozenDict
19 from pants.vcs.changed import ChangedAddresses, ChangedOptions, ChangedRequest
20 from pants.vcs.git import GitWorktreeRequest, MaybeGitWorktree
21
22 logger = logging.getLogger(__name__)
23
24
25 class InvalidSpecConstraint(Exception):
26 """Raised when invalid constraints are given via specs and arguments like --changed*."""
27
28
29 def calculate_specs(
30 options_bootstrapper: OptionsBootstrapper,
31 options: Options,
32 session: SchedulerSession,
33 working_dir: str,
34 ) -> Specs:
35 """Determine the specs for a given Pants run."""
36 global_options = options.for_global_scope()
37 unmatched_cli_globs = global_options.unmatched_cli_globs
38 specs = SpecsParser(working_dir=working_dir).parse_specs(
39 options.specs,
40 description_of_origin="CLI arguments",
41 unmatched_glob_behavior=unmatched_cli_globs,
42 )
43
44 changed_options = ChangedOptions.from_options(options.for_scope("changed"))
45 logger.debug("specs are: %s", specs)
46 logger.debug("changed_options are: %s", changed_options)
47
48 if specs and changed_options.provided:
49 changed_name = "--changed-since" if changed_options.since else "--changed-diffspec"
50 specs_description = specs.arguments_provided_description()
51 assert specs_description is not None
52 raise InvalidSpecConstraint(
53 f"You used `{changed_name}` at the same time as using {specs_description}. You can "
54 f"only use `{changed_name}` or use normal arguments."
55 )
56
57 if not changed_options.provided:
58 return specs
59
60 bootstrap_environment = determine_bootstrap_environment(session)
61
62 (git_binary,) = session.product_request(GitBinary, [Params(bootstrap_environment)])
63 (maybe_git_worktree,) = session.product_request(
64 MaybeGitWorktree, [Params(GitWorktreeRequest(), git_binary, bootstrap_environment)]
65 )
66 if not maybe_git_worktree.git_worktree:
67 raise InvalidSpecConstraint(
68 "The `--changed-*` options are only available if Git is used for the repository."
69 )
70
71 changed_files = tuple(changed_options.changed_files(maybe_git_worktree.git_worktree))
72 file_literal_specs = tuple(FileLiteralSpec(f) for f in changed_files)
73
74 changed_request = ChangedRequest(changed_files, changed_options.dependents)
75 (changed_addresses,) = session.product_request(
76 ChangedAddresses,
77 [Params(changed_request, options_bootstrapper, bootstrap_environment)],
78 )
79 logger.debug("changed addresses: %s", changed_addresses)
80
81 address_literal_specs = []
82 for address in cast(ChangedAddresses, changed_addresses):
83 address_input = AddressInput.parse(address.spec, description_of_origin="`--changed-since`")
84 address_literal_specs.append(
85 AddressLiteralSpec(
86 path_component=address_input.path_component,
87 target_component=address_input.target_component,
88 generated_component=address_input.generated_component,
89 parameters=FrozenDict(address_input.parameters),
90 )
91 )
92
93 return Specs(
94 includes=RawSpecs(
95 # We need both address_literals and file_literals to cover all our edge cases, including
96 # target-aware vs. target-less goals, e.g. `list` vs `count-loc`.
97 address_literals=tuple(address_literal_specs),
98 file_literals=file_literal_specs,
99 unmatched_glob_behavior=unmatched_cli_globs,
100 filter_by_global_options=True,
101 from_change_detection=True,
102 description_of_origin="`--changed-since`",
103 ),
104 ignores=RawSpecs(description_of_origin="`--changed-since`"),
105 )
106
107
108 def rules():
109 return [
110 QueryRule(ChangedAddresses, [ChangedRequest, EnvironmentName]),
111 QueryRule(GitBinary, [EnvironmentName]),
112 QueryRule(MaybeGitWorktree, [GitWorktreeRequest, GitBinary, EnvironmentName]),
113 ]
114
```
Path: `src/python/pants/vcs/git.py`
Content:
```
1 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 from __future__ import annotations
5
6 import dataclasses
7 import logging
8 import os
9 from dataclasses import dataclass
10 from os import PathLike
11 from pathlib import Path, PurePath
12 from typing import Any, Iterable
13
14 from pants.core.util_rules.system_binaries import GitBinary, GitBinaryException, MaybeGitBinary
15 from pants.engine.engine_aware import EngineAwareReturnType
16 from pants.engine.rules import collect_rules, rule
17 from pants.util.contextutil import pushd
18
19 logger = logging.getLogger(__name__)
20
21
22 class GitWorktree(EngineAwareReturnType):
23 """Implements a safe wrapper for un-sandboxed access to Git in the user's working copy.
24
25 This type (and any wrappers) should be marked `EngineAwareReturnType.cacheable=False`, because
26 it internally uses un-sandboxed APIs, and `@rules` which produce it should re-run in each
27 session. It additionally implements a default `__eq__` in order to prevent early-cutoff in the
28 graph, and force any consumers of the type to re-run.
29 """
30
31 worktree: PurePath
32 _gitdir: PurePath
33 _git_binary: GitBinary
34
35 def __init__(
36 self,
37 binary: GitBinary,
38 worktree: PathLike[str] | None = None,
39 gitdir: PathLike[str] | None = None,
40 ) -> None:
41 """Creates a git object that assumes the git repository is in the cwd by default.
42
43 binary: The git binary to use.
44 worktree: The path to the git repository working tree directory (typically '.').
45 gitdir: The path to the repository's git metadata directory (typically '.git').
46 """
47 self.worktree = Path(worktree or os.getcwd()).resolve()
48 self._gitdir = Path(gitdir).resolve() if gitdir else (self.worktree / ".git")
49 self._git_binary = binary
50
51 def cacheable(self) -> bool:
52 return False
53
54 @property
55 def current_rev_identifier(self):
56 return "HEAD"
57
58 @property
59 def commit_id(self):
60 return self._git_binary._invoke_unsandboxed(self._create_git_cmdline(["rev-parse", "HEAD"]))
61
62 @property
63 def branch_name(self) -> str | None:
64 branch = self._git_binary._invoke_unsandboxed(
65 self._create_git_cmdline(["rev-parse", "--abbrev-ref", "HEAD"])
66 )
67 return None if branch == "HEAD" else branch
68
69 def _fix_git_relative_path(self, worktree_path: str, relative_to: PurePath | str) -> str:
70 return str((self.worktree / worktree_path).relative_to(relative_to))
71
72 def changed_files(
73 self,
74 from_commit: str | None = None,
75 include_untracked: bool = False,
76 relative_to: PurePath | str | None = None,
77 ) -> set[str]:
78 relative_to = PurePath(relative_to) if relative_to is not None else self.worktree
79 rel_suffix = ["--", str(relative_to)]
80 uncommitted_changes = self._git_binary._invoke_unsandboxed(
81 self._create_git_cmdline(
82 ["diff", "--name-only", "HEAD"] + rel_suffix,
83 )
84 )
85
86 files = set(uncommitted_changes.splitlines())
87 if from_commit:
88 # Grab the diff from the merge-base to HEAD using ... syntax. This ensures we have just
89 # the changes that have occurred on the current branch.
90 committed_cmd = ["diff", "--name-only", from_commit + "...HEAD"] + rel_suffix
91 committed_changes = self._git_binary._invoke_unsandboxed(
92 self._create_git_cmdline(committed_cmd)
93 )
94 files.update(committed_changes.split())
95 if include_untracked:
96 untracked_cmd = [
97 "ls-files",
98 "--other",
99 "--exclude-standard",
100 "--full-name",
101 ] + rel_suffix
102 untracked = self._git_binary._invoke_unsandboxed(
103 self._create_git_cmdline(untracked_cmd)
104 )
105 files.update(untracked.split())
106 # git will report changed files relative to the worktree: re-relativize to relative_to
107 return {self._fix_git_relative_path(f, relative_to) for f in files}
108
109 def changes_in(self, diffspec: str, relative_to: PurePath | str | None = None) -> set[str]:
110 relative_to = PurePath(relative_to) if relative_to is not None else self.worktree
111 cmd = ["diff-tree", "--no-commit-id", "--name-only", "-r", diffspec]
112 files = self._git_binary._invoke_unsandboxed(self._create_git_cmdline(cmd)).split()
113 return {self._fix_git_relative_path(f.strip(), relative_to) for f in files}
114
115 def _create_git_cmdline(self, args: Iterable[str]) -> list[str]:
116 return [f"--git-dir={self._gitdir}", f"--work-tree={self.worktree}", *args]
117
118 def __eq__(self, other: Any) -> bool:
119 # NB: See the class doc regarding equality.
120 return id(self) == id(other)
121
122
123 @dataclass(frozen=True)
124 class MaybeGitWorktree(EngineAwareReturnType):
125 git_worktree: GitWorktree | None = None
126
127 def cacheable(self) -> bool:
128 return False
129
130
131 @dataclasses.dataclass(frozen=True)
132 class GitWorktreeRequest:
133 gitdir: PathLike[str] | None = None
134 subdir: PathLike[str] | None = None
135
136
137 @rule
138 async def get_git_worktree(
139 git_worktree_request: GitWorktreeRequest,
140 maybe_git_binary: MaybeGitBinary,
141 ) -> MaybeGitWorktree:
142 if not maybe_git_binary.git_binary:
143 return MaybeGitWorktree()
144
145 git_binary = maybe_git_binary.git_binary
146 cmd = ["rev-parse", "--show-toplevel"]
147
148 try:
149 if git_worktree_request.subdir:
150 with pushd(str(git_worktree_request.subdir)):
151 output = git_binary._invoke_unsandboxed(cmd)
152 else:
153 output = git_binary._invoke_unsandboxed(cmd)
154 except GitBinaryException as e:
155 logger.info(f"No git repository at {os.getcwd()}: {e!r}")
156 return MaybeGitWorktree()
157
158 git_worktree = GitWorktree(
159 binary=git_binary,
160 gitdir=git_worktree_request.gitdir,
161 worktree=PurePath(output),
162 )
163
164 logger.debug(
165 f"Detected git repository at {git_worktree.worktree} on branch {git_worktree.branch_name}"
166 )
167 return MaybeGitWorktree(git_worktree=git_worktree)
168
169
170 def rules():
171 return [*collect_rules()]
172
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/python/pants/init/specs_calculator.py b/src/python/pants/init/specs_calculator.py
--- a/src/python/pants/init/specs_calculator.py
+++ b/src/python/pants/init/specs_calculator.py
@@ -4,6 +4,7 @@
import logging
from typing import cast
+from pants.base.glob_match_error_behavior import GlobMatchErrorBehavior
from pants.base.specs import AddressLiteralSpec, FileLiteralSpec, RawSpecs, Specs
from pants.base.specs_parser import SpecsParser
from pants.core.util_rules.environments import determine_bootstrap_environment
@@ -96,7 +97,11 @@
# target-aware vs. target-less goals, e.g. `list` vs `count-loc`.
address_literals=tuple(address_literal_specs),
file_literals=file_literal_specs,
- unmatched_glob_behavior=unmatched_cli_globs,
+ # The globs here are synthesized from VCS data by the `changed` mechanism.
+ # As such it does not make sense to apply user-facing matching errors to them.
+ # In particular, they can legitimately not match anything, if entire git
+ # subtrees were deleted for example.
+ unmatched_glob_behavior=GlobMatchErrorBehavior.ignore,
filter_by_global_options=True,
from_change_detection=True,
description_of_origin="`--changed-since`",
diff --git a/src/python/pants/vcs/git.py b/src/python/pants/vcs/git.py
--- a/src/python/pants/vcs/git.py
+++ b/src/python/pants/vcs/git.py
@@ -91,7 +91,7 @@
committed_changes = self._git_binary._invoke_unsandboxed(
self._create_git_cmdline(committed_cmd)
)
- files.update(committed_changes.split())
+ files.update(committed_changes.splitlines())
if include_untracked:
untracked_cmd = [
"ls-files",
@@ -102,14 +102,14 @@
untracked = self._git_binary._invoke_unsandboxed(
self._create_git_cmdline(untracked_cmd)
)
- files.update(untracked.split())
+ files.update(untracked.splitlines())
# git will report changed files relative to the worktree: re-relativize to relative_to
return {self._fix_git_relative_path(f, relative_to) for f in files}
def changes_in(self, diffspec: str, relative_to: PurePath | str | None = None) -> set[str]:
relative_to = PurePath(relative_to) if relative_to is not None else self.worktree
cmd = ["diff-tree", "--no-commit-id", "--name-only", "-r", diffspec]
- files = self._git_binary._invoke_unsandboxed(self._create_git_cmdline(cmd)).split()
+ files = self._git_binary._invoke_unsandboxed(self._create_git_cmdline(cmd)).splitlines()
return {self._fix_git_relative_path(f.strip(), relative_to) for f in files}
def _create_git_cmdline(self, args: Iterable[str]) -> list[str]:
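
The hunks above replace `split()` with `splitlines()` when parsing `git ... --name-only` output, presumably so that reported paths containing whitespace stay intact. A minimal sketch of the difference, using hypothetical file names (they are not taken from the pants repository):

```python
# Hypothetical `git diff --name-only` output: one path per line,
# where one path happens to contain a space.
git_output = "src/app utils/helpers.py\ndocs/read me.md\n"

# str.split() with no arguments breaks on ANY whitespace, so the path
# containing a space is torn into fragments that name no real files.
print(git_output.split())
# ['src/app', 'utils/helpers.py', 'docs/read', 'me.md']

# str.splitlines() breaks only on newlines, matching git's
# one-path-per-line output, so each entry is a complete path.
print(git_output.splitlines())
# ['src/app utils/helpers.py', 'docs/read me.md']
```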
|
{"golden_diff": "diff --git a/src/python/pants/init/specs_calculator.py b/src/python/pants/init/specs_calculator.py\n--- a/src/python/pants/init/specs_calculator.py\n+++ b/src/python/pants/init/specs_calculator.py\n@@ -4,6 +4,7 @@\n import logging\n from typing import cast\n \n+from pants.base.glob_match_error_behavior import GlobMatchErrorBehavior\n from pants.base.specs import AddressLiteralSpec, FileLiteralSpec, RawSpecs, Specs\n from pants.base.specs_parser import SpecsParser\n from pants.core.util_rules.environments import determine_bootstrap_environment\n@@ -96,7 +97,11 @@\n # target-aware vs. target-less goals, e.g. `list` vs `count-loc`.\n address_literals=tuple(address_literal_specs),\n file_literals=file_literal_specs,\n- unmatched_glob_behavior=unmatched_cli_globs,\n+ # The globs here are synthesized from VCS data by the `changed` mechanism.\n+ # As such it does not make sense to apply user-facing matching errors to them.\n+ # In particular, they can legitimately not match anything, if entire git\n+ # subtrees were deleted for example.\n+ unmatched_glob_behavior=GlobMatchErrorBehavior.ignore,\n filter_by_global_options=True,\n from_change_detection=True,\n description_of_origin=\"`--changed-since`\",\ndiff --git a/src/python/pants/vcs/git.py b/src/python/pants/vcs/git.py\n--- a/src/python/pants/vcs/git.py\n+++ b/src/python/pants/vcs/git.py\n@@ -91,7 +91,7 @@\n committed_changes = self._git_binary._invoke_unsandboxed(\n self._create_git_cmdline(committed_cmd)\n )\n- files.update(committed_changes.split())\n+ files.update(committed_changes.splitlines())\n if include_untracked:\n untracked_cmd = [\n \"ls-files\",\n@@ -102,14 +102,14 @@\n untracked = self._git_binary._invoke_unsandboxed(\n self._create_git_cmdline(untracked_cmd)\n )\n- files.update(untracked.split())\n+ files.update(untracked.splitlines())\n # git will report changed files relative to the worktree: re-relativize to relative_to\n return {self._fix_git_relative_path(f, relative_to) for f in files}\n \n def changes_in(self, diffspec: str, relative_to: PurePath | str | None = None) -> set[str]:\n relative_to = PurePath(relative_to) if relative_to is not None else self.worktree\n cmd = [\"diff-tree\", \"--no-commit-id\", \"--name-only\", \"-r\", diffspec]\n- files = self._git_binary._invoke_unsandboxed(self._create_git_cmdline(cmd)).split()\n+ files = self._git_binary._invoke_unsandboxed(self._create_git_cmdline(cmd)).splitlines()\n return {self._fix_git_relative_path(f.strip(), relative_to) for f in files}\n \n def _create_git_cmdline(self, args: Iterable[str]) -> list[str]:\n", "issue": "Deleted files cause `pants tailor` with `--changed-since` to fail\n**Describe the bug**\r\n\r\nI use the following command in CI to validate the monorepo, as recommended by the docs:\r\n\r\n```shell\r\n> pants \\\r\n\t --changed-since=origin/main \\\r\n\t tailor --check \\\r\n\t update-build-files --check \\\r\n\t lint\r\n```\r\n\r\nHowever, if I delete a package, including its `BUILD` file in a PR, the `--changed-since` flag causes `tailor` to try to run on those files, which `pants` blows up on:\r\n\r\n```shell\r\n16:40:57.91 [ERROR] 1 Exception encountered:\r\n\r\nEngine traceback:\r\n in `tailor` goal\r\n\r\nIntrinsicError: Unmatched glob from `--changed-since`: \"aws/projects/my_project_name/*\"\r\n\r\nDo the file(s) exist? If so, check if the file(s) are in your `.gitignore` or the global `pants_ignore` option, which may result in Pants not being able to see the file(s) even though they exist on disk. 
Refer to https://www.pantsbuild.org/v2.19/docs/troubleshooting#pants-cannot-find-a-file-in-your-project.\r\n\r\n\r\nExited with code exit status 1\r\n```\r\n\r\nIf files are deleted, yes, they are changed, but they shouldn't throw an error.\r\n\r\n**Pants version**\r\n\r\n2.19.0\r\n\r\n**OS**\r\n\r\nLinux (CircleCI Ubuntu executor)\r\n\r\n**Additional info**\r\n\r\nN/A\r\n\n", "before_files": [{"content": "# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nimport logging\nfrom typing import cast\n\nfrom pants.base.specs import AddressLiteralSpec, FileLiteralSpec, RawSpecs, Specs\nfrom pants.base.specs_parser import SpecsParser\nfrom pants.core.util_rules.environments import determine_bootstrap_environment\nfrom pants.core.util_rules.system_binaries import GitBinary\nfrom pants.engine.addresses import AddressInput\nfrom pants.engine.environment import EnvironmentName\nfrom pants.engine.internals.scheduler import SchedulerSession\nfrom pants.engine.internals.selectors import Params\nfrom pants.engine.rules import QueryRule\nfrom pants.option.options import Options\nfrom pants.option.options_bootstrapper import OptionsBootstrapper\nfrom pants.util.frozendict import FrozenDict\nfrom pants.vcs.changed import ChangedAddresses, ChangedOptions, ChangedRequest\nfrom pants.vcs.git import GitWorktreeRequest, MaybeGitWorktree\n\nlogger = logging.getLogger(__name__)\n\n\nclass InvalidSpecConstraint(Exception):\n \"\"\"Raised when invalid constraints are given via specs and arguments like --changed*.\"\"\"\n\n\ndef calculate_specs(\n options_bootstrapper: OptionsBootstrapper,\n options: Options,\n session: SchedulerSession,\n working_dir: str,\n) -> Specs:\n \"\"\"Determine the specs for a given Pants run.\"\"\"\n global_options = options.for_global_scope()\n unmatched_cli_globs = global_options.unmatched_cli_globs\n specs = SpecsParser(working_dir=working_dir).parse_specs(\n options.specs,\n description_of_origin=\"CLI arguments\",\n unmatched_glob_behavior=unmatched_cli_globs,\n )\n\n changed_options = ChangedOptions.from_options(options.for_scope(\"changed\"))\n logger.debug(\"specs are: %s\", specs)\n logger.debug(\"changed_options are: %s\", changed_options)\n\n if specs and changed_options.provided:\n changed_name = \"--changed-since\" if changed_options.since else \"--changed-diffspec\"\n specs_description = specs.arguments_provided_description()\n assert specs_description is not None\n raise InvalidSpecConstraint(\n f\"You used `{changed_name}` at the same time as using {specs_description}. 
You can \"\n f\"only use `{changed_name}` or use normal arguments.\"\n )\n\n if not changed_options.provided:\n return specs\n\n bootstrap_environment = determine_bootstrap_environment(session)\n\n (git_binary,) = session.product_request(GitBinary, [Params(bootstrap_environment)])\n (maybe_git_worktree,) = session.product_request(\n MaybeGitWorktree, [Params(GitWorktreeRequest(), git_binary, bootstrap_environment)]\n )\n if not maybe_git_worktree.git_worktree:\n raise InvalidSpecConstraint(\n \"The `--changed-*` options are only available if Git is used for the repository.\"\n )\n\n changed_files = tuple(changed_options.changed_files(maybe_git_worktree.git_worktree))\n file_literal_specs = tuple(FileLiteralSpec(f) for f in changed_files)\n\n changed_request = ChangedRequest(changed_files, changed_options.dependents)\n (changed_addresses,) = session.product_request(\n ChangedAddresses,\n [Params(changed_request, options_bootstrapper, bootstrap_environment)],\n )\n logger.debug(\"changed addresses: %s\", changed_addresses)\n\n address_literal_specs = []\n for address in cast(ChangedAddresses, changed_addresses):\n address_input = AddressInput.parse(address.spec, description_of_origin=\"`--changed-since`\")\n address_literal_specs.append(\n AddressLiteralSpec(\n path_component=address_input.path_component,\n target_component=address_input.target_component,\n generated_component=address_input.generated_component,\n parameters=FrozenDict(address_input.parameters),\n )\n )\n\n return Specs(\n includes=RawSpecs(\n # We need both address_literals and file_literals to cover all our edge cases, including\n # target-aware vs. target-less goals, e.g. `list` vs `count-loc`.\n address_literals=tuple(address_literal_specs),\n file_literals=file_literal_specs,\n unmatched_glob_behavior=unmatched_cli_globs,\n filter_by_global_options=True,\n from_change_detection=True,\n description_of_origin=\"`--changed-since`\",\n ),\n ignores=RawSpecs(description_of_origin=\"`--changed-since`\"),\n )\n\n\ndef rules():\n return [\n QueryRule(ChangedAddresses, [ChangedRequest, EnvironmentName]),\n QueryRule(GitBinary, [EnvironmentName]),\n QueryRule(MaybeGitWorktree, [GitWorktreeRequest, GitBinary, EnvironmentName]),\n ]\n", "path": "src/python/pants/init/specs_calculator.py"}, {"content": "# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nimport dataclasses\nimport logging\nimport os\nfrom dataclasses import dataclass\nfrom os import PathLike\nfrom pathlib import Path, PurePath\nfrom typing import Any, Iterable\n\nfrom pants.core.util_rules.system_binaries import GitBinary, GitBinaryException, MaybeGitBinary\nfrom pants.engine.engine_aware import EngineAwareReturnType\nfrom pants.engine.rules import collect_rules, rule\nfrom pants.util.contextutil import pushd\n\nlogger = logging.getLogger(__name__)\n\n\nclass GitWorktree(EngineAwareReturnType):\n \"\"\"Implements a safe wrapper for un-sandboxed access to Git in the user's working copy.\n\n This type (and any wrappers) should be marked `EngineAwareReturnType.cacheable=False`, because\n it internally uses un-sandboxed APIs, and `@rules` which produce it should re-run in each\n session. 
It additionally implements a default `__eq__` in order to prevent early-cutoff in the\n graph, and force any consumers of the type to re-run.\n \"\"\"\n\n worktree: PurePath\n _gitdir: PurePath\n _git_binary: GitBinary\n\n def __init__(\n self,\n binary: GitBinary,\n worktree: PathLike[str] | None = None,\n gitdir: PathLike[str] | None = None,\n ) -> None:\n \"\"\"Creates a git object that assumes the git repository is in the cwd by default.\n\n binary: The git binary to use.\n worktree: The path to the git repository working tree directory (typically '.').\n gitdir: The path to the repository's git metadata directory (typically '.git').\n \"\"\"\n self.worktree = Path(worktree or os.getcwd()).resolve()\n self._gitdir = Path(gitdir).resolve() if gitdir else (self.worktree / \".git\")\n self._git_binary = binary\n\n def cacheable(self) -> bool:\n return False\n\n @property\n def current_rev_identifier(self):\n return \"HEAD\"\n\n @property\n def commit_id(self):\n return self._git_binary._invoke_unsandboxed(self._create_git_cmdline([\"rev-parse\", \"HEAD\"]))\n\n @property\n def branch_name(self) -> str | None:\n branch = self._git_binary._invoke_unsandboxed(\n self._create_git_cmdline([\"rev-parse\", \"--abbrev-ref\", \"HEAD\"])\n )\n return None if branch == \"HEAD\" else branch\n\n def _fix_git_relative_path(self, worktree_path: str, relative_to: PurePath | str) -> str:\n return str((self.worktree / worktree_path).relative_to(relative_to))\n\n def changed_files(\n self,\n from_commit: str | None = None,\n include_untracked: bool = False,\n relative_to: PurePath | str | None = None,\n ) -> set[str]:\n relative_to = PurePath(relative_to) if relative_to is not None else self.worktree\n rel_suffix = [\"--\", str(relative_to)]\n uncommitted_changes = self._git_binary._invoke_unsandboxed(\n self._create_git_cmdline(\n [\"diff\", \"--name-only\", \"HEAD\"] + rel_suffix,\n )\n )\n\n files = set(uncommitted_changes.splitlines())\n if from_commit:\n # Grab the diff from the merge-base to HEAD using ... syntax. 
This ensures we have just\n # the changes that have occurred on the current branch.\n committed_cmd = [\"diff\", \"--name-only\", from_commit + \"...HEAD\"] + rel_suffix\n committed_changes = self._git_binary._invoke_unsandboxed(\n self._create_git_cmdline(committed_cmd)\n )\n files.update(committed_changes.split())\n if include_untracked:\n untracked_cmd = [\n \"ls-files\",\n \"--other\",\n \"--exclude-standard\",\n \"--full-name\",\n ] + rel_suffix\n untracked = self._git_binary._invoke_unsandboxed(\n self._create_git_cmdline(untracked_cmd)\n )\n files.update(untracked.split())\n # git will report changed files relative to the worktree: re-relativize to relative_to\n return {self._fix_git_relative_path(f, relative_to) for f in files}\n\n def changes_in(self, diffspec: str, relative_to: PurePath | str | None = None) -> set[str]:\n relative_to = PurePath(relative_to) if relative_to is not None else self.worktree\n cmd = [\"diff-tree\", \"--no-commit-id\", \"--name-only\", \"-r\", diffspec]\n files = self._git_binary._invoke_unsandboxed(self._create_git_cmdline(cmd)).split()\n return {self._fix_git_relative_path(f.strip(), relative_to) for f in files}\n\n def _create_git_cmdline(self, args: Iterable[str]) -> list[str]:\n return [f\"--git-dir={self._gitdir}\", f\"--work-tree={self.worktree}\", *args]\n\n def __eq__(self, other: Any) -> bool:\n # NB: See the class doc regarding equality.\n return id(self) == id(other)\n\n\n@dataclass(frozen=True)\nclass MaybeGitWorktree(EngineAwareReturnType):\n git_worktree: GitWorktree | None = None\n\n def cacheable(self) -> bool:\n return False\n\n\[email protected](frozen=True)\nclass GitWorktreeRequest:\n gitdir: PathLike[str] | None = None\n subdir: PathLike[str] | None = None\n\n\n@rule\nasync def get_git_worktree(\n git_worktree_request: GitWorktreeRequest,\n maybe_git_binary: MaybeGitBinary,\n) -> MaybeGitWorktree:\n if not maybe_git_binary.git_binary:\n return MaybeGitWorktree()\n\n git_binary = maybe_git_binary.git_binary\n cmd = [\"rev-parse\", \"--show-toplevel\"]\n\n try:\n if git_worktree_request.subdir:\n with pushd(str(git_worktree_request.subdir)):\n output = git_binary._invoke_unsandboxed(cmd)\n else:\n output = git_binary._invoke_unsandboxed(cmd)\n except GitBinaryException as e:\n logger.info(f\"No git repository at {os.getcwd()}: {e!r}\")\n return MaybeGitWorktree()\n\n git_worktree = GitWorktree(\n binary=git_binary,\n gitdir=git_worktree_request.gitdir,\n worktree=PurePath(output),\n )\n\n logger.debug(\n f\"Detected git repository at {git_worktree.worktree} on branch {git_worktree.branch_name}\"\n )\n return MaybeGitWorktree(git_worktree=git_worktree)\n\n\ndef rules():\n return [*collect_rules()]\n", "path": "src/python/pants/vcs/git.py"}], "after_files": [{"content": "# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nimport logging\nfrom typing import cast\n\nfrom pants.base.glob_match_error_behavior import GlobMatchErrorBehavior\nfrom pants.base.specs import AddressLiteralSpec, FileLiteralSpec, RawSpecs, Specs\nfrom pants.base.specs_parser import SpecsParser\nfrom pants.core.util_rules.environments import determine_bootstrap_environment\nfrom pants.core.util_rules.system_binaries import GitBinary\nfrom pants.engine.addresses import AddressInput\nfrom pants.engine.environment import EnvironmentName\nfrom pants.engine.internals.scheduler import SchedulerSession\nfrom pants.engine.internals.selectors import Params\nfrom pants.engine.rules import 
QueryRule\nfrom pants.option.options import Options\nfrom pants.option.options_bootstrapper import OptionsBootstrapper\nfrom pants.util.frozendict import FrozenDict\nfrom pants.vcs.changed import ChangedAddresses, ChangedOptions, ChangedRequest\nfrom pants.vcs.git import GitWorktreeRequest, MaybeGitWorktree\n\nlogger = logging.getLogger(__name__)\n\n\nclass InvalidSpecConstraint(Exception):\n \"\"\"Raised when invalid constraints are given via specs and arguments like --changed*.\"\"\"\n\n\ndef calculate_specs(\n options_bootstrapper: OptionsBootstrapper,\n options: Options,\n session: SchedulerSession,\n working_dir: str,\n) -> Specs:\n \"\"\"Determine the specs for a given Pants run.\"\"\"\n global_options = options.for_global_scope()\n unmatched_cli_globs = global_options.unmatched_cli_globs\n specs = SpecsParser(working_dir=working_dir).parse_specs(\n options.specs,\n description_of_origin=\"CLI arguments\",\n unmatched_glob_behavior=unmatched_cli_globs,\n )\n\n changed_options = ChangedOptions.from_options(options.for_scope(\"changed\"))\n logger.debug(\"specs are: %s\", specs)\n logger.debug(\"changed_options are: %s\", changed_options)\n\n if specs and changed_options.provided:\n changed_name = \"--changed-since\" if changed_options.since else \"--changed-diffspec\"\n specs_description = specs.arguments_provided_description()\n assert specs_description is not None\n raise InvalidSpecConstraint(\n f\"You used `{changed_name}` at the same time as using {specs_description}. You can \"\n f\"only use `{changed_name}` or use normal arguments.\"\n )\n\n if not changed_options.provided:\n return specs\n\n bootstrap_environment = determine_bootstrap_environment(session)\n\n (git_binary,) = session.product_request(GitBinary, [Params(bootstrap_environment)])\n (maybe_git_worktree,) = session.product_request(\n MaybeGitWorktree, [Params(GitWorktreeRequest(), git_binary, bootstrap_environment)]\n )\n if not maybe_git_worktree.git_worktree:\n raise InvalidSpecConstraint(\n \"The `--changed-*` options are only available if Git is used for the repository.\"\n )\n\n changed_files = tuple(changed_options.changed_files(maybe_git_worktree.git_worktree))\n file_literal_specs = tuple(FileLiteralSpec(f) for f in changed_files)\n\n changed_request = ChangedRequest(changed_files, changed_options.dependents)\n (changed_addresses,) = session.product_request(\n ChangedAddresses,\n [Params(changed_request, options_bootstrapper, bootstrap_environment)],\n )\n logger.debug(\"changed addresses: %s\", changed_addresses)\n\n address_literal_specs = []\n for address in cast(ChangedAddresses, changed_addresses):\n address_input = AddressInput.parse(address.spec, description_of_origin=\"`--changed-since`\")\n address_literal_specs.append(\n AddressLiteralSpec(\n path_component=address_input.path_component,\n target_component=address_input.target_component,\n generated_component=address_input.generated_component,\n parameters=FrozenDict(address_input.parameters),\n )\n )\n\n return Specs(\n includes=RawSpecs(\n # We need both address_literals and file_literals to cover all our edge cases, including\n # target-aware vs. target-less goals, e.g. 
`list` vs `count-loc`.\n address_literals=tuple(address_literal_specs),\n file_literals=file_literal_specs,\n # The globs here are synthesized from VCS data by the `changed` mechanism.\n # As such it does not make sense to apply user-facing matching errors to them.\n # In particular, they can legitimately not match anything, if entire git\n # subtrees were deleted for example.\n unmatched_glob_behavior=GlobMatchErrorBehavior.ignore,\n filter_by_global_options=True,\n from_change_detection=True,\n description_of_origin=\"`--changed-since`\",\n ),\n ignores=RawSpecs(description_of_origin=\"`--changed-since`\"),\n )\n\n\ndef rules():\n return [\n QueryRule(ChangedAddresses, [ChangedRequest, EnvironmentName]),\n QueryRule(GitBinary, [EnvironmentName]),\n QueryRule(MaybeGitWorktree, [GitWorktreeRequest, GitBinary, EnvironmentName]),\n ]\n", "path": "src/python/pants/init/specs_calculator.py"}, {"content": "# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nimport dataclasses\nimport logging\nimport os\nfrom dataclasses import dataclass\nfrom os import PathLike\nfrom pathlib import Path, PurePath\nfrom typing import Any, Iterable\n\nfrom pants.core.util_rules.system_binaries import GitBinary, GitBinaryException, MaybeGitBinary\nfrom pants.engine.engine_aware import EngineAwareReturnType\nfrom pants.engine.rules import collect_rules, rule\nfrom pants.util.contextutil import pushd\n\nlogger = logging.getLogger(__name__)\n\n\nclass GitWorktree(EngineAwareReturnType):\n \"\"\"Implements a safe wrapper for un-sandboxed access to Git in the user's working copy.\n\n This type (and any wrappers) should be marked `EngineAwareReturnType.cacheable=False`, because\n it internally uses un-sandboxed APIs, and `@rules` which produce it should re-run in each\n session. 
It additionally implements a default `__eq__` in order to prevent early-cutoff in the\n graph, and force any consumers of the type to re-run.\n \"\"\"\n\n worktree: PurePath\n _gitdir: PurePath\n _git_binary: GitBinary\n\n def __init__(\n self,\n binary: GitBinary,\n worktree: PathLike[str] | None = None,\n gitdir: PathLike[str] | None = None,\n ) -> None:\n \"\"\"Creates a git object that assumes the git repository is in the cwd by default.\n\n binary: The git binary to use.\n worktree: The path to the git repository working tree directory (typically '.').\n gitdir: The path to the repository's git metadata directory (typically '.git').\n \"\"\"\n self.worktree = Path(worktree or os.getcwd()).resolve()\n self._gitdir = Path(gitdir).resolve() if gitdir else (self.worktree / \".git\")\n self._git_binary = binary\n\n def cacheable(self) -> bool:\n return False\n\n @property\n def current_rev_identifier(self):\n return \"HEAD\"\n\n @property\n def commit_id(self):\n return self._git_binary._invoke_unsandboxed(self._create_git_cmdline([\"rev-parse\", \"HEAD\"]))\n\n @property\n def branch_name(self) -> str | None:\n branch = self._git_binary._invoke_unsandboxed(\n self._create_git_cmdline([\"rev-parse\", \"--abbrev-ref\", \"HEAD\"])\n )\n return None if branch == \"HEAD\" else branch\n\n def _fix_git_relative_path(self, worktree_path: str, relative_to: PurePath | str) -> str:\n return str((self.worktree / worktree_path).relative_to(relative_to))\n\n def changed_files(\n self,\n from_commit: str | None = None,\n include_untracked: bool = False,\n relative_to: PurePath | str | None = None,\n ) -> set[str]:\n relative_to = PurePath(relative_to) if relative_to is not None else self.worktree\n rel_suffix = [\"--\", str(relative_to)]\n uncommitted_changes = self._git_binary._invoke_unsandboxed(\n self._create_git_cmdline(\n [\"diff\", \"--name-only\", \"HEAD\"] + rel_suffix,\n )\n )\n\n files = set(uncommitted_changes.splitlines())\n if from_commit:\n # Grab the diff from the merge-base to HEAD using ... syntax. 
This ensures we have just\n # the changes that have occurred on the current branch.\n committed_cmd = [\"diff\", \"--name-only\", from_commit + \"...HEAD\"] + rel_suffix\n committed_changes = self._git_binary._invoke_unsandboxed(\n self._create_git_cmdline(committed_cmd)\n )\n files.update(committed_changes.splitlines())\n if include_untracked:\n untracked_cmd = [\n \"ls-files\",\n \"--other\",\n \"--exclude-standard\",\n \"--full-name\",\n ] + rel_suffix\n untracked = self._git_binary._invoke_unsandboxed(\n self._create_git_cmdline(untracked_cmd)\n )\n files.update(untracked.splitlines())\n # git will report changed files relative to the worktree: re-relativize to relative_to\n return {self._fix_git_relative_path(f, relative_to) for f in files}\n\n def changes_in(self, diffspec: str, relative_to: PurePath | str | None = None) -> set[str]:\n relative_to = PurePath(relative_to) if relative_to is not None else self.worktree\n cmd = [\"diff-tree\", \"--no-commit-id\", \"--name-only\", \"-r\", diffspec]\n files = self._git_binary._invoke_unsandboxed(self._create_git_cmdline(cmd)).splitlines()\n return {self._fix_git_relative_path(f.strip(), relative_to) for f in files}\n\n def _create_git_cmdline(self, args: Iterable[str]) -> list[str]:\n return [f\"--git-dir={self._gitdir}\", f\"--work-tree={self.worktree}\", *args]\n\n def __eq__(self, other: Any) -> bool:\n # NB: See the class doc regarding equality.\n return id(self) == id(other)\n\n\n@dataclass(frozen=True)\nclass MaybeGitWorktree(EngineAwareReturnType):\n git_worktree: GitWorktree | None = None\n\n def cacheable(self) -> bool:\n return False\n\n\[email protected](frozen=True)\nclass GitWorktreeRequest:\n gitdir: PathLike[str] | None = None\n subdir: PathLike[str] | None = None\n\n\n@rule\nasync def get_git_worktree(\n git_worktree_request: GitWorktreeRequest,\n maybe_git_binary: MaybeGitBinary,\n) -> MaybeGitWorktree:\n if not maybe_git_binary.git_binary:\n return MaybeGitWorktree()\n\n git_binary = maybe_git_binary.git_binary\n cmd = [\"rev-parse\", \"--show-toplevel\"]\n\n try:\n if git_worktree_request.subdir:\n with pushd(str(git_worktree_request.subdir)):\n output = git_binary._invoke_unsandboxed(cmd)\n else:\n output = git_binary._invoke_unsandboxed(cmd)\n except GitBinaryException as e:\n logger.info(f\"No git repository at {os.getcwd()}: {e!r}\")\n return MaybeGitWorktree()\n\n git_worktree = GitWorktree(\n binary=git_binary,\n gitdir=git_worktree_request.gitdir,\n worktree=PurePath(output),\n )\n\n logger.debug(\n f\"Detected git repository at {git_worktree.worktree} on branch {git_worktree.branch_name}\"\n )\n return MaybeGitWorktree(git_worktree=git_worktree)\n\n\ndef rules():\n return [*collect_rules()]\n", "path": "src/python/pants/vcs/git.py"}]}
| 3,700 | 667 |
gh_patches_debug_22089
|
rasdani/github-patches
|
git_diff
|
pypa__pip-12578
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
-vv is not passed to build env install subprocesses
### Description
While using `-vv` I noticed a big pause between these two output lines:
`
Installing backend dependencies: started
Installing backend dependencies: finished with status 'done'
`
Clearly a lot of stuff was happening - like wheel building - but there was no output.

It turns out that when `-vv` was introduced in #9450, this higher verbosity level was not passed on to these subprocesses.
### Expected behavior
_No response_
### pip version
24.0
### Python version
3.9
### OS
RHEL
### How to Reproduce
Compare the logging output from
```
rm -rf ~/.cache/pip && rm -f *.whl && pip -vv wheel --no-binary :all: hatchling
```
before and after the patch. I'm seeing 1k lines before and 12k lines after.
### Output
_No response_
### Code of Conduct
- [X] I agree to follow the [PSF Code of Conduct](https://www.python.org/psf/conduct/).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pip/_internal/build_env.py`
Content:
```
1 """Build Environment used for isolation during sdist building
2 """
3
4 import logging
5 import os
6 import pathlib
7 import site
8 import sys
9 import textwrap
10 from collections import OrderedDict
11 from types import TracebackType
12 from typing import TYPE_CHECKING, Iterable, List, Optional, Set, Tuple, Type, Union
13
14 from pip._vendor.certifi import where
15 from pip._vendor.packaging.requirements import Requirement
16 from pip._vendor.packaging.version import Version
17
18 from pip import __file__ as pip_location
19 from pip._internal.cli.spinners import open_spinner
20 from pip._internal.locations import get_platlib, get_purelib, get_scheme
21 from pip._internal.metadata import get_default_environment, get_environment
22 from pip._internal.utils.subprocess import call_subprocess
23 from pip._internal.utils.temp_dir import TempDirectory, tempdir_kinds
24
25 if TYPE_CHECKING:
26 from pip._internal.index.package_finder import PackageFinder
27
28 logger = logging.getLogger(__name__)
29
30
31 def _dedup(a: str, b: str) -> Union[Tuple[str], Tuple[str, str]]:
32 return (a, b) if a != b else (a,)
33
34
35 class _Prefix:
36 def __init__(self, path: str) -> None:
37 self.path = path
38 self.setup = False
39 scheme = get_scheme("", prefix=path)
40 self.bin_dir = scheme.scripts
41 self.lib_dirs = _dedup(scheme.purelib, scheme.platlib)
42
43
44 def get_runnable_pip() -> str:
45 """Get a file to pass to a Python executable, to run the currently-running pip.
46
47 This is used to run a pip subprocess, for installing requirements into the build
48 environment.
49 """
50 source = pathlib.Path(pip_location).resolve().parent
51
52 if not source.is_dir():
53 # This would happen if someone is using pip from inside a zip file. In that
54 # case, we can use that directly.
55 return str(source)
56
57 return os.fsdecode(source / "__pip-runner__.py")
58
59
60 def _get_system_sitepackages() -> Set[str]:
61 """Get system site packages
62
63 Usually from site.getsitepackages,
64 but fallback on `get_purelib()/get_platlib()` if unavailable
65 (e.g. in a virtualenv created by virtualenv<20)
66
67 Returns normalized set of strings.
68 """
69 if hasattr(site, "getsitepackages"):
70 system_sites = site.getsitepackages()
71 else:
72 # virtualenv < 20 overwrites site.py without getsitepackages
73 # fallback on get_purelib/get_platlib.
74 # this is known to miss things, but shouldn't in the cases
75 # where getsitepackages() has been removed (inside a virtualenv)
76 system_sites = [get_purelib(), get_platlib()]
77 return {os.path.normcase(path) for path in system_sites}
78
79
80 class BuildEnvironment:
81 """Creates and manages an isolated environment to install build deps"""
82
83 def __init__(self) -> None:
84 temp_dir = TempDirectory(kind=tempdir_kinds.BUILD_ENV, globally_managed=True)
85
86 self._prefixes = OrderedDict(
87 (name, _Prefix(os.path.join(temp_dir.path, name)))
88 for name in ("normal", "overlay")
89 )
90
91 self._bin_dirs: List[str] = []
92 self._lib_dirs: List[str] = []
93 for prefix in reversed(list(self._prefixes.values())):
94 self._bin_dirs.append(prefix.bin_dir)
95 self._lib_dirs.extend(prefix.lib_dirs)
96
97 # Customize site to:
98 # - ensure .pth files are honored
99 # - prevent access to system site packages
100 system_sites = _get_system_sitepackages()
101
102 self._site_dir = os.path.join(temp_dir.path, "site")
103 if not os.path.exists(self._site_dir):
104 os.mkdir(self._site_dir)
105 with open(
106 os.path.join(self._site_dir, "sitecustomize.py"), "w", encoding="utf-8"
107 ) as fp:
108 fp.write(
109 textwrap.dedent(
110 """
111 import os, site, sys
112
113 # First, drop system-sites related paths.
114 original_sys_path = sys.path[:]
115 known_paths = set()
116 for path in {system_sites!r}:
117 site.addsitedir(path, known_paths=known_paths)
118 system_paths = set(
119 os.path.normcase(path)
120 for path in sys.path[len(original_sys_path):]
121 )
122 original_sys_path = [
123 path for path in original_sys_path
124 if os.path.normcase(path) not in system_paths
125 ]
126 sys.path = original_sys_path
127
128 # Second, add lib directories.
129 # ensuring .pth file are processed.
130 for path in {lib_dirs!r}:
131 assert not path in sys.path
132 site.addsitedir(path)
133 """
134 ).format(system_sites=system_sites, lib_dirs=self._lib_dirs)
135 )
136
137 def __enter__(self) -> None:
138 self._save_env = {
139 name: os.environ.get(name, None)
140 for name in ("PATH", "PYTHONNOUSERSITE", "PYTHONPATH")
141 }
142
143 path = self._bin_dirs[:]
144 old_path = self._save_env["PATH"]
145 if old_path:
146 path.extend(old_path.split(os.pathsep))
147
148 pythonpath = [self._site_dir]
149
150 os.environ.update(
151 {
152 "PATH": os.pathsep.join(path),
153 "PYTHONNOUSERSITE": "1",
154 "PYTHONPATH": os.pathsep.join(pythonpath),
155 }
156 )
157
158 def __exit__(
159 self,
160 exc_type: Optional[Type[BaseException]],
161 exc_val: Optional[BaseException],
162 exc_tb: Optional[TracebackType],
163 ) -> None:
164 for varname, old_value in self._save_env.items():
165 if old_value is None:
166 os.environ.pop(varname, None)
167 else:
168 os.environ[varname] = old_value
169
170 def check_requirements(
171 self, reqs: Iterable[str]
172 ) -> Tuple[Set[Tuple[str, str]], Set[str]]:
173 """Return 2 sets:
174 - conflicting requirements: set of (installed, wanted) reqs tuples
175 - missing requirements: set of reqs
176 """
177 missing = set()
178 conflicting = set()
179 if reqs:
180 env = (
181 get_environment(self._lib_dirs)
182 if hasattr(self, "_lib_dirs")
183 else get_default_environment()
184 )
185 for req_str in reqs:
186 req = Requirement(req_str)
187 # We're explicitly evaluating with an empty extra value, since build
188 # environments are not provided any mechanism to select specific extras.
189 if req.marker is not None and not req.marker.evaluate({"extra": ""}):
190 continue
191 dist = env.get_distribution(req.name)
192 if not dist:
193 missing.add(req_str)
194 continue
195 if isinstance(dist.version, Version):
196 installed_req_str = f"{req.name}=={dist.version}"
197 else:
198 installed_req_str = f"{req.name}==={dist.version}"
199 if not req.specifier.contains(dist.version, prereleases=True):
200 conflicting.add((installed_req_str, req_str))
201 # FIXME: Consider direct URL?
202 return conflicting, missing
203
204 def install_requirements(
205 self,
206 finder: "PackageFinder",
207 requirements: Iterable[str],
208 prefix_as_string: str,
209 *,
210 kind: str,
211 ) -> None:
212 prefix = self._prefixes[prefix_as_string]
213 assert not prefix.setup
214 prefix.setup = True
215 if not requirements:
216 return
217 self._install_requirements(
218 get_runnable_pip(),
219 finder,
220 requirements,
221 prefix,
222 kind=kind,
223 )
224
225 @staticmethod
226 def _install_requirements(
227 pip_runnable: str,
228 finder: "PackageFinder",
229 requirements: Iterable[str],
230 prefix: _Prefix,
231 *,
232 kind: str,
233 ) -> None:
234 args: List[str] = [
235 sys.executable,
236 pip_runnable,
237 "install",
238 "--ignore-installed",
239 "--no-user",
240 "--prefix",
241 prefix.path,
242 "--no-warn-script-location",
243 ]
244 if logger.getEffectiveLevel() <= logging.DEBUG:
245 args.append("-v")
246 for format_control in ("no_binary", "only_binary"):
247 formats = getattr(finder.format_control, format_control)
248 args.extend(
249 (
250 "--" + format_control.replace("_", "-"),
251 ",".join(sorted(formats or {":none:"})),
252 )
253 )
254
255 index_urls = finder.index_urls
256 if index_urls:
257 args.extend(["-i", index_urls[0]])
258 for extra_index in index_urls[1:]:
259 args.extend(["--extra-index-url", extra_index])
260 else:
261 args.append("--no-index")
262 for link in finder.find_links:
263 args.extend(["--find-links", link])
264
265 for host in finder.trusted_hosts:
266 args.extend(["--trusted-host", host])
267 if finder.allow_all_prereleases:
268 args.append("--pre")
269 if finder.prefer_binary:
270 args.append("--prefer-binary")
271 args.append("--")
272 args.extend(requirements)
273 extra_environ = {"_PIP_STANDALONE_CERT": where()}
274 with open_spinner(f"Installing {kind}") as spinner:
275 call_subprocess(
276 args,
277 command_desc=f"pip subprocess to install {kind}",
278 spinner=spinner,
279 extra_environ=extra_environ,
280 )
281
282
283 class NoOpBuildEnvironment(BuildEnvironment):
284 """A no-op drop-in replacement for BuildEnvironment"""
285
286 def __init__(self) -> None:
287 pass
288
289 def __enter__(self) -> None:
290 pass
291
292 def __exit__(
293 self,
294 exc_type: Optional[Type[BaseException]],
295 exc_val: Optional[BaseException],
296 exc_tb: Optional[TracebackType],
297 ) -> None:
298 pass
299
300 def cleanup(self) -> None:
301 pass
302
303 def install_requirements(
304 self,
305 finder: "PackageFinder",
306 requirements: Iterable[str],
307 prefix_as_string: str,
308 *,
309 kind: str,
310 ) -> None:
311 raise NotImplementedError()
312
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/pip/_internal/build_env.py b/src/pip/_internal/build_env.py
--- a/src/pip/_internal/build_env.py
+++ b/src/pip/_internal/build_env.py
@@ -19,6 +19,7 @@
from pip._internal.cli.spinners import open_spinner
from pip._internal.locations import get_platlib, get_purelib, get_scheme
from pip._internal.metadata import get_default_environment, get_environment
+from pip._internal.utils.logging import VERBOSE
from pip._internal.utils.subprocess import call_subprocess
from pip._internal.utils.temp_dir import TempDirectory, tempdir_kinds
@@ -242,6 +243,8 @@
"--no-warn-script-location",
]
if logger.getEffectiveLevel() <= logging.DEBUG:
+ args.append("-vv")
+ elif logger.getEffectiveLevel() <= VERBOSE:
args.append("-v")
for format_control in ("no_binary", "only_binary"):
formats = getattr(finder.format_control, format_control)
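
The second hunk forwards `-vv` to the child pip process when the parent runs at DEBUG, and `-v` when it runs at pip's intermediate `VERBOSE` level (imported in the first hunk). A rough sketch of the level comparison follows; the numeric value assumed for `VERBOSE` and the `flags_for` helper are illustrative only, not pip's actual API:

```python
import logging

DEBUG = logging.DEBUG        # 10
VERBOSE = logging.DEBUG + 5  # 15 -- assumed value for pip's custom level
INFO = logging.INFO          # 20

def flags_for(effective_level: int) -> list[str]:
    # Mirrors the patched conditional: a lower numeric level means more verbose.
    if effective_level <= DEBUG:
        return ["-vv"]
    elif effective_level <= VERBOSE:
        return ["-v"]
    return []

print(flags_for(DEBUG))    # ['-vv'] -> parent was run with -vv
print(flags_for(VERBOSE))  # ['-v']  -> parent was run with -v
print(flags_for(INFO))     # []      -> default verbosity, no flag forwarded
```

Before the patch only the `logging.DEBUG` branch existed, so the child received at most a single `-v` even when the parent ran with `-vv`, which matches the quiet build-environment output described in the issue.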
|
{"golden_diff": "diff --git a/src/pip/_internal/build_env.py b/src/pip/_internal/build_env.py\n--- a/src/pip/_internal/build_env.py\n+++ b/src/pip/_internal/build_env.py\n@@ -19,6 +19,7 @@\n from pip._internal.cli.spinners import open_spinner\n from pip._internal.locations import get_platlib, get_purelib, get_scheme\n from pip._internal.metadata import get_default_environment, get_environment\n+from pip._internal.utils.logging import VERBOSE\n from pip._internal.utils.subprocess import call_subprocess\n from pip._internal.utils.temp_dir import TempDirectory, tempdir_kinds\n \n@@ -242,6 +243,8 @@\n \"--no-warn-script-location\",\n ]\n if logger.getEffectiveLevel() <= logging.DEBUG:\n+ args.append(\"-vv\")\n+ elif logger.getEffectiveLevel() <= VERBOSE:\n args.append(\"-v\")\n for format_control in (\"no_binary\", \"only_binary\"):\n formats = getattr(finder.format_control, format_control)\n", "issue": "-vv is not passed to build env install subprocesses\n### Description\n\nWhile using `-vv` I noticed seeing a big pause between these two output lines:\r\n\r\n`\r\n Installing backend dependencies: started\r\n Installing backend dependencies: finished with status 'done'\r\n`\r\n\r\nClearly a lot of stuff was happening - like wheel building - but there was no output\r\n\r\nIt turns out that when -vv was introduced in #9450 this higher verbosity level was not passed onto these subprocesses\n\n### Expected behavior\n\n_No response_\n\n### pip version\n\n24.0\n\n### Python version\n\n3.9\n\n### OS\n\nRHEL\n\n### How to Reproduce\n\nCompare the logging output from\r\n\r\n```\r\nrm -rf ~/.cache/pip && rm -f *.whl && pip -vv wheel --no-binary :all: hatchling\r\n```\r\n\r\nbefore and after the patch. I'm seeing 1k lines before and 12k lines after.\n\n### Output\n\n_No response_\n\n### Code of Conduct\n\n- [X] I agree to follow the [PSF Code of Conduct](https://www.python.org/psf/conduct/).\n", "before_files": [{"content": "\"\"\"Build Environment used for isolation during sdist building\n\"\"\"\n\nimport logging\nimport os\nimport pathlib\nimport site\nimport sys\nimport textwrap\nfrom collections import OrderedDict\nfrom types import TracebackType\nfrom typing import TYPE_CHECKING, Iterable, List, Optional, Set, Tuple, Type, Union\n\nfrom pip._vendor.certifi import where\nfrom pip._vendor.packaging.requirements import Requirement\nfrom pip._vendor.packaging.version import Version\n\nfrom pip import __file__ as pip_location\nfrom pip._internal.cli.spinners import open_spinner\nfrom pip._internal.locations import get_platlib, get_purelib, get_scheme\nfrom pip._internal.metadata import get_default_environment, get_environment\nfrom pip._internal.utils.subprocess import call_subprocess\nfrom pip._internal.utils.temp_dir import TempDirectory, tempdir_kinds\n\nif TYPE_CHECKING:\n from pip._internal.index.package_finder import PackageFinder\n\nlogger = logging.getLogger(__name__)\n\n\ndef _dedup(a: str, b: str) -> Union[Tuple[str], Tuple[str, str]]:\n return (a, b) if a != b else (a,)\n\n\nclass _Prefix:\n def __init__(self, path: str) -> None:\n self.path = path\n self.setup = False\n scheme = get_scheme(\"\", prefix=path)\n self.bin_dir = scheme.scripts\n self.lib_dirs = _dedup(scheme.purelib, scheme.platlib)\n\n\ndef get_runnable_pip() -> str:\n \"\"\"Get a file to pass to a Python executable, to run the currently-running pip.\n\n This is used to run a pip subprocess, for installing requirements into the build\n environment.\n \"\"\"\n source = pathlib.Path(pip_location).resolve().parent\n\n if not 
source.is_dir():\n # This would happen if someone is using pip from inside a zip file. In that\n # case, we can use that directly.\n return str(source)\n\n return os.fsdecode(source / \"__pip-runner__.py\")\n\n\ndef _get_system_sitepackages() -> Set[str]:\n \"\"\"Get system site packages\n\n Usually from site.getsitepackages,\n but fallback on `get_purelib()/get_platlib()` if unavailable\n (e.g. in a virtualenv created by virtualenv<20)\n\n Returns normalized set of strings.\n \"\"\"\n if hasattr(site, \"getsitepackages\"):\n system_sites = site.getsitepackages()\n else:\n # virtualenv < 20 overwrites site.py without getsitepackages\n # fallback on get_purelib/get_platlib.\n # this is known to miss things, but shouldn't in the cases\n # where getsitepackages() has been removed (inside a virtualenv)\n system_sites = [get_purelib(), get_platlib()]\n return {os.path.normcase(path) for path in system_sites}\n\n\nclass BuildEnvironment:\n \"\"\"Creates and manages an isolated environment to install build deps\"\"\"\n\n def __init__(self) -> None:\n temp_dir = TempDirectory(kind=tempdir_kinds.BUILD_ENV, globally_managed=True)\n\n self._prefixes = OrderedDict(\n (name, _Prefix(os.path.join(temp_dir.path, name)))\n for name in (\"normal\", \"overlay\")\n )\n\n self._bin_dirs: List[str] = []\n self._lib_dirs: List[str] = []\n for prefix in reversed(list(self._prefixes.values())):\n self._bin_dirs.append(prefix.bin_dir)\n self._lib_dirs.extend(prefix.lib_dirs)\n\n # Customize site to:\n # - ensure .pth files are honored\n # - prevent access to system site packages\n system_sites = _get_system_sitepackages()\n\n self._site_dir = os.path.join(temp_dir.path, \"site\")\n if not os.path.exists(self._site_dir):\n os.mkdir(self._site_dir)\n with open(\n os.path.join(self._site_dir, \"sitecustomize.py\"), \"w\", encoding=\"utf-8\"\n ) as fp:\n fp.write(\n textwrap.dedent(\n \"\"\"\n import os, site, sys\n\n # First, drop system-sites related paths.\n original_sys_path = sys.path[:]\n known_paths = set()\n for path in {system_sites!r}:\n site.addsitedir(path, known_paths=known_paths)\n system_paths = set(\n os.path.normcase(path)\n for path in sys.path[len(original_sys_path):]\n )\n original_sys_path = [\n path for path in original_sys_path\n if os.path.normcase(path) not in system_paths\n ]\n sys.path = original_sys_path\n\n # Second, add lib directories.\n # ensuring .pth file are processed.\n for path in {lib_dirs!r}:\n assert not path in sys.path\n site.addsitedir(path)\n \"\"\"\n ).format(system_sites=system_sites, lib_dirs=self._lib_dirs)\n )\n\n def __enter__(self) -> None:\n self._save_env = {\n name: os.environ.get(name, None)\n for name in (\"PATH\", \"PYTHONNOUSERSITE\", \"PYTHONPATH\")\n }\n\n path = self._bin_dirs[:]\n old_path = self._save_env[\"PATH\"]\n if old_path:\n path.extend(old_path.split(os.pathsep))\n\n pythonpath = [self._site_dir]\n\n os.environ.update(\n {\n \"PATH\": os.pathsep.join(path),\n \"PYTHONNOUSERSITE\": \"1\",\n \"PYTHONPATH\": os.pathsep.join(pythonpath),\n }\n )\n\n def __exit__(\n self,\n exc_type: Optional[Type[BaseException]],\n exc_val: Optional[BaseException],\n exc_tb: Optional[TracebackType],\n ) -> None:\n for varname, old_value in self._save_env.items():\n if old_value is None:\n os.environ.pop(varname, None)\n else:\n os.environ[varname] = old_value\n\n def check_requirements(\n self, reqs: Iterable[str]\n ) -> Tuple[Set[Tuple[str, str]], Set[str]]:\n \"\"\"Return 2 sets:\n - conflicting requirements: set of (installed, wanted) reqs tuples\n - missing 
requirements: set of reqs\n \"\"\"\n missing = set()\n conflicting = set()\n if reqs:\n env = (\n get_environment(self._lib_dirs)\n if hasattr(self, \"_lib_dirs\")\n else get_default_environment()\n )\n for req_str in reqs:\n req = Requirement(req_str)\n # We're explicitly evaluating with an empty extra value, since build\n # environments are not provided any mechanism to select specific extras.\n if req.marker is not None and not req.marker.evaluate({\"extra\": \"\"}):\n continue\n dist = env.get_distribution(req.name)\n if not dist:\n missing.add(req_str)\n continue\n if isinstance(dist.version, Version):\n installed_req_str = f\"{req.name}=={dist.version}\"\n else:\n installed_req_str = f\"{req.name}==={dist.version}\"\n if not req.specifier.contains(dist.version, prereleases=True):\n conflicting.add((installed_req_str, req_str))\n # FIXME: Consider direct URL?\n return conflicting, missing\n\n def install_requirements(\n self,\n finder: \"PackageFinder\",\n requirements: Iterable[str],\n prefix_as_string: str,\n *,\n kind: str,\n ) -> None:\n prefix = self._prefixes[prefix_as_string]\n assert not prefix.setup\n prefix.setup = True\n if not requirements:\n return\n self._install_requirements(\n get_runnable_pip(),\n finder,\n requirements,\n prefix,\n kind=kind,\n )\n\n @staticmethod\n def _install_requirements(\n pip_runnable: str,\n finder: \"PackageFinder\",\n requirements: Iterable[str],\n prefix: _Prefix,\n *,\n kind: str,\n ) -> None:\n args: List[str] = [\n sys.executable,\n pip_runnable,\n \"install\",\n \"--ignore-installed\",\n \"--no-user\",\n \"--prefix\",\n prefix.path,\n \"--no-warn-script-location\",\n ]\n if logger.getEffectiveLevel() <= logging.DEBUG:\n args.append(\"-v\")\n for format_control in (\"no_binary\", \"only_binary\"):\n formats = getattr(finder.format_control, format_control)\n args.extend(\n (\n \"--\" + format_control.replace(\"_\", \"-\"),\n \",\".join(sorted(formats or {\":none:\"})),\n )\n )\n\n index_urls = finder.index_urls\n if index_urls:\n args.extend([\"-i\", index_urls[0]])\n for extra_index in index_urls[1:]:\n args.extend([\"--extra-index-url\", extra_index])\n else:\n args.append(\"--no-index\")\n for link in finder.find_links:\n args.extend([\"--find-links\", link])\n\n for host in finder.trusted_hosts:\n args.extend([\"--trusted-host\", host])\n if finder.allow_all_prereleases:\n args.append(\"--pre\")\n if finder.prefer_binary:\n args.append(\"--prefer-binary\")\n args.append(\"--\")\n args.extend(requirements)\n extra_environ = {\"_PIP_STANDALONE_CERT\": where()}\n with open_spinner(f\"Installing {kind}\") as spinner:\n call_subprocess(\n args,\n command_desc=f\"pip subprocess to install {kind}\",\n spinner=spinner,\n extra_environ=extra_environ,\n )\n\n\nclass NoOpBuildEnvironment(BuildEnvironment):\n \"\"\"A no-op drop-in replacement for BuildEnvironment\"\"\"\n\n def __init__(self) -> None:\n pass\n\n def __enter__(self) -> None:\n pass\n\n def __exit__(\n self,\n exc_type: Optional[Type[BaseException]],\n exc_val: Optional[BaseException],\n exc_tb: Optional[TracebackType],\n ) -> None:\n pass\n\n def cleanup(self) -> None:\n pass\n\n def install_requirements(\n self,\n finder: \"PackageFinder\",\n requirements: Iterable[str],\n prefix_as_string: str,\n *,\n kind: str,\n ) -> None:\n raise NotImplementedError()\n", "path": "src/pip/_internal/build_env.py"}], "after_files": [{"content": "\"\"\"Build Environment used for isolation during sdist building\n\"\"\"\n\nimport logging\nimport os\nimport pathlib\nimport site\nimport sys\nimport 
textwrap\nfrom collections import OrderedDict\nfrom types import TracebackType\nfrom typing import TYPE_CHECKING, Iterable, List, Optional, Set, Tuple, Type, Union\n\nfrom pip._vendor.certifi import where\nfrom pip._vendor.packaging.requirements import Requirement\nfrom pip._vendor.packaging.version import Version\n\nfrom pip import __file__ as pip_location\nfrom pip._internal.cli.spinners import open_spinner\nfrom pip._internal.locations import get_platlib, get_purelib, get_scheme\nfrom pip._internal.metadata import get_default_environment, get_environment\nfrom pip._internal.utils.logging import VERBOSE\nfrom pip._internal.utils.subprocess import call_subprocess\nfrom pip._internal.utils.temp_dir import TempDirectory, tempdir_kinds\n\nif TYPE_CHECKING:\n from pip._internal.index.package_finder import PackageFinder\n\nlogger = logging.getLogger(__name__)\n\n\ndef _dedup(a: str, b: str) -> Union[Tuple[str], Tuple[str, str]]:\n return (a, b) if a != b else (a,)\n\n\nclass _Prefix:\n def __init__(self, path: str) -> None:\n self.path = path\n self.setup = False\n scheme = get_scheme(\"\", prefix=path)\n self.bin_dir = scheme.scripts\n self.lib_dirs = _dedup(scheme.purelib, scheme.platlib)\n\n\ndef get_runnable_pip() -> str:\n \"\"\"Get a file to pass to a Python executable, to run the currently-running pip.\n\n This is used to run a pip subprocess, for installing requirements into the build\n environment.\n \"\"\"\n source = pathlib.Path(pip_location).resolve().parent\n\n if not source.is_dir():\n # This would happen if someone is using pip from inside a zip file. In that\n # case, we can use that directly.\n return str(source)\n\n return os.fsdecode(source / \"__pip-runner__.py\")\n\n\ndef _get_system_sitepackages() -> Set[str]:\n \"\"\"Get system site packages\n\n Usually from site.getsitepackages,\n but fallback on `get_purelib()/get_platlib()` if unavailable\n (e.g. 
in a virtualenv created by virtualenv<20)\n\n Returns normalized set of strings.\n \"\"\"\n if hasattr(site, \"getsitepackages\"):\n system_sites = site.getsitepackages()\n else:\n # virtualenv < 20 overwrites site.py without getsitepackages\n # fallback on get_purelib/get_platlib.\n # this is known to miss things, but shouldn't in the cases\n # where getsitepackages() has been removed (inside a virtualenv)\n system_sites = [get_purelib(), get_platlib()]\n return {os.path.normcase(path) for path in system_sites}\n\n\nclass BuildEnvironment:\n \"\"\"Creates and manages an isolated environment to install build deps\"\"\"\n\n def __init__(self) -> None:\n temp_dir = TempDirectory(kind=tempdir_kinds.BUILD_ENV, globally_managed=True)\n\n self._prefixes = OrderedDict(\n (name, _Prefix(os.path.join(temp_dir.path, name)))\n for name in (\"normal\", \"overlay\")\n )\n\n self._bin_dirs: List[str] = []\n self._lib_dirs: List[str] = []\n for prefix in reversed(list(self._prefixes.values())):\n self._bin_dirs.append(prefix.bin_dir)\n self._lib_dirs.extend(prefix.lib_dirs)\n\n # Customize site to:\n # - ensure .pth files are honored\n # - prevent access to system site packages\n system_sites = _get_system_sitepackages()\n\n self._site_dir = os.path.join(temp_dir.path, \"site\")\n if not os.path.exists(self._site_dir):\n os.mkdir(self._site_dir)\n with open(\n os.path.join(self._site_dir, \"sitecustomize.py\"), \"w\", encoding=\"utf-8\"\n ) as fp:\n fp.write(\n textwrap.dedent(\n \"\"\"\n import os, site, sys\n\n # First, drop system-sites related paths.\n original_sys_path = sys.path[:]\n known_paths = set()\n for path in {system_sites!r}:\n site.addsitedir(path, known_paths=known_paths)\n system_paths = set(\n os.path.normcase(path)\n for path in sys.path[len(original_sys_path):]\n )\n original_sys_path = [\n path for path in original_sys_path\n if os.path.normcase(path) not in system_paths\n ]\n sys.path = original_sys_path\n\n # Second, add lib directories.\n # ensuring .pth file are processed.\n for path in {lib_dirs!r}:\n assert not path in sys.path\n site.addsitedir(path)\n \"\"\"\n ).format(system_sites=system_sites, lib_dirs=self._lib_dirs)\n )\n\n def __enter__(self) -> None:\n self._save_env = {\n name: os.environ.get(name, None)\n for name in (\"PATH\", \"PYTHONNOUSERSITE\", \"PYTHONPATH\")\n }\n\n path = self._bin_dirs[:]\n old_path = self._save_env[\"PATH\"]\n if old_path:\n path.extend(old_path.split(os.pathsep))\n\n pythonpath = [self._site_dir]\n\n os.environ.update(\n {\n \"PATH\": os.pathsep.join(path),\n \"PYTHONNOUSERSITE\": \"1\",\n \"PYTHONPATH\": os.pathsep.join(pythonpath),\n }\n )\n\n def __exit__(\n self,\n exc_type: Optional[Type[BaseException]],\n exc_val: Optional[BaseException],\n exc_tb: Optional[TracebackType],\n ) -> None:\n for varname, old_value in self._save_env.items():\n if old_value is None:\n os.environ.pop(varname, None)\n else:\n os.environ[varname] = old_value\n\n def check_requirements(\n self, reqs: Iterable[str]\n ) -> Tuple[Set[Tuple[str, str]], Set[str]]:\n \"\"\"Return 2 sets:\n - conflicting requirements: set of (installed, wanted) reqs tuples\n - missing requirements: set of reqs\n \"\"\"\n missing = set()\n conflicting = set()\n if reqs:\n env = (\n get_environment(self._lib_dirs)\n if hasattr(self, \"_lib_dirs\")\n else get_default_environment()\n )\n for req_str in reqs:\n req = Requirement(req_str)\n # We're explicitly evaluating with an empty extra value, since build\n # environments are not provided any mechanism to select specific extras.\n if 
req.marker is not None and not req.marker.evaluate({\"extra\": \"\"}):\n continue\n dist = env.get_distribution(req.name)\n if not dist:\n missing.add(req_str)\n continue\n if isinstance(dist.version, Version):\n installed_req_str = f\"{req.name}=={dist.version}\"\n else:\n installed_req_str = f\"{req.name}==={dist.version}\"\n if not req.specifier.contains(dist.version, prereleases=True):\n conflicting.add((installed_req_str, req_str))\n # FIXME: Consider direct URL?\n return conflicting, missing\n\n def install_requirements(\n self,\n finder: \"PackageFinder\",\n requirements: Iterable[str],\n prefix_as_string: str,\n *,\n kind: str,\n ) -> None:\n prefix = self._prefixes[prefix_as_string]\n assert not prefix.setup\n prefix.setup = True\n if not requirements:\n return\n self._install_requirements(\n get_runnable_pip(),\n finder,\n requirements,\n prefix,\n kind=kind,\n )\n\n @staticmethod\n def _install_requirements(\n pip_runnable: str,\n finder: \"PackageFinder\",\n requirements: Iterable[str],\n prefix: _Prefix,\n *,\n kind: str,\n ) -> None:\n args: List[str] = [\n sys.executable,\n pip_runnable,\n \"install\",\n \"--ignore-installed\",\n \"--no-user\",\n \"--prefix\",\n prefix.path,\n \"--no-warn-script-location\",\n ]\n if logger.getEffectiveLevel() <= logging.DEBUG:\n args.append(\"-vv\")\n elif logger.getEffectiveLevel() <= VERBOSE:\n args.append(\"-v\")\n for format_control in (\"no_binary\", \"only_binary\"):\n formats = getattr(finder.format_control, format_control)\n args.extend(\n (\n \"--\" + format_control.replace(\"_\", \"-\"),\n \",\".join(sorted(formats or {\":none:\"})),\n )\n )\n\n index_urls = finder.index_urls\n if index_urls:\n args.extend([\"-i\", index_urls[0]])\n for extra_index in index_urls[1:]:\n args.extend([\"--extra-index-url\", extra_index])\n else:\n args.append(\"--no-index\")\n for link in finder.find_links:\n args.extend([\"--find-links\", link])\n\n for host in finder.trusted_hosts:\n args.extend([\"--trusted-host\", host])\n if finder.allow_all_prereleases:\n args.append(\"--pre\")\n if finder.prefer_binary:\n args.append(\"--prefer-binary\")\n args.append(\"--\")\n args.extend(requirements)\n extra_environ = {\"_PIP_STANDALONE_CERT\": where()}\n with open_spinner(f\"Installing {kind}\") as spinner:\n call_subprocess(\n args,\n command_desc=f\"pip subprocess to install {kind}\",\n spinner=spinner,\n extra_environ=extra_environ,\n )\n\n\nclass NoOpBuildEnvironment(BuildEnvironment):\n \"\"\"A no-op drop-in replacement for BuildEnvironment\"\"\"\n\n def __init__(self) -> None:\n pass\n\n def __enter__(self) -> None:\n pass\n\n def __exit__(\n self,\n exc_type: Optional[Type[BaseException]],\n exc_val: Optional[BaseException],\n exc_tb: Optional[TracebackType],\n ) -> None:\n pass\n\n def cleanup(self) -> None:\n pass\n\n def install_requirements(\n self,\n finder: \"PackageFinder\",\n requirements: Iterable[str],\n prefix_as_string: str,\n *,\n kind: str,\n ) -> None:\n raise NotImplementedError()\n", "path": "src/pip/_internal/build_env.py"}]}
| 3,532 | 221 |
gh_patches_debug_36522
|
rasdani/github-patches
|
git_diff
|
TheAlgorithms__Python-9975
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve our test coverage
### Feature description
Many of our existing algorithm files have little to no unit testing. This is problematic because this can easily let bugs slip through. We want some assurance that the code we currently have is correct and functional. We welcome all contributors to open PRs to help us add tests to our codebase.
### How to find low-coverage files
Go to the Actions tab in this repository and find the most recent **build** workflow run. Open the logs under "Run Tests" and scroll down until you find the section on code coverage:
```
---------- coverage: platform linux, python 3.12.0-final-0 -----------
Name Stmts Miss Cover Missing
-----------------------------------------------------------------------------------------------------------
quantum/q_fourier_transform.py 30 30 0% 14-93
scripts/validate_solutions.py 54 54 0% 2-94
strings/min_cost_string_conversion.py 78 75 4% 20-57, 61-75, 79-129
...
```
The "Cover" column tells you what percentage of the lines in that file are covered by tests. We want to increase this percentage for existing files. Find a file with low coverage percentage that you wish to write tests for, add doctests for each function, and open a PR with your changes. You do not need to have a perfect coverage percentage, but all functions should have doctests.
Some files will naturally be hard to write tests for. For example, a file may be poorly written because it lacks any functions. Other files might be how-tos, meaning they simply demonstrate how to use an existing library's functions rather than implementing the algorithm themselves. Ignore these kinds of files, as they will need to be rewritten eventually. Furthermore, ignore files in the `web_programming` and `project_euler` directories. Web programming files are inherently hard to test and Project Euler files have their own validation workflow, so don't worry about their test coverage.
_**When you open your PR, put "Contributes to #9943" in the PR description.**_ Do not use the word "fixes", "resolves", or "closes". This issue is an ongoing one, and your PR will not single-handedly resolve this issue.
### How to add doctests
A doctest is a unit test that is contained within the documentation comment (docstring) for a function. Here is an example of what doctests look like within a docstring:
```py
def add(a: int, b: int) -> int:
"""
Adds two non-negative numbers.
>>> add(1, 1)
2
>>> add(2, 5)
7
>>> add(1, 0)
1
>>> add(-1, -1)
    Traceback (most recent call last):
...
ValueError: Numbers must be non-negative
"""
```
For every function in the file you choose, you should write doctests like the ones shown above in its docstring. If a function doesn't have a docstring, add one. Your doctests should be comprehensive but not excessive: you should write just enough tests to cover all basic cases as well as all edge cases (e.g., negative numbers, empty lists, etc).
Do not simply run a function on some example inputs and put its output as the expected output for a doctest. This assumes that the function is implemented correctly when it might not be. Verify independently that your doctests and their expected outputs are correct. **Your PR will not be merged if it has failing tests.** If you happen to discover a bug while writing doctests, please fix it.
_**Please read our [contributing guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) before you contribute.**_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `data_structures/binary_tree/segment_tree.py`
Content:
```
1 import math
2
3
4 class SegmentTree:
5 def __init__(self, a):
6 self.N = len(a)
7 self.st = [0] * (
8 4 * self.N
9 ) # approximate the overall size of segment tree with array N
10 if self.N:
11 self.build(1, 0, self.N - 1)
12
13 def left(self, idx):
14 return idx * 2
15
16 def right(self, idx):
17 return idx * 2 + 1
18
19 def build(self, idx, l, r): # noqa: E741
20 if l == r:
21 self.st[idx] = A[l]
22 else:
23 mid = (l + r) // 2
24 self.build(self.left(idx), l, mid)
25 self.build(self.right(idx), mid + 1, r)
26 self.st[idx] = max(self.st[self.left(idx)], self.st[self.right(idx)])
27
28 def update(self, a, b, val):
29 return self.update_recursive(1, 0, self.N - 1, a - 1, b - 1, val)
30
31 def update_recursive(self, idx, l, r, a, b, val): # noqa: E741
32 """
33 update(1, 1, N, a, b, v) for update val v to [a,b]
34 """
35 if r < a or l > b:
36 return True
37 if l == r:
38 self.st[idx] = val
39 return True
40 mid = (l + r) // 2
41 self.update_recursive(self.left(idx), l, mid, a, b, val)
42 self.update_recursive(self.right(idx), mid + 1, r, a, b, val)
43 self.st[idx] = max(self.st[self.left(idx)], self.st[self.right(idx)])
44 return True
45
46 def query(self, a, b):
47 return self.query_recursive(1, 0, self.N - 1, a - 1, b - 1)
48
49 def query_recursive(self, idx, l, r, a, b): # noqa: E741
50 """
51 query(1, 1, N, a, b) for query max of [a,b]
52 """
53 if r < a or l > b:
54 return -math.inf
55 if l >= a and r <= b:
56 return self.st[idx]
57 mid = (l + r) // 2
58 q1 = self.query_recursive(self.left(idx), l, mid, a, b)
59 q2 = self.query_recursive(self.right(idx), mid + 1, r, a, b)
60 return max(q1, q2)
61
62 def show_data(self):
63 show_list = []
64 for i in range(1, N + 1):
65 show_list += [self.query(i, i)]
66 print(show_list)
67
68
69 if __name__ == "__main__":
70 A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
71 N = 15
72 segt = SegmentTree(A)
73 print(segt.query(4, 6))
74 print(segt.query(7, 11))
75 print(segt.query(7, 12))
76 segt.update(1, 3, 111)
77 print(segt.query(1, 15))
78 segt.update(7, 8, 235)
79 segt.show_data()
80
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/data_structures/binary_tree/segment_tree.py b/data_structures/binary_tree/segment_tree.py
--- a/data_structures/binary_tree/segment_tree.py
+++ b/data_structures/binary_tree/segment_tree.py
@@ -3,7 +3,8 @@
class SegmentTree:
def __init__(self, a):
- self.N = len(a)
+ self.A = a
+ self.N = len(self.A)
self.st = [0] * (
4 * self.N
) # approximate the overall size of segment tree with array N
@@ -11,14 +12,32 @@
self.build(1, 0, self.N - 1)
def left(self, idx):
+ """
+ Returns the left child index for a given index in a binary tree.
+
+ >>> s = SegmentTree([1, 2, 3])
+ >>> s.left(1)
+ 2
+ >>> s.left(2)
+ 4
+ """
return idx * 2
def right(self, idx):
+ """
+ Returns the right child index for a given index in a binary tree.
+
+ >>> s = SegmentTree([1, 2, 3])
+ >>> s.right(1)
+ 3
+ >>> s.right(2)
+ 5
+ """
return idx * 2 + 1
def build(self, idx, l, r): # noqa: E741
if l == r:
- self.st[idx] = A[l]
+ self.st[idx] = self.A[l]
else:
mid = (l + r) // 2
self.build(self.left(idx), l, mid)
@@ -26,6 +45,15 @@
self.st[idx] = max(self.st[self.left(idx)], self.st[self.right(idx)])
def update(self, a, b, val):
+ """
+ Update the values in the segment tree in the range [a,b] with the given value.
+
+ >>> s = SegmentTree([1, 2, 3, 4, 5])
+ >>> s.update(2, 4, 10)
+ True
+ >>> s.query(1, 5)
+ 10
+ """
return self.update_recursive(1, 0, self.N - 1, a - 1, b - 1, val)
def update_recursive(self, idx, l, r, a, b, val): # noqa: E741
@@ -44,6 +72,15 @@
return True
def query(self, a, b):
+ """
+ Query the maximum value in the range [a,b].
+
+ >>> s = SegmentTree([1, 2, 3, 4, 5])
+ >>> s.query(1, 3)
+ 3
+ >>> s.query(1, 5)
+ 5
+ """
return self.query_recursive(1, 0, self.N - 1, a - 1, b - 1)
def query_recursive(self, idx, l, r, a, b): # noqa: E741
|
{"golden_diff": "diff --git a/data_structures/binary_tree/segment_tree.py b/data_structures/binary_tree/segment_tree.py\n--- a/data_structures/binary_tree/segment_tree.py\n+++ b/data_structures/binary_tree/segment_tree.py\n@@ -3,7 +3,8 @@\n \n class SegmentTree:\n def __init__(self, a):\n- self.N = len(a)\n+ self.A = a\n+ self.N = len(self.A)\n self.st = [0] * (\n 4 * self.N\n ) # approximate the overall size of segment tree with array N\n@@ -11,14 +12,32 @@\n self.build(1, 0, self.N - 1)\n \n def left(self, idx):\n+ \"\"\"\n+ Returns the left child index for a given index in a binary tree.\n+\n+ >>> s = SegmentTree([1, 2, 3])\n+ >>> s.left(1)\n+ 2\n+ >>> s.left(2)\n+ 4\n+ \"\"\"\n return idx * 2\n \n def right(self, idx):\n+ \"\"\"\n+ Returns the right child index for a given index in a binary tree.\n+\n+ >>> s = SegmentTree([1, 2, 3])\n+ >>> s.right(1)\n+ 3\n+ >>> s.right(2)\n+ 5\n+ \"\"\"\n return idx * 2 + 1\n \n def build(self, idx, l, r): # noqa: E741\n if l == r:\n- self.st[idx] = A[l]\n+ self.st[idx] = self.A[l]\n else:\n mid = (l + r) // 2\n self.build(self.left(idx), l, mid)\n@@ -26,6 +45,15 @@\n self.st[idx] = max(self.st[self.left(idx)], self.st[self.right(idx)])\n \n def update(self, a, b, val):\n+ \"\"\"\n+ Update the values in the segment tree in the range [a,b] with the given value.\n+\n+ >>> s = SegmentTree([1, 2, 3, 4, 5])\n+ >>> s.update(2, 4, 10)\n+ True\n+ >>> s.query(1, 5)\n+ 10\n+ \"\"\"\n return self.update_recursive(1, 0, self.N - 1, a - 1, b - 1, val)\n \n def update_recursive(self, idx, l, r, a, b, val): # noqa: E741\n@@ -44,6 +72,15 @@\n return True\n \n def query(self, a, b):\n+ \"\"\"\n+ Query the maximum value in the range [a,b].\n+\n+ >>> s = SegmentTree([1, 2, 3, 4, 5])\n+ >>> s.query(1, 3)\n+ 3\n+ >>> s.query(1, 5)\n+ 5\n+ \"\"\"\n return self.query_recursive(1, 0, self.N - 1, a - 1, b - 1)\n \n def query_recursive(self, idx, l, r, a, b): # noqa: E741\n", "issue": "Improve our test coverage\n### Feature description\r\n\r\nMany of our existing algorithm files have little to no unit testing. This is problematic because this can easily let bugs slip through. We want some assurance that the code we currently have is correct and functional. We welcome all contributors to open PRs to help us add tests to our codebase.\r\n\r\n### How to find low-coverage files\r\n\r\nGo to the Actions tab in this repository and find the most recent **build** workflow run. Open the logs under \"Run Tests\" and scroll down until you find the section on code coverage:\r\n```\r\n---------- coverage: platform linux, python 3.12.0-final-0 -----------\r\nName Stmts Miss Cover Missing\r\n-----------------------------------------------------------------------------------------------------------\r\nquantum/q_fourier_transform.py 30 30 0% 14-93\r\nscripts/validate_solutions.py 54 54 0% 2-94\r\nstrings/min_cost_string_conversion.py 78 75 4% 20-57, 61-75, 79-129\r\n...\r\n```\r\nThe \"Cover\" column tells you what percentage of the lines in that file are covered by tests. We want to increase this percentage for existing files. Find a file with low coverage percentage that you wish to write tests for, add doctests for each function, and open a PR with your changes. You do not need to have a perfect coverage percentage, but all functions should have doctests.\r\n\r\nSome files will naturally be hard to write tests for. For example, the file may be poorly written because they lack any functions. 
Other files might be how-tos, meaning they simply demonstrate how to use an existing library's functions rather than implementing the algorithm themselves. Ignore these kinds of files, as they will need to be rewritten eventually. Furthermore, ignore files in the `web_programming` and `project_euler` directories. Web programming files are inherently hard to test and Project Euler files have their own validation workflow, so don't worry about their test coverage.\r\n\r\n_**When you open your PR, put \"Contributes to #9943\" in the PR description.**_ Do not use the word \"fixes\", \"resolves\", or \"closes\". This issue is an ongoing one, and your PR will not single-handedly resolve this issue.\r\n\r\n### How to add doctests\r\n\r\nA doctest is a unit test that is contained within the documentation comment (docstring) for a function. Here is an example of what doctests look like within a docstring:\r\n```py\r\ndef add(a: int, b: int) -> int:\r\n \"\"\"\r\n Adds two non-negative numbers.\r\n >>> add(1, 1)\r\n 2\r\n >>> add(2, 5)\r\n 7\r\n >>> add(1, 0)\r\n 1\r\n >>> add(-1, -1)\r\n Traceback (most recent last):\r\n ...\r\n ValueError: Numbers must be non-negative\r\n \"\"\"\r\n```\r\nFor every function in the file you choose, you should write doctests like the ones shown above in its docstring. If a function doesn't have a docstring, add one. Your doctests should be comprehensive but not excessive: you should write just enough tests to cover all basic cases as well as all edge cases (e.g., negative numbers, empty lists, etc).\r\n\r\nDo not simply run a function on some example inputs and put its output as the expected output for a doctest. This assumes that the function is implemented correctly when it might not be. Verify independently that your doctests and their expected outputs are correct. 
**Your PR will not be merged if it has failing tests.** If you happen to discover a bug while writing doctests, please fix it.\r\n\r\n_**Please read our [contributing guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) before you contribute.**_\n", "before_files": [{"content": "import math\n\n\nclass SegmentTree:\n def __init__(self, a):\n self.N = len(a)\n self.st = [0] * (\n 4 * self.N\n ) # approximate the overall size of segment tree with array N\n if self.N:\n self.build(1, 0, self.N - 1)\n\n def left(self, idx):\n return idx * 2\n\n def right(self, idx):\n return idx * 2 + 1\n\n def build(self, idx, l, r): # noqa: E741\n if l == r:\n self.st[idx] = A[l]\n else:\n mid = (l + r) // 2\n self.build(self.left(idx), l, mid)\n self.build(self.right(idx), mid + 1, r)\n self.st[idx] = max(self.st[self.left(idx)], self.st[self.right(idx)])\n\n def update(self, a, b, val):\n return self.update_recursive(1, 0, self.N - 1, a - 1, b - 1, val)\n\n def update_recursive(self, idx, l, r, a, b, val): # noqa: E741\n \"\"\"\n update(1, 1, N, a, b, v) for update val v to [a,b]\n \"\"\"\n if r < a or l > b:\n return True\n if l == r:\n self.st[idx] = val\n return True\n mid = (l + r) // 2\n self.update_recursive(self.left(idx), l, mid, a, b, val)\n self.update_recursive(self.right(idx), mid + 1, r, a, b, val)\n self.st[idx] = max(self.st[self.left(idx)], self.st[self.right(idx)])\n return True\n\n def query(self, a, b):\n return self.query_recursive(1, 0, self.N - 1, a - 1, b - 1)\n\n def query_recursive(self, idx, l, r, a, b): # noqa: E741\n \"\"\"\n query(1, 1, N, a, b) for query max of [a,b]\n \"\"\"\n if r < a or l > b:\n return -math.inf\n if l >= a and r <= b:\n return self.st[idx]\n mid = (l + r) // 2\n q1 = self.query_recursive(self.left(idx), l, mid, a, b)\n q2 = self.query_recursive(self.right(idx), mid + 1, r, a, b)\n return max(q1, q2)\n\n def show_data(self):\n show_list = []\n for i in range(1, N + 1):\n show_list += [self.query(i, i)]\n print(show_list)\n\n\nif __name__ == \"__main__\":\n A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]\n N = 15\n segt = SegmentTree(A)\n print(segt.query(4, 6))\n print(segt.query(7, 11))\n print(segt.query(7, 12))\n segt.update(1, 3, 111)\n print(segt.query(1, 15))\n segt.update(7, 8, 235)\n segt.show_data()\n", "path": "data_structures/binary_tree/segment_tree.py"}], "after_files": [{"content": "import math\n\n\nclass SegmentTree:\n def __init__(self, a):\n self.A = a\n self.N = len(self.A)\n self.st = [0] * (\n 4 * self.N\n ) # approximate the overall size of segment tree with array N\n if self.N:\n self.build(1, 0, self.N - 1)\n\n def left(self, idx):\n \"\"\"\n Returns the left child index for a given index in a binary tree.\n\n >>> s = SegmentTree([1, 2, 3])\n >>> s.left(1)\n 2\n >>> s.left(2)\n 4\n \"\"\"\n return idx * 2\n\n def right(self, idx):\n \"\"\"\n Returns the right child index for a given index in a binary tree.\n\n >>> s = SegmentTree([1, 2, 3])\n >>> s.right(1)\n 3\n >>> s.right(2)\n 5\n \"\"\"\n return idx * 2 + 1\n\n def build(self, idx, l, r): # noqa: E741\n if l == r:\n self.st[idx] = self.A[l]\n else:\n mid = (l + r) // 2\n self.build(self.left(idx), l, mid)\n self.build(self.right(idx), mid + 1, r)\n self.st[idx] = max(self.st[self.left(idx)], self.st[self.right(idx)])\n\n def update(self, a, b, val):\n \"\"\"\n Update the values in the segment tree in the range [a,b] with the given value.\n\n >>> s = SegmentTree([1, 2, 3, 4, 5])\n >>> s.update(2, 4, 10)\n True\n >>> s.query(1, 5)\n 10\n \"\"\"\n 
return self.update_recursive(1, 0, self.N - 1, a - 1, b - 1, val)\n\n def update_recursive(self, idx, l, r, a, b, val): # noqa: E741\n \"\"\"\n update(1, 1, N, a, b, v) for update val v to [a,b]\n \"\"\"\n if r < a or l > b:\n return True\n if l == r:\n self.st[idx] = val\n return True\n mid = (l + r) // 2\n self.update_recursive(self.left(idx), l, mid, a, b, val)\n self.update_recursive(self.right(idx), mid + 1, r, a, b, val)\n self.st[idx] = max(self.st[self.left(idx)], self.st[self.right(idx)])\n return True\n\n def query(self, a, b):\n \"\"\"\n Query the maximum value in the range [a,b].\n\n >>> s = SegmentTree([1, 2, 3, 4, 5])\n >>> s.query(1, 3)\n 3\n >>> s.query(1, 5)\n 5\n \"\"\"\n return self.query_recursive(1, 0, self.N - 1, a - 1, b - 1)\n\n def query_recursive(self, idx, l, r, a, b): # noqa: E741\n \"\"\"\n query(1, 1, N, a, b) for query max of [a,b]\n \"\"\"\n if r < a or l > b:\n return -math.inf\n if l >= a and r <= b:\n return self.st[idx]\n mid = (l + r) // 2\n q1 = self.query_recursive(self.left(idx), l, mid, a, b)\n q2 = self.query_recursive(self.right(idx), mid + 1, r, a, b)\n return max(q1, q2)\n\n def show_data(self):\n show_list = []\n for i in range(1, N + 1):\n show_list += [self.query(i, i)]\n print(show_list)\n\n\nif __name__ == \"__main__\":\n A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]\n N = 15\n segt = SegmentTree(A)\n print(segt.query(4, 6))\n print(segt.query(7, 11))\n print(segt.query(7, 12))\n segt.update(1, 3, 111)\n print(segt.query(1, 15))\n segt.update(7, 8, 235)\n segt.show_data()\n", "path": "data_structures/binary_tree/segment_tree.py"}]}
| 2,048 | 735 |
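
The accepted patch for TheAlgorithms__Python-9975 makes `SegmentTree` store its input array on the instance (`self.A`) rather than reading the module-level global `A`, so the class can be constructed and tested outside the file's `__main__` block, and it adds doctests to `left`, `right`, `update`, and `query`. The snippet below is a minimal usage sketch under one assumption: that the patched module is importable as `data_structures.binary_tree.segment_tree` from a repository checkout.

```python
# Illustrative only: exercise the patched SegmentTree and run its new doctests.
# Assumes a TheAlgorithms/Python checkout where this module path is importable.
import doctest

import data_structures.binary_tree.segment_tree as segment_tree_module
from data_structures.binary_tree.segment_tree import SegmentTree

# With the fix, construction no longer depends on a global `A`.
tree = SegmentTree([1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8])
print(tree.query(4, 6))   # max over 1-based positions 4..6 -> 7
tree.update(1, 3, 111)    # overwrite positions 1..3 with 111
print(tree.query(1, 15))  # -> 111

# The doctests added by the patch (on left/right/update/query) run cleanly.
print(doctest.testmod(segment_tree_module).failed)  # -> 0
```
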
gh_patches_debug_7508
|
rasdani/github-patches
|
git_diff
|
qtile__qtile-2033
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Restarting Qtile leaves a traceback
Traceback (most recent call last):
File "/usr/lib/python3.7/site-packages/libqtile/ipc.py", line 72, in unpack
assert len(data) >= HDRLEN
AssertionError
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/bin/qtile-cmd", line 11, in <module>
load_entry_point('qtile==0.14.2', 'console_scripts', 'qtile-cmd')()
File "/usr/lib/python3.7/site-packages/libqtile/scripts/qtile_cmd.py", line 185, in main
ret = run_function(obj, args.function[0], args.args)
File "/usr/lib/python3.7/site-packages/libqtile/scripts/qtile_cmd.py", line 130, in run_function
ret = func(*args)
File "/usr/lib/python3.7/site-packages/libqtile/command.py", line 114, in __call__
return self.call(self.selectors, self.name, *args, **kwargs)
File "/usr/lib/python3.7/site-packages/libqtile/command.py", line 251, in call
state, val = self.client.call((selectors, name, args, kwargs))
File "/usr/lib/python3.7/site-packages/libqtile/ipc.py", line 213, in call
return self.send(data)
File "/usr/lib/python3.7/site-packages/libqtile/ipc.py", line 206, in send
self.loop.run_until_complete(asyncio.wait_for(client_proto.reply, timeout=10))
File "/usr/lib/python3.7/asyncio/base_events.py", line 584, in run_until_complete
return future.result()
File "/usr/lib/python3.7/asyncio/tasks.py", line 416, in wait_for
return fut.result()
File "/usr/lib/python3.7/site-packages/libqtile/ipc.py", line 152, in eof_received
data, _ = _IPC.unpack(self.recv)
File "/usr/lib/python3.7/site-packages/libqtile/ipc.py", line 78, in unpack
"error reading reply!"
libqtile.ipc.IPCError: error readin
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libqtile/ipc.py`
Content:
```
1 # Copyright (c) 2008, Aldo Cortesi. All rights reserved.
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a copy
4 # of this software and associated documentation files (the "Software"), to deal
5 # in the Software without restriction, including without limitation the rights
6 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 # copies of the Software, and to permit persons to whom the Software is
8 # furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19 # SOFTWARE.
20
21 """
22 A simple IPC mechanism for communicating between two local processes. We
23 use marshal to serialize data - this means that both client and server must
24 run the same Python version, and that clients must be trusted (as
25 un-marshalling untrusted data can result in arbitrary code execution).
26 """
27 import asyncio
28 import fcntl
29 import json
30 import marshal
31 import os.path
32 import socket
33 import struct
34 from typing import Any, Optional, Tuple
35
36 from libqtile.log_utils import logger
37 from libqtile.utils import get_cache_dir
38
39 HDRFORMAT = "!L"
40 HDRLEN = struct.calcsize(HDRFORMAT)
41
42 SOCKBASE = "qtilesocket.%s"
43
44
45 def find_sockfile(display: str = None):
46 """Finds the appropriate socket file for the given display"""
47 display = display or os.environ.get("DISPLAY") or ":0.0"
48 if "." not in display:
49 display += ".0"
50 cache_directory = get_cache_dir()
51 return os.path.join(cache_directory, SOCKBASE % display)
52
53
54 class IPCError(Exception):
55 pass
56
57
58 class _IPC:
59 """A helper class to handle properly packing and unpacking messages"""
60
61 @staticmethod
62 def unpack(data: bytes, *, is_json: Optional[bool] = None) -> Tuple[Any, bool]:
63 """Unpack the incoming message
64
65 Parameters
66 ----------
67 data : bytes
68 The incoming message to unpack
69 is_json : Optional[bool]
70 If the message should be unpacked as json. By default, try to
71 unpack json and fallback gracefully to marshalled bytes.
72
73 Returns
74 -------
75 Tuple[Any, bool]
76 A tuple of the unpacked object and a boolean denoting if the
77 message was deserialized using json. If True, the return message
78 should be packed as json.
79 """
80 if is_json is None or is_json:
81 try:
82 return json.loads(data.decode()), True
83 except ValueError as e:
84 if is_json:
85 raise IPCError("Unable to decode json data") from e
86
87 try:
88 assert len(data) >= HDRLEN
89 size = struct.unpack(HDRFORMAT, data[:HDRLEN])[0]
90 assert size >= len(data[HDRLEN:])
91 return marshal.loads(data[HDRLEN:HDRLEN + size]), False
92 except AssertionError as e:
93 raise IPCError(
94 "error reading reply! (probably the socket was disconnected)"
95 ) from e
96
97 @staticmethod
98 def pack(msg: Any, *, is_json: bool = False) -> bytes:
99 """Pack the object into a message to pass"""
100 if is_json:
101 json_obj = json.dumps(msg)
102 return json_obj.encode()
103
104 msg_bytes = marshal.dumps(msg)
105 size = struct.pack(HDRFORMAT, len(msg_bytes))
106 return size + msg_bytes
107
108
109 class Client:
110 def __init__(self, fname: str, is_json=False) -> None:
111 """Create a new IPC client
112
113 Parameters
114 ----------
115 fname : str
116 The file path to the file that is used to open the connection to
117 the running IPC server.
118 is_json : bool
119 Pack and unpack messages as json
120 """
121 self.fname = fname
122 self.loop = asyncio.get_event_loop()
123 self.is_json = is_json
124
125 def call(self, data: Any) -> Any:
126 return self.send(data)
127
128 def send(self, msg: Any) -> Any:
129 """Send the message and return the response from the server
130
131 If any exception is raised by the server, that will propogate out of
132 this call.
133 """
134 return self.loop.run_until_complete(self.async_send(msg))
135
136 async def async_send(self, msg: Any) -> Any:
137 """Send the message to the server
138
139 Connect to the server, then pack and send the message to the server,
140 then wait for and return the response from the server.
141 """
142 try:
143 reader, writer = await asyncio.wait_for(
144 asyncio.open_unix_connection(path=self.fname), timeout=3
145 )
146 except (ConnectionRefusedError, FileNotFoundError):
147 raise IPCError("Could not open {}".format(self.fname))
148
149 try:
150 send_data = _IPC.pack(msg, is_json=self.is_json)
151 writer.write(send_data)
152 writer.write_eof()
153
154 read_data = await asyncio.wait_for(reader.read(), timeout=10)
155 except asyncio.TimeoutError:
156 raise IPCError("Server not responding")
157 finally:
158 # see the note in Server._server_callback()
159 writer.close()
160 await writer.wait_closed()
161
162 data, _ = _IPC.unpack(read_data, is_json=self.is_json)
163
164 return data
165
166
167 class Server:
168 def __init__(self, fname: str, handler) -> None:
169 self.fname = fname
170 self.handler = handler
171 self.server = None # type: Optional[asyncio.AbstractServer]
172
173 if os.path.exists(fname):
174 os.unlink(fname)
175
176 self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
177 flags = fcntl.fcntl(self.sock.fileno(), fcntl.F_GETFD)
178 fcntl.fcntl(self.sock.fileno(), fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
179 self.sock.bind(self.fname)
180
181 async def _server_callback(
182 self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter
183 ) -> None:
184 """Callback when a connection is made to the server
185
186 Read the data sent from the client, execute the requested command, and
187 send the reply back to the client.
188 """
189 try:
190 logger.debug("Connection made to server")
191 data = await reader.read()
192 logger.debug("EOF received by server")
193
194 req, is_json = _IPC.unpack(data)
195 except IPCError:
196 logger.warn("Invalid data received, closing connection")
197 else:
198 if req[1] == "restart":
199 # if we are going to restart, close the connection first, as we won't be back
200 logger.debug("Closing connection on restart")
201 writer.write_eof()
202
203 rep = self.handler(req)
204
205 result = _IPC.pack(rep, is_json=is_json)
206
207 logger.debug("Sending result on receive EOF")
208 writer.write(result)
209 logger.debug("Closing connection on receive EOF")
210 writer.write_eof()
211 finally:
212 writer.close()
213 await writer.wait_closed()
214
215 async def __aenter__(self) -> "Server":
216 """Start and return the server"""
217 await self.start()
218 return self
219
220 async def __aexit__(self, exc_type, exc_value, tb) -> None:
221 """Close and shutdown the server"""
222 await self.close()
223
224 async def start(self) -> None:
225 """Start the server"""
226 assert self.server is None
227
228 logger.debug("Starting server")
229 server_coroutine = asyncio.start_unix_server(
230 self._server_callback, sock=self.sock
231 )
232 self.server = await server_coroutine
233
234 async def close(self) -> None:
235 """Close and shutdown the server"""
236 assert self.server is not None
237
238 logger.debug("Stopping server on close")
239 self.server.close()
240 await self.server.wait_closed()
241
242 self.server = None
243
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/libqtile/ipc.py b/libqtile/ipc.py
--- a/libqtile/ipc.py
+++ b/libqtile/ipc.py
@@ -195,11 +195,6 @@
except IPCError:
logger.warn("Invalid data received, closing connection")
else:
- if req[1] == "restart":
- # if we are going to restart, close the connection first, as we won't be back
- logger.debug("Closing connection on restart")
- writer.write_eof()
-
rep = self.handler(req)
result = _IPC.pack(rep, is_json=is_json)
|
{"golden_diff": "diff --git a/libqtile/ipc.py b/libqtile/ipc.py\n--- a/libqtile/ipc.py\n+++ b/libqtile/ipc.py\n@@ -195,11 +195,6 @@\n except IPCError:\n logger.warn(\"Invalid data received, closing connection\")\n else:\n- if req[1] == \"restart\":\n- # if we are going to restart, close the connection first, as we won't be back\n- logger.debug(\"Closing connection on restart\")\n- writer.write_eof()\n-\n rep = self.handler(req)\n \n result = _IPC.pack(rep, is_json=is_json)\n", "issue": "Restarting Qtile leaves a traceback\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.7/site-packages/libqtile/ipc.py\", line 72, in unpack\r\n assert len(data) >= HDRLEN\r\nAssertionError\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/bin/qtile-cmd\", line 11, in <module>\r\n load_entry_point('qtile==0.14.2', 'console_scripts', 'qtile-cmd')()\r\n File \"/usr/lib/python3.7/site-packages/libqtile/scripts/qtile_cmd.py\", line 185, in main\r\n ret = run_function(obj, args.function[0], args.args)\r\n File \"/usr/lib/python3.7/site-packages/libqtile/scripts/qtile_cmd.py\", line 130, in run_function\r\n ret = func(*args)\r\n File \"/usr/lib/python3.7/site-packages/libqtile/command.py\", line 114, in __call__\r\n return self.call(self.selectors, self.name, *args, **kwargs)\r\n File \"/usr/lib/python3.7/site-packages/libqtile/command.py\", line 251, in call\r\n state, val = self.client.call((selectors, name, args, kwargs))\r\n File \"/usr/lib/python3.7/site-packages/libqtile/ipc.py\", line 213, in call\r\n return self.send(data)\r\n File \"/usr/lib/python3.7/site-packages/libqtile/ipc.py\", line 206, in send\r\n self.loop.run_until_complete(asyncio.wait_for(client_proto.reply, timeout=10))\r\n File \"/usr/lib/python3.7/asyncio/base_events.py\", line 584, in run_until_complete\r\n return future.result()\r\n File \"/usr/lib/python3.7/asyncio/tasks.py\", line 416, in wait_for\r\n return fut.result()\r\n File \"/usr/lib/python3.7/site-packages/libqtile/ipc.py\", line 152, in eof_received\r\n data, _ = _IPC.unpack(self.recv)\r\n File \"/usr/lib/python3.7/site-packages/libqtile/ipc.py\", line 78, in unpack\r\n \"error reading reply!\"\r\nlibqtile.ipc.IPCError: error readin\n", "before_files": [{"content": "# Copyright (c) 2008, Aldo Cortesi. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n A simple IPC mechanism for communicating between two local processes. 
We\n use marshal to serialize data - this means that both client and server must\n run the same Python version, and that clients must be trusted (as\n un-marshalling untrusted data can result in arbitrary code execution).\n\"\"\"\nimport asyncio\nimport fcntl\nimport json\nimport marshal\nimport os.path\nimport socket\nimport struct\nfrom typing import Any, Optional, Tuple\n\nfrom libqtile.log_utils import logger\nfrom libqtile.utils import get_cache_dir\n\nHDRFORMAT = \"!L\"\nHDRLEN = struct.calcsize(HDRFORMAT)\n\nSOCKBASE = \"qtilesocket.%s\"\n\n\ndef find_sockfile(display: str = None):\n \"\"\"Finds the appropriate socket file for the given display\"\"\"\n display = display or os.environ.get(\"DISPLAY\") or \":0.0\"\n if \".\" not in display:\n display += \".0\"\n cache_directory = get_cache_dir()\n return os.path.join(cache_directory, SOCKBASE % display)\n\n\nclass IPCError(Exception):\n pass\n\n\nclass _IPC:\n \"\"\"A helper class to handle properly packing and unpacking messages\"\"\"\n\n @staticmethod\n def unpack(data: bytes, *, is_json: Optional[bool] = None) -> Tuple[Any, bool]:\n \"\"\"Unpack the incoming message\n\n Parameters\n ----------\n data : bytes\n The incoming message to unpack\n is_json : Optional[bool]\n If the message should be unpacked as json. By default, try to\n unpack json and fallback gracefully to marshalled bytes.\n\n Returns\n -------\n Tuple[Any, bool]\n A tuple of the unpacked object and a boolean denoting if the\n message was deserialized using json. If True, the return message\n should be packed as json.\n \"\"\"\n if is_json is None or is_json:\n try:\n return json.loads(data.decode()), True\n except ValueError as e:\n if is_json:\n raise IPCError(\"Unable to decode json data\") from e\n\n try:\n assert len(data) >= HDRLEN\n size = struct.unpack(HDRFORMAT, data[:HDRLEN])[0]\n assert size >= len(data[HDRLEN:])\n return marshal.loads(data[HDRLEN:HDRLEN + size]), False\n except AssertionError as e:\n raise IPCError(\n \"error reading reply! 
(probably the socket was disconnected)\"\n ) from e\n\n @staticmethod\n def pack(msg: Any, *, is_json: bool = False) -> bytes:\n \"\"\"Pack the object into a message to pass\"\"\"\n if is_json:\n json_obj = json.dumps(msg)\n return json_obj.encode()\n\n msg_bytes = marshal.dumps(msg)\n size = struct.pack(HDRFORMAT, len(msg_bytes))\n return size + msg_bytes\n\n\nclass Client:\n def __init__(self, fname: str, is_json=False) -> None:\n \"\"\"Create a new IPC client\n\n Parameters\n ----------\n fname : str\n The file path to the file that is used to open the connection to\n the running IPC server.\n is_json : bool\n Pack and unpack messages as json\n \"\"\"\n self.fname = fname\n self.loop = asyncio.get_event_loop()\n self.is_json = is_json\n\n def call(self, data: Any) -> Any:\n return self.send(data)\n\n def send(self, msg: Any) -> Any:\n \"\"\"Send the message and return the response from the server\n\n If any exception is raised by the server, that will propogate out of\n this call.\n \"\"\"\n return self.loop.run_until_complete(self.async_send(msg))\n\n async def async_send(self, msg: Any) -> Any:\n \"\"\"Send the message to the server\n\n Connect to the server, then pack and send the message to the server,\n then wait for and return the response from the server.\n \"\"\"\n try:\n reader, writer = await asyncio.wait_for(\n asyncio.open_unix_connection(path=self.fname), timeout=3\n )\n except (ConnectionRefusedError, FileNotFoundError):\n raise IPCError(\"Could not open {}\".format(self.fname))\n\n try:\n send_data = _IPC.pack(msg, is_json=self.is_json)\n writer.write(send_data)\n writer.write_eof()\n\n read_data = await asyncio.wait_for(reader.read(), timeout=10)\n except asyncio.TimeoutError:\n raise IPCError(\"Server not responding\")\n finally:\n # see the note in Server._server_callback()\n writer.close()\n await writer.wait_closed()\n\n data, _ = _IPC.unpack(read_data, is_json=self.is_json)\n\n return data\n\n\nclass Server:\n def __init__(self, fname: str, handler) -> None:\n self.fname = fname\n self.handler = handler\n self.server = None # type: Optional[asyncio.AbstractServer]\n\n if os.path.exists(fname):\n os.unlink(fname)\n\n self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)\n flags = fcntl.fcntl(self.sock.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(self.sock.fileno(), fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)\n self.sock.bind(self.fname)\n\n async def _server_callback(\n self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter\n ) -> None:\n \"\"\"Callback when a connection is made to the server\n\n Read the data sent from the client, execute the requested command, and\n send the reply back to the client.\n \"\"\"\n try:\n logger.debug(\"Connection made to server\")\n data = await reader.read()\n logger.debug(\"EOF received by server\")\n\n req, is_json = _IPC.unpack(data)\n except IPCError:\n logger.warn(\"Invalid data received, closing connection\")\n else:\n if req[1] == \"restart\":\n # if we are going to restart, close the connection first, as we won't be back\n logger.debug(\"Closing connection on restart\")\n writer.write_eof()\n\n rep = self.handler(req)\n\n result = _IPC.pack(rep, is_json=is_json)\n\n logger.debug(\"Sending result on receive EOF\")\n writer.write(result)\n logger.debug(\"Closing connection on receive EOF\")\n writer.write_eof()\n finally:\n writer.close()\n await writer.wait_closed()\n\n async def __aenter__(self) -> \"Server\":\n \"\"\"Start and return the server\"\"\"\n await self.start()\n return self\n\n async def 
__aexit__(self, exc_type, exc_value, tb) -> None:\n \"\"\"Close and shutdown the server\"\"\"\n await self.close()\n\n async def start(self) -> None:\n \"\"\"Start the server\"\"\"\n assert self.server is None\n\n logger.debug(\"Starting server\")\n server_coroutine = asyncio.start_unix_server(\n self._server_callback, sock=self.sock\n )\n self.server = await server_coroutine\n\n async def close(self) -> None:\n \"\"\"Close and shutdown the server\"\"\"\n assert self.server is not None\n\n logger.debug(\"Stopping server on close\")\n self.server.close()\n await self.server.wait_closed()\n\n self.server = None\n", "path": "libqtile/ipc.py"}], "after_files": [{"content": "# Copyright (c) 2008, Aldo Cortesi. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n A simple IPC mechanism for communicating between two local processes. We\n use marshal to serialize data - this means that both client and server must\n run the same Python version, and that clients must be trusted (as\n un-marshalling untrusted data can result in arbitrary code execution).\n\"\"\"\nimport asyncio\nimport fcntl\nimport json\nimport marshal\nimport os.path\nimport socket\nimport struct\nfrom typing import Any, Optional, Tuple\n\nfrom libqtile.log_utils import logger\nfrom libqtile.utils import get_cache_dir\n\nHDRFORMAT = \"!L\"\nHDRLEN = struct.calcsize(HDRFORMAT)\n\nSOCKBASE = \"qtilesocket.%s\"\n\n\ndef find_sockfile(display: str = None):\n \"\"\"Finds the appropriate socket file for the given display\"\"\"\n display = display or os.environ.get(\"DISPLAY\") or \":0.0\"\n if \".\" not in display:\n display += \".0\"\n cache_directory = get_cache_dir()\n return os.path.join(cache_directory, SOCKBASE % display)\n\n\nclass IPCError(Exception):\n pass\n\n\nclass _IPC:\n \"\"\"A helper class to handle properly packing and unpacking messages\"\"\"\n\n @staticmethod\n def unpack(data: bytes, *, is_json: Optional[bool] = None) -> Tuple[Any, bool]:\n \"\"\"Unpack the incoming message\n\n Parameters\n ----------\n data : bytes\n The incoming message to unpack\n is_json : Optional[bool]\n If the message should be unpacked as json. By default, try to\n unpack json and fallback gracefully to marshalled bytes.\n\n Returns\n -------\n Tuple[Any, bool]\n A tuple of the unpacked object and a boolean denoting if the\n message was deserialized using json. 
If True, the return message\n should be packed as json.\n \"\"\"\n if is_json is None or is_json:\n try:\n return json.loads(data.decode()), True\n except ValueError as e:\n if is_json:\n raise IPCError(\"Unable to decode json data\") from e\n\n try:\n assert len(data) >= HDRLEN\n size = struct.unpack(HDRFORMAT, data[:HDRLEN])[0]\n assert size >= len(data[HDRLEN:])\n return marshal.loads(data[HDRLEN:HDRLEN + size]), False\n except AssertionError as e:\n raise IPCError(\n \"error reading reply! (probably the socket was disconnected)\"\n ) from e\n\n @staticmethod\n def pack(msg: Any, *, is_json: bool = False) -> bytes:\n \"\"\"Pack the object into a message to pass\"\"\"\n if is_json:\n json_obj = json.dumps(msg)\n return json_obj.encode()\n\n msg_bytes = marshal.dumps(msg)\n size = struct.pack(HDRFORMAT, len(msg_bytes))\n return size + msg_bytes\n\n\nclass Client:\n def __init__(self, fname: str, is_json=False) -> None:\n \"\"\"Create a new IPC client\n\n Parameters\n ----------\n fname : str\n The file path to the file that is used to open the connection to\n the running IPC server.\n is_json : bool\n Pack and unpack messages as json\n \"\"\"\n self.fname = fname\n self.loop = asyncio.get_event_loop()\n self.is_json = is_json\n\n def call(self, data: Any) -> Any:\n return self.send(data)\n\n def send(self, msg: Any) -> Any:\n \"\"\"Send the message and return the response from the server\n\n If any exception is raised by the server, that will propogate out of\n this call.\n \"\"\"\n return self.loop.run_until_complete(self.async_send(msg))\n\n async def async_send(self, msg: Any) -> Any:\n \"\"\"Send the message to the server\n\n Connect to the server, then pack and send the message to the server,\n then wait for and return the response from the server.\n \"\"\"\n try:\n reader, writer = await asyncio.wait_for(\n asyncio.open_unix_connection(path=self.fname), timeout=3\n )\n except (ConnectionRefusedError, FileNotFoundError):\n raise IPCError(\"Could not open {}\".format(self.fname))\n\n try:\n send_data = _IPC.pack(msg, is_json=self.is_json)\n writer.write(send_data)\n writer.write_eof()\n\n read_data = await asyncio.wait_for(reader.read(), timeout=10)\n except asyncio.TimeoutError:\n raise IPCError(\"Server not responding\")\n finally:\n # see the note in Server._server_callback()\n writer.close()\n await writer.wait_closed()\n\n data, _ = _IPC.unpack(read_data, is_json=self.is_json)\n\n return data\n\n\nclass Server:\n def __init__(self, fname: str, handler) -> None:\n self.fname = fname\n self.handler = handler\n self.server = None # type: Optional[asyncio.AbstractServer]\n\n if os.path.exists(fname):\n os.unlink(fname)\n\n self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)\n flags = fcntl.fcntl(self.sock.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(self.sock.fileno(), fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)\n self.sock.bind(self.fname)\n\n async def _server_callback(\n self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter\n ) -> None:\n \"\"\"Callback when a connection is made to the server\n\n Read the data sent from the client, execute the requested command, and\n send the reply back to the client.\n \"\"\"\n try:\n logger.debug(\"Connection made to server\")\n data = await reader.read()\n logger.debug(\"EOF received by server\")\n\n req, is_json = _IPC.unpack(data)\n except IPCError:\n logger.warn(\"Invalid data received, closing connection\")\n else:\n rep = self.handler(req)\n\n result = _IPC.pack(rep, is_json=is_json)\n\n logger.debug(\"Sending result on 
receive EOF\")\n writer.write(result)\n logger.debug(\"Closing connection on receive EOF\")\n writer.write_eof()\n finally:\n writer.close()\n await writer.wait_closed()\n\n async def __aenter__(self) -> \"Server\":\n \"\"\"Start and return the server\"\"\"\n await self.start()\n return self\n\n async def __aexit__(self, exc_type, exc_value, tb) -> None:\n \"\"\"Close and shutdown the server\"\"\"\n await self.close()\n\n async def start(self) -> None:\n \"\"\"Start the server\"\"\"\n assert self.server is None\n\n logger.debug(\"Starting server\")\n server_coroutine = asyncio.start_unix_server(\n self._server_callback, sock=self.sock\n )\n self.server = await server_coroutine\n\n async def close(self) -> None:\n \"\"\"Close and shutdown the server\"\"\"\n assert self.server is not None\n\n logger.debug(\"Stopping server on close\")\n self.server.close()\n await self.server.wait_closed()\n\n self.server = None\n", "path": "libqtile/ipc.py"}]}
| 3,192 | 141 |
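
The qtile__qtile-2033 patch drops the server's early `writer.write_eof()` on `restart`. With that call in place the connection could be closed before the reply was written, the client's `reader.read()` then returned an empty payload, and `_IPC.unpack` failed the `assert len(data) >= HDRLEN` check shown in the traceback. The sketch below is a standalone reduction of `_IPC.pack`/`_IPC.unpack` from `libqtile/ipc.py`, included only to make that failure mode concrete; it does not import qtile.

```python
# Standalone reduction of _IPC.pack/_IPC.unpack (marshal body + "!L" size header),
# illustrating why an empty reply surfaces as the AssertionError in the traceback.
import marshal
import struct

HDRFORMAT = "!L"
HDRLEN = struct.calcsize(HDRFORMAT)

def pack(msg):
    body = marshal.dumps(msg)
    return struct.pack(HDRFORMAT, len(body)) + body

def unpack(data):
    assert len(data) >= HDRLEN  # the assert from libqtile/ipc.py
    size = struct.unpack(HDRFORMAT, data[:HDRLEN])[0]
    return marshal.loads(data[HDRLEN:HDRLEN + size])

print(unpack(pack(("ok", None))))  # normal round trip -> ('ok', None)

try:
    unpack(b"")  # what the client effectively read after the premature EOF
except AssertionError:
    print("empty reply -> AssertionError, wrapped as IPCError('error reading reply!')")
```
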
gh_patches_debug_33475
|
rasdani/github-patches
|
git_diff
|
mozmeao__snippets-service-1287
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use OAuth client ids instead of names in Firefox App Targeting.
List of client ids: https://docs.telemetry.mozilla.org/datasets/fxa_metrics/attribution.html#service-attribution
Related bug https://bugzilla.mozilla.org/show_bug.cgi?id=1596514#c5
- [ ] After pushing to prod, generate JEXL for related Targets
- [ ] ping mindy when is complete
<!-- probot = {"419912":{"who":"glogiotatidis","what":"","when":"2020-01-08T09:00:00.000Z"}} -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `snippets/base/admin/fields.py`
Content:
```
1 from django.core.exceptions import ValidationError
2 from django.forms import (ChoiceField, ModelChoiceField, ModelMultipleChoiceField,
3 MultiValueField, MultipleChoiceField)
4
5 from snippets.base.models import Addon, TargetedCountry
6
7 from .widgets import JEXLMultiWidget
8
9
10 class MultipleChoiceFieldCSV(MultipleChoiceField):
11 # To be used with in snippets.base.forms.SnippetAdminForm and in
12 # combination with DynamicField. We don't directly save() this field in the
13 # database so get_prep_value has not been implemented.
14
15 def prepare_value(self, value):
16 value = super(MultipleChoiceFieldCSV, self).prepare_value(value)
17 if not isinstance(value, list):
18 value = value.split(';')
19 return value
20
21 def clean(self, value):
22 value = super(MultipleChoiceFieldCSV, self).clean(value)
23 return ';'.join(value)
24
25
26 class JEXLBaseField():
27 def to_jexl(self, value):
28 if value:
29 return self.jexl.format(attr_name=self.attr_name, value=value)
30
31 return None
32
33
34 class JEXLChoiceField(JEXLBaseField, ChoiceField):
35 def __init__(self, attr_name, *args, **kwargs):
36 self.attr_name = attr_name
37 self.jexl = '{attr_name} == {value}'
38 self.jexl = kwargs.pop('jexl', self.jexl)
39 return super().__init__(*args, **kwargs)
40
41 def to_jexl(self, value):
42 if value:
43 return self.jexl.format(attr_name=self.attr_name, value=value)
44
45
46 class JEXLModelMultipleChoiceField(JEXLBaseField, ModelMultipleChoiceField):
47 def __init__(self, attr_name, *args, **kwargs):
48 self.attr_name = attr_name
49 self.jexl = '{attr_name} in {value}'
50 self.jexl = kwargs.pop('jexl', self.jexl)
51 return super().__init__(*args, **kwargs)
52
53 def prepare_value(self, value):
54 if isinstance(value, str):
55 value = value.split(';')
56 return super().prepare_value(value)
57
58 def clean(self, value):
59 value = super().clean(value)
60 return ';'.join([str(x.id) for x in value])
61
62
63 class JEXLCountryField(JEXLModelMultipleChoiceField):
64 def to_jexl(self, value):
65 if value:
66 values = TargetedCountry.objects.filter(id__in=value.split(";"))
67 return f'region in {[x.code for x in values]}'
68 return None
69
70
71 class JEXLRangeField(JEXLBaseField, MultiValueField):
72 def __init__(self, attr_name, choices, **kwargs):
73 self.attr_name = attr_name
74 self.jexl = {
75 'minimum': '{value} <= {attr_name}',
76 'maximum': '{attr_name} < {value}'
77 }
78 self.jexl = kwargs.pop('jexl', self.jexl)
79 fields = (
80 ChoiceField(choices=choices),
81 ChoiceField(choices=choices),
82 )
83 super().__init__(fields, **kwargs)
84 self.widget = JEXLMultiWidget(widgets=[f.widget for f in self.fields],
85 template_name='widgets/jexlrange.html')
86
87 def compress(self, data_list):
88 return ','.join(data_list)
89
90 def to_jexl(self, value):
91 final_jexl = []
92 if value:
93 minimum, maximum = value.split(',')
94 if minimum:
95 final_jexl.append(
96 self.jexl['minimum'].format(attr_name=self.attr_name, value=minimum)
97 )
98 if maximum:
99 final_jexl.append(
100 self.jexl['maximum'].format(attr_name=self.attr_name, value=maximum)
101 )
102 return ' && '.join(final_jexl)
103
104 def validate(self, value):
105 minimum, maximum = value.split(',')
106 self.fields[0].validate(minimum)
107 self.fields[1].validate(maximum)
108
109 if minimum and maximum and int(minimum) > int(maximum):
110 raise ValidationError('Minimum value must be lower or equal to maximum value.')
111 return value
112
113
114 class JEXLFirefoxRangeField(JEXLRangeField):
115 def __init__(self, **kwargs):
116 # Include only versions greater than 63, where ASRSnippets exist.
117 min_version = 64
118 # Need to be able to dynamically change this, probably using
119 # product_details. Issue #855
120 max_version = 84
121
122 choices = (
123 [(None, 'No limit')] +
124 [(x, x) for x in reversed(range(min_version, max_version + 1))]
125 )
126 super().__init__('firefoxVersion', choices, **kwargs)
127
128 def validate(self, value):
129 minimum, maximum = value.split(',')
130 self.fields[0].validate(minimum)
131 self.fields[1].validate(maximum)
132
133 if minimum and maximum and minimum > maximum:
134 raise ValidationError('Minimum value must be lower or equal to maximum value.')
135 return value
136
137
138 class JEXLAddonField(MultiValueField):
139 def __init__(self, **kwargs):
140 choices = (
141 (None, "I don't care"),
142 ('not_installed', 'Not Installed'),
143 ('installed', 'Installed'),
144 )
145 fields = (
146 ChoiceField(choices=choices),
147 ModelChoiceField(queryset=Addon.objects.all(), required=False),
148 )
149 super().__init__(fields, **kwargs)
150 self.widget = JEXLMultiWidget(widgets=[f.widget for f in self.fields])
151
152 def compress(self, data_list):
153 if data_list:
154 return '{},{}'.format(data_list[0], getattr(data_list[1], 'id', ''))
155 return ''
156
157 def to_jexl(self, value):
158 check, addon_id = value.split(',')
159 if not check or not addon_id:
160 return ''
161
162 addon = Addon.objects.get(id=addon_id)
163 if check == 'not_installed':
164 jexl = '("{}" in addonsInfo.addons|keys) == false'.format(addon.guid)
165 elif check == 'installed':
166 jexl = '("{}" in addonsInfo.addons|keys) == true'.format(addon.guid)
167
168 return jexl
169
170 def validate(self, value):
171 check, addon_id = value.split(',')
172
173 self.fields[0].validate(check)
174 self.fields[1].validate(addon_id)
175
176 if check and not addon_id:
177 raise ValidationError('You must select an add-on')
178
179 if not check and addon_id:
180 raise ValidationError('You must select a check')
181 return value
182
183
184 class JEXLFirefoxServicesField(MultiValueField):
185 def __init__(self, **kwargs):
186 check_choices = (
187 (None, "I don't care"),
188 ('no_account', "User hasn't signed up for"),
189 ('has_account', 'User has signed up for'),
190 )
191 service_choices = (
192 (None, '---------'),
193 ('Firefox Lockwise', 'Firefox Lockwise'),
194 ('Firefox Monitor', 'Firefox Monitor'),
195 ('Firefox Send', 'Firefox Send'),
196 ('Firefox Private Network', 'Firefox Private Network'),
197 ('Notes', 'Notes'),
198 ('Pocket', 'Pocket'),
199 )
200 fields = (
201 ChoiceField(choices=check_choices),
202 ChoiceField(choices=service_choices),
203 )
204 super().__init__(fields, **kwargs)
205 self.widget = JEXLMultiWidget(widgets=[f.widget for f in self.fields])
206
207 def compress(self, data_list):
208 if data_list:
209 return f'{data_list[0]},{data_list[1]}'
210 return ''
211
212 def to_jexl(self, value):
213 check, service_name = value.split(',')
214 if not check or not service_name:
215 return ''
216
217 if check == 'no_account':
218 jexl = f'("{service_name}" in attachedFxAOAuthClients|mapToProperty("name")) == false'
219 elif check == 'has_account':
220 jexl = f'("{service_name}" in attachedFxAOAuthClients|mapToProperty("name")) == true'
221
222 return jexl
223
224 def validate(self, value):
225 check, service_name = value.split(',')
226
227 self.fields[0].validate(check)
228 self.fields[1].validate(service_name)
229
230 if check and not service_name:
231 raise ValidationError('You must select an Service.')
232
233 if not check and service_name:
234 raise ValidationError('You must select a check.')
235 return value
236
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/snippets/base/admin/fields.py b/snippets/base/admin/fields.py
--- a/snippets/base/admin/fields.py
+++ b/snippets/base/admin/fields.py
@@ -188,14 +188,19 @@
('no_account', "User hasn't signed up for"),
('has_account', 'User has signed up for'),
)
+ # Verify IDs using
+ # curl -s https://oauth.stage.mozaws.net/v1/client/<ID> | jq .
+ # Incomplete list of IDs
+ # https://docs.telemetry.mozilla.org/datasets/fxa_metrics/attribution.html#service-attribution # noqa
service_choices = (
(None, '---------'),
- ('Firefox Lockwise', 'Firefox Lockwise'),
- ('Firefox Monitor', 'Firefox Monitor'),
- ('Firefox Send', 'Firefox Send'),
- ('Firefox Private Network', 'Firefox Private Network'),
- ('Notes', 'Notes'),
- ('Pocket', 'Pocket'),
+ ('e7ce535d93522896|98adfa37698f255b', 'Firefox Lockwise'),
+ ('802d56ef2a9af9fa', 'Firefox Monitor'),
+ ('1f30e32975ae5112|20f7931c9054d833', 'Firefox Send'),
+ ('a8c528140153d1c6|565585c1745a144d|e6eb0d1e856335fc', 'Firefox Private Network'),
+ ('7ad9917f6c55fb77', 'Firefox Reality'),
+ ('7377719276ad44ee|749818d3f2e7857f', 'Pocket'),
+
)
fields = (
ChoiceField(choices=check_choices),
@@ -210,14 +215,21 @@
return ''
def to_jexl(self, value):
- check, service_name = value.split(',')
- if not check or not service_name:
+ check, ids = value.split(',')
+ ids = ids.split('|') if ids else ''
+
+ if not check or not ids:
return ''
+ jexl = '('
+ for id in ids:
+ jexl += f'("{id}" in attachedFxAOAuthClients|mapToProperty("id")) || '
+ jexl = jexl[:-4]
+
if check == 'no_account':
- jexl = f'("{service_name}" in attachedFxAOAuthClients|mapToProperty("name")) == false'
+ jexl += ') == false'
elif check == 'has_account':
- jexl = f'("{service_name}" in attachedFxAOAuthClients|mapToProperty("name")) == true'
+ jexl += ') == true'
return jexl
|
{"golden_diff": "diff --git a/snippets/base/admin/fields.py b/snippets/base/admin/fields.py\n--- a/snippets/base/admin/fields.py\n+++ b/snippets/base/admin/fields.py\n@@ -188,14 +188,19 @@\n ('no_account', \"User hasn't signed up for\"),\n ('has_account', 'User has signed up for'),\n )\n+ # Verify IDs using\n+ # curl -s https://oauth.stage.mozaws.net/v1/client/<ID> | jq .\n+ # Incomplete list of IDs\n+ # https://docs.telemetry.mozilla.org/datasets/fxa_metrics/attribution.html#service-attribution # noqa\n service_choices = (\n (None, '---------'),\n- ('Firefox Lockwise', 'Firefox Lockwise'),\n- ('Firefox Monitor', 'Firefox Monitor'),\n- ('Firefox Send', 'Firefox Send'),\n- ('Firefox Private Network', 'Firefox Private Network'),\n- ('Notes', 'Notes'),\n- ('Pocket', 'Pocket'),\n+ ('e7ce535d93522896|98adfa37698f255b', 'Firefox Lockwise'),\n+ ('802d56ef2a9af9fa', 'Firefox Monitor'),\n+ ('1f30e32975ae5112|20f7931c9054d833', 'Firefox Send'),\n+ ('a8c528140153d1c6|565585c1745a144d|e6eb0d1e856335fc', 'Firefox Private Network'),\n+ ('7ad9917f6c55fb77', 'Firefox Reality'),\n+ ('7377719276ad44ee|749818d3f2e7857f', 'Pocket'),\n+\n )\n fields = (\n ChoiceField(choices=check_choices),\n@@ -210,14 +215,21 @@\n return ''\n \n def to_jexl(self, value):\n- check, service_name = value.split(',')\n- if not check or not service_name:\n+ check, ids = value.split(',')\n+ ids = ids.split('|') if ids else ''\n+\n+ if not check or not ids:\n return ''\n \n+ jexl = '('\n+ for id in ids:\n+ jexl += f'(\"{id}\" in attachedFxAOAuthClients|mapToProperty(\"id\")) || '\n+ jexl = jexl[:-4]\n+\n if check == 'no_account':\n- jexl = f'(\"{service_name}\" in attachedFxAOAuthClients|mapToProperty(\"name\")) == false'\n+ jexl += ') == false'\n elif check == 'has_account':\n- jexl = f'(\"{service_name}\" in attachedFxAOAuthClients|mapToProperty(\"name\")) == true'\n+ jexl += ') == true'\n \n return jexl\n", "issue": "Use OAuth client ids instead of names in Firefox App Targeting.\nList of client ids: https://docs.telemetry.mozilla.org/datasets/fxa_metrics/attribution.html#service-attribution\r\n\r\nRelated bug https://bugzilla.mozilla.org/show_bug.cgi?id=1596514#c5\r\n\r\n - [ ] After pushing to prod, generate JEXL for related Targets\r\n - [ ] ping mindy when is complete\r\n\r\n<!-- probot = {\"419912\":{\"who\":\"glogiotatidis\",\"what\":\"\",\"when\":\"2020-01-08T09:00:00.000Z\"}} -->\n", "before_files": [{"content": "from django.core.exceptions import ValidationError\nfrom django.forms import (ChoiceField, ModelChoiceField, ModelMultipleChoiceField,\n MultiValueField, MultipleChoiceField)\n\nfrom snippets.base.models import Addon, TargetedCountry\n\nfrom .widgets import JEXLMultiWidget\n\n\nclass MultipleChoiceFieldCSV(MultipleChoiceField):\n # To be used with in snippets.base.forms.SnippetAdminForm and in\n # combination with DynamicField. 
We don't directly save() this field in the\n # database so get_prep_value has not been implemented.\n\n def prepare_value(self, value):\n value = super(MultipleChoiceFieldCSV, self).prepare_value(value)\n if not isinstance(value, list):\n value = value.split(';')\n return value\n\n def clean(self, value):\n value = super(MultipleChoiceFieldCSV, self).clean(value)\n return ';'.join(value)\n\n\nclass JEXLBaseField():\n def to_jexl(self, value):\n if value:\n return self.jexl.format(attr_name=self.attr_name, value=value)\n\n return None\n\n\nclass JEXLChoiceField(JEXLBaseField, ChoiceField):\n def __init__(self, attr_name, *args, **kwargs):\n self.attr_name = attr_name\n self.jexl = '{attr_name} == {value}'\n self.jexl = kwargs.pop('jexl', self.jexl)\n return super().__init__(*args, **kwargs)\n\n def to_jexl(self, value):\n if value:\n return self.jexl.format(attr_name=self.attr_name, value=value)\n\n\nclass JEXLModelMultipleChoiceField(JEXLBaseField, ModelMultipleChoiceField):\n def __init__(self, attr_name, *args, **kwargs):\n self.attr_name = attr_name\n self.jexl = '{attr_name} in {value}'\n self.jexl = kwargs.pop('jexl', self.jexl)\n return super().__init__(*args, **kwargs)\n\n def prepare_value(self, value):\n if isinstance(value, str):\n value = value.split(';')\n return super().prepare_value(value)\n\n def clean(self, value):\n value = super().clean(value)\n return ';'.join([str(x.id) for x in value])\n\n\nclass JEXLCountryField(JEXLModelMultipleChoiceField):\n def to_jexl(self, value):\n if value:\n values = TargetedCountry.objects.filter(id__in=value.split(\";\"))\n return f'region in {[x.code for x in values]}'\n return None\n\n\nclass JEXLRangeField(JEXLBaseField, MultiValueField):\n def __init__(self, attr_name, choices, **kwargs):\n self.attr_name = attr_name\n self.jexl = {\n 'minimum': '{value} <= {attr_name}',\n 'maximum': '{attr_name} < {value}'\n }\n self.jexl = kwargs.pop('jexl', self.jexl)\n fields = (\n ChoiceField(choices=choices),\n ChoiceField(choices=choices),\n )\n super().__init__(fields, **kwargs)\n self.widget = JEXLMultiWidget(widgets=[f.widget for f in self.fields],\n template_name='widgets/jexlrange.html')\n\n def compress(self, data_list):\n return ','.join(data_list)\n\n def to_jexl(self, value):\n final_jexl = []\n if value:\n minimum, maximum = value.split(',')\n if minimum:\n final_jexl.append(\n self.jexl['minimum'].format(attr_name=self.attr_name, value=minimum)\n )\n if maximum:\n final_jexl.append(\n self.jexl['maximum'].format(attr_name=self.attr_name, value=maximum)\n )\n return ' && '.join(final_jexl)\n\n def validate(self, value):\n minimum, maximum = value.split(',')\n self.fields[0].validate(minimum)\n self.fields[1].validate(maximum)\n\n if minimum and maximum and int(minimum) > int(maximum):\n raise ValidationError('Minimum value must be lower or equal to maximum value.')\n return value\n\n\nclass JEXLFirefoxRangeField(JEXLRangeField):\n def __init__(self, **kwargs):\n # Include only versions greater than 63, where ASRSnippets exist.\n min_version = 64\n # Need to be able to dynamically change this, probably using\n # product_details. 
Issue #855\n max_version = 84\n\n choices = (\n [(None, 'No limit')] +\n [(x, x) for x in reversed(range(min_version, max_version + 1))]\n )\n super().__init__('firefoxVersion', choices, **kwargs)\n\n def validate(self, value):\n minimum, maximum = value.split(',')\n self.fields[0].validate(minimum)\n self.fields[1].validate(maximum)\n\n if minimum and maximum and minimum > maximum:\n raise ValidationError('Minimum value must be lower or equal to maximum value.')\n return value\n\n\nclass JEXLAddonField(MultiValueField):\n def __init__(self, **kwargs):\n choices = (\n (None, \"I don't care\"),\n ('not_installed', 'Not Installed'),\n ('installed', 'Installed'),\n )\n fields = (\n ChoiceField(choices=choices),\n ModelChoiceField(queryset=Addon.objects.all(), required=False),\n )\n super().__init__(fields, **kwargs)\n self.widget = JEXLMultiWidget(widgets=[f.widget for f in self.fields])\n\n def compress(self, data_list):\n if data_list:\n return '{},{}'.format(data_list[0], getattr(data_list[1], 'id', ''))\n return ''\n\n def to_jexl(self, value):\n check, addon_id = value.split(',')\n if not check or not addon_id:\n return ''\n\n addon = Addon.objects.get(id=addon_id)\n if check == 'not_installed':\n jexl = '(\"{}\" in addonsInfo.addons|keys) == false'.format(addon.guid)\n elif check == 'installed':\n jexl = '(\"{}\" in addonsInfo.addons|keys) == true'.format(addon.guid)\n\n return jexl\n\n def validate(self, value):\n check, addon_id = value.split(',')\n\n self.fields[0].validate(check)\n self.fields[1].validate(addon_id)\n\n if check and not addon_id:\n raise ValidationError('You must select an add-on')\n\n if not check and addon_id:\n raise ValidationError('You must select a check')\n return value\n\n\nclass JEXLFirefoxServicesField(MultiValueField):\n def __init__(self, **kwargs):\n check_choices = (\n (None, \"I don't care\"),\n ('no_account', \"User hasn't signed up for\"),\n ('has_account', 'User has signed up for'),\n )\n service_choices = (\n (None, '---------'),\n ('Firefox Lockwise', 'Firefox Lockwise'),\n ('Firefox Monitor', 'Firefox Monitor'),\n ('Firefox Send', 'Firefox Send'),\n ('Firefox Private Network', 'Firefox Private Network'),\n ('Notes', 'Notes'),\n ('Pocket', 'Pocket'),\n )\n fields = (\n ChoiceField(choices=check_choices),\n ChoiceField(choices=service_choices),\n )\n super().__init__(fields, **kwargs)\n self.widget = JEXLMultiWidget(widgets=[f.widget for f in self.fields])\n\n def compress(self, data_list):\n if data_list:\n return f'{data_list[0]},{data_list[1]}'\n return ''\n\n def to_jexl(self, value):\n check, service_name = value.split(',')\n if not check or not service_name:\n return ''\n\n if check == 'no_account':\n jexl = f'(\"{service_name}\" in attachedFxAOAuthClients|mapToProperty(\"name\")) == false'\n elif check == 'has_account':\n jexl = f'(\"{service_name}\" in attachedFxAOAuthClients|mapToProperty(\"name\")) == true'\n\n return jexl\n\n def validate(self, value):\n check, service_name = value.split(',')\n\n self.fields[0].validate(check)\n self.fields[1].validate(service_name)\n\n if check and not service_name:\n raise ValidationError('You must select an Service.')\n\n if not check and service_name:\n raise ValidationError('You must select a check.')\n return value\n", "path": "snippets/base/admin/fields.py"}], "after_files": [{"content": "from django.core.exceptions import ValidationError\nfrom django.forms import (ChoiceField, ModelChoiceField, ModelMultipleChoiceField,\n MultiValueField, MultipleChoiceField)\n\nfrom snippets.base.models import 
Addon, TargetedCountry\n\nfrom .widgets import JEXLMultiWidget\n\n\nclass MultipleChoiceFieldCSV(MultipleChoiceField):\n # To be used with in snippets.base.forms.SnippetAdminForm and in\n # combination with DynamicField. We don't directly save() this field in the\n # database so get_prep_value has not been implemented.\n\n def prepare_value(self, value):\n value = super(MultipleChoiceFieldCSV, self).prepare_value(value)\n if not isinstance(value, list):\n value = value.split(';')\n return value\n\n def clean(self, value):\n value = super(MultipleChoiceFieldCSV, self).clean(value)\n return ';'.join(value)\n\n\nclass JEXLBaseField():\n def to_jexl(self, value):\n if value:\n return self.jexl.format(attr_name=self.attr_name, value=value)\n\n return None\n\n\nclass JEXLChoiceField(JEXLBaseField, ChoiceField):\n def __init__(self, attr_name, *args, **kwargs):\n self.attr_name = attr_name\n self.jexl = '{attr_name} == {value}'\n self.jexl = kwargs.pop('jexl', self.jexl)\n return super().__init__(*args, **kwargs)\n\n def to_jexl(self, value):\n if value:\n return self.jexl.format(attr_name=self.attr_name, value=value)\n\n\nclass JEXLModelMultipleChoiceField(JEXLBaseField, ModelMultipleChoiceField):\n def __init__(self, attr_name, *args, **kwargs):\n self.attr_name = attr_name\n self.jexl = '{attr_name} in {value}'\n self.jexl = kwargs.pop('jexl', self.jexl)\n return super().__init__(*args, **kwargs)\n\n def prepare_value(self, value):\n if isinstance(value, str):\n value = value.split(';')\n return super().prepare_value(value)\n\n def clean(self, value):\n value = super().clean(value)\n return ';'.join([str(x.id) for x in value])\n\n\nclass JEXLCountryField(JEXLModelMultipleChoiceField):\n def to_jexl(self, value):\n if value:\n values = TargetedCountry.objects.filter(id__in=value.split(\";\"))\n return f'region in {[x.code for x in values]}'\n return None\n\n\nclass JEXLRangeField(JEXLBaseField, MultiValueField):\n def __init__(self, attr_name, choices, **kwargs):\n self.attr_name = attr_name\n self.jexl = {\n 'minimum': '{value} <= {attr_name}',\n 'maximum': '{attr_name} < {value}'\n }\n self.jexl = kwargs.pop('jexl', self.jexl)\n fields = (\n ChoiceField(choices=choices),\n ChoiceField(choices=choices),\n )\n super().__init__(fields, **kwargs)\n self.widget = JEXLMultiWidget(widgets=[f.widget for f in self.fields],\n template_name='widgets/jexlrange.html')\n\n def compress(self, data_list):\n return ','.join(data_list)\n\n def to_jexl(self, value):\n final_jexl = []\n if value:\n minimum, maximum = value.split(',')\n if minimum:\n final_jexl.append(\n self.jexl['minimum'].format(attr_name=self.attr_name, value=minimum)\n )\n if maximum:\n final_jexl.append(\n self.jexl['maximum'].format(attr_name=self.attr_name, value=maximum)\n )\n return ' && '.join(final_jexl)\n\n def validate(self, value):\n minimum, maximum = value.split(',')\n self.fields[0].validate(minimum)\n self.fields[1].validate(maximum)\n\n if minimum and maximum and int(minimum) > int(maximum):\n raise ValidationError('Minimum value must be lower or equal to maximum value.')\n return value\n\n\nclass JEXLFirefoxRangeField(JEXLRangeField):\n def __init__(self, **kwargs):\n # Include only versions greater than 63, where ASRSnippets exist.\n min_version = 64\n # Need to be able to dynamically change this, probably using\n # product_details. 
Issue #855\n max_version = 84\n\n choices = (\n [(None, 'No limit')] +\n [(x, x) for x in reversed(range(min_version, max_version + 1))]\n )\n super().__init__('firefoxVersion', choices, **kwargs)\n\n def validate(self, value):\n minimum, maximum = value.split(',')\n self.fields[0].validate(minimum)\n self.fields[1].validate(maximum)\n\n if minimum and maximum and minimum > maximum:\n raise ValidationError('Minimum value must be lower or equal to maximum value.')\n return value\n\n\nclass JEXLAddonField(MultiValueField):\n def __init__(self, **kwargs):\n choices = (\n (None, \"I don't care\"),\n ('not_installed', 'Not Installed'),\n ('installed', 'Installed'),\n )\n fields = (\n ChoiceField(choices=choices),\n ModelChoiceField(queryset=Addon.objects.all(), required=False),\n )\n super().__init__(fields, **kwargs)\n self.widget = JEXLMultiWidget(widgets=[f.widget for f in self.fields])\n\n def compress(self, data_list):\n if data_list:\n return '{},{}'.format(data_list[0], getattr(data_list[1], 'id', ''))\n return ''\n\n def to_jexl(self, value):\n check, addon_id = value.split(',')\n if not check or not addon_id:\n return ''\n\n addon = Addon.objects.get(id=addon_id)\n if check == 'not_installed':\n jexl = '(\"{}\" in addonsInfo.addons|keys) == false'.format(addon.guid)\n elif check == 'installed':\n jexl = '(\"{}\" in addonsInfo.addons|keys) == true'.format(addon.guid)\n\n return jexl\n\n def validate(self, value):\n check, addon_id = value.split(',')\n\n self.fields[0].validate(check)\n self.fields[1].validate(addon_id)\n\n if check and not addon_id:\n raise ValidationError('You must select an add-on')\n\n if not check and addon_id:\n raise ValidationError('You must select a check')\n return value\n\n\nclass JEXLFirefoxServicesField(MultiValueField):\n def __init__(self, **kwargs):\n check_choices = (\n (None, \"I don't care\"),\n ('no_account', \"User hasn't signed up for\"),\n ('has_account', 'User has signed up for'),\n )\n # Verify IDs using\n # curl -s https://oauth.stage.mozaws.net/v1/client/<ID> | jq .\n # Incomplete list of IDs\n # https://docs.telemetry.mozilla.org/datasets/fxa_metrics/attribution.html#service-attribution # noqa\n service_choices = (\n (None, '---------'),\n ('e7ce535d93522896|98adfa37698f255b', 'Firefox Lockwise'),\n ('802d56ef2a9af9fa', 'Firefox Monitor'),\n ('1f30e32975ae5112|20f7931c9054d833', 'Firefox Send'),\n ('a8c528140153d1c6|565585c1745a144d|e6eb0d1e856335fc', 'Firefox Private Network'),\n ('7ad9917f6c55fb77', 'Firefox Reality'),\n ('7377719276ad44ee|749818d3f2e7857f', 'Pocket'),\n\n )\n fields = (\n ChoiceField(choices=check_choices),\n ChoiceField(choices=service_choices),\n )\n super().__init__(fields, **kwargs)\n self.widget = JEXLMultiWidget(widgets=[f.widget for f in self.fields])\n\n def compress(self, data_list):\n if data_list:\n return f'{data_list[0]},{data_list[1]}'\n return ''\n\n def to_jexl(self, value):\n check, ids = value.split(',')\n ids = ids.split('|') if ids else ''\n\n if not check or not ids:\n return ''\n\n jexl = '('\n for id in ids:\n jexl += f'(\"{id}\" in attachedFxAOAuthClients|mapToProperty(\"id\")) || '\n jexl = jexl[:-4]\n\n if check == 'no_account':\n jexl += ') == false'\n elif check == 'has_account':\n jexl += ') == true'\n\n return jexl\n\n def validate(self, value):\n check, service_name = value.split(',')\n\n self.fields[0].validate(check)\n self.fields[1].validate(service_name)\n\n if check and not service_name:\n raise ValidationError('You must select an Service.')\n\n if not check and service_name:\n raise 
ValidationError('You must select a check.')\n return value\n", "path": "snippets/base/admin/fields.py"}]}
| 2,861 | 690 |
gh_patches_debug_29509
|
rasdani/github-patches
|
git_diff
|
Textualize__textual-3012
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ListView method to append many
There is a method on ListView to append a single item. We could also use a method to append many items.
If we're following the same conventions as a builtin list, this would be called `ListView.extend` and would accept an Iterable of ListItems.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/textual/widgets/_list_view.py`
Content:
```
1 from __future__ import annotations
2
3 from typing import ClassVar, Optional
4
5 from textual.await_remove import AwaitRemove
6 from textual.binding import Binding, BindingType
7 from textual.containers import VerticalScroll
8 from textual.events import Mount
9 from textual.geometry import clamp
10 from textual.message import Message
11 from textual.reactive import reactive
12 from textual.widget import AwaitMount, Widget
13 from textual.widgets._list_item import ListItem
14
15
16 class ListView(VerticalScroll, can_focus=True, can_focus_children=False):
17 """A vertical list view widget.
18
19 Displays a vertical list of `ListItem`s which can be highlighted and
20 selected using the mouse or keyboard.
21
22 Attributes:
23 index: The index in the list that's currently highlighted.
24 """
25
26 BINDINGS: ClassVar[list[BindingType]] = [
27 Binding("enter", "select_cursor", "Select", show=False),
28 Binding("up", "cursor_up", "Cursor Up", show=False),
29 Binding("down", "cursor_down", "Cursor Down", show=False),
30 ]
31 """
32 | Key(s) | Description |
33 | :- | :- |
34 | enter | Select the current item. |
35 | up | Move the cursor up. |
36 | down | Move the cursor down. |
37 """
38
39 index = reactive[Optional[int]](0, always_update=True)
40
41 class Highlighted(Message, bubble=True):
42 """Posted when the highlighted item changes.
43
44 Highlighted item is controlled using up/down keys.
45 Can be handled using `on_list_view_highlighted` in a subclass of `ListView`
46 or in a parent widget in the DOM.
47 """
48
49 ALLOW_SELECTOR_MATCH = {"item"}
50 """Additional message attributes that can be used with the [`on` decorator][textual.on]."""
51
52 def __init__(self, list_view: ListView, item: ListItem | None) -> None:
53 super().__init__()
54 self.list_view: ListView = list_view
55 """The view that contains the item highlighted."""
56 self.item: ListItem | None = item
57 """The highlighted item, if there is one highlighted."""
58
59 @property
60 def control(self) -> ListView:
61 """The view that contains the item highlighted.
62
63 This is an alias for [`Highlighted.list_view`][textual.widgets.ListView.Highlighted.list_view]
64 and is used by the [`on`][textual.on] decorator.
65 """
66 return self.list_view
67
68 class Selected(Message, bubble=True):
69 """Posted when a list item is selected, e.g. when you press the enter key on it.
70
71 Can be handled using `on_list_view_selected` in a subclass of `ListView` or in
72 a parent widget in the DOM.
73 """
74
75 ALLOW_SELECTOR_MATCH = {"item"}
76 """Additional message attributes that can be used with the [`on` decorator][textual.on]."""
77
78 def __init__(self, list_view: ListView, item: ListItem) -> None:
79 super().__init__()
80 self.list_view: ListView = list_view
81 """The view that contains the item selected."""
82 self.item: ListItem = item
83 """The selected item."""
84
85 @property
86 def control(self) -> ListView:
87 """The view that contains the item selected.
88
89 This is an alias for [`Selected.list_view`][textual.widgets.ListView.Selected.list_view]
90 and is used by the [`on`][textual.on] decorator.
91 """
92 return self.list_view
93
94 def __init__(
95 self,
96 *children: ListItem,
97 initial_index: int | None = 0,
98 name: str | None = None,
99 id: str | None = None,
100 classes: str | None = None,
101 disabled: bool = False,
102 ) -> None:
103 """
104 Initialize a ListView.
105
106 Args:
107 *children: The ListItems to display in the list.
108 initial_index: The index that should be highlighted when the list is first mounted.
109 name: The name of the widget.
110 id: The unique ID of the widget used in CSS/query selection.
111 classes: The CSS classes of the widget.
112 disabled: Whether the ListView is disabled or not.
113 """
114 super().__init__(
115 *children, name=name, id=id, classes=classes, disabled=disabled
116 )
117 self._index = initial_index
118
119 def _on_mount(self, _: Mount) -> None:
120 """Ensure the ListView is fully-settled after mounting."""
121 self.index = self._index
122
123 @property
124 def highlighted_child(self) -> ListItem | None:
125 """The currently highlighted ListItem, or None if nothing is highlighted."""
126 if self.index is not None and 0 <= self.index < len(self._nodes):
127 list_item = self._nodes[self.index]
128 assert isinstance(list_item, ListItem)
129 return list_item
130 else:
131 return None
132
133 def validate_index(self, index: int | None) -> int | None:
134 """Clamp the index to the valid range, or set to None if there's nothing to highlight.
135
136 Args:
137 index: The index to clamp.
138
139 Returns:
140 The clamped index.
141 """
142 if not self._nodes or index is None:
143 return None
144 return self._clamp_index(index)
145
146 def _clamp_index(self, index: int) -> int:
147 """Clamp the index to a valid value given the current list of children"""
148 last_index = max(len(self._nodes) - 1, 0)
149 return clamp(index, 0, last_index)
150
151 def _is_valid_index(self, index: int | None) -> bool:
152 """Return True if the current index is valid given the current list of children"""
153 if index is None:
154 return False
155 return 0 <= index < len(self._nodes)
156
157 def watch_index(self, old_index: int, new_index: int) -> None:
158 """Updates the highlighting when the index changes."""
159 if self._is_valid_index(old_index):
160 old_child = self._nodes[old_index]
161 assert isinstance(old_child, ListItem)
162 old_child.highlighted = False
163
164 new_child: Widget | None
165 if self._is_valid_index(new_index):
166 new_child = self._nodes[new_index]
167 assert isinstance(new_child, ListItem)
168 new_child.highlighted = True
169 else:
170 new_child = None
171
172 self._scroll_highlighted_region()
173 self.post_message(self.Highlighted(self, new_child))
174
175 def append(self, item: ListItem) -> AwaitMount:
176 """Append a new ListItem to the end of the ListView.
177
178 Args:
179 item: The ListItem to append.
180
181 Returns:
182 An awaitable that yields control to the event loop
183 until the DOM has been updated with the new child item.
184 """
185 await_mount = self.mount(item)
186 if len(self) == 1:
187 self.index = 0
188 return await_mount
189
190 def clear(self) -> AwaitRemove:
191 """Clear all items from the ListView.
192
193 Returns:
194 An awaitable that yields control to the event loop until
195 the DOM has been updated to reflect all children being removed.
196 """
197 await_remove = self.query("ListView > ListItem").remove()
198 self.index = None
199 return await_remove
200
201 def action_select_cursor(self) -> None:
202 """Select the current item in the list."""
203 selected_child = self.highlighted_child
204 if selected_child is None:
205 return
206 self.post_message(self.Selected(self, selected_child))
207
208 def action_cursor_down(self) -> None:
209 """Highlight the next item in the list."""
210 if self.index is None:
211 self.index = 0
212 return
213 self.index += 1
214
215 def action_cursor_up(self) -> None:
216 """Highlight the previous item in the list."""
217 if self.index is None:
218 self.index = 0
219 return
220 self.index -= 1
221
222 def _on_list_item__child_clicked(self, event: ListItem._ChildClicked) -> None:
223 self.focus()
224 self.index = self._nodes.index(event.item)
225 self.post_message(self.Selected(self, event.item))
226
227 def _scroll_highlighted_region(self) -> None:
228 """Used to keep the highlighted index within vision"""
229 if self.highlighted_child is not None:
230 self.scroll_to_widget(self.highlighted_child, animate=False)
231
232 def __len__(self):
233 return len(self._nodes)
234
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/textual/widgets/_list_view.py b/src/textual/widgets/_list_view.py
--- a/src/textual/widgets/_list_view.py
+++ b/src/textual/widgets/_list_view.py
@@ -1,6 +1,6 @@
from __future__ import annotations
-from typing import ClassVar, Optional
+from typing import ClassVar, Iterable, Optional
from textual.await_remove import AwaitRemove
from textual.binding import Binding, BindingType
@@ -172,6 +172,21 @@
self._scroll_highlighted_region()
self.post_message(self.Highlighted(self, new_child))
+ def extend(self, items: Iterable[ListItem]) -> AwaitMount:
+ """Append multiple new ListItems to the end of the ListView.
+
+ Args:
+ items: The ListItems to append.
+
+ Returns:
+ An awaitable that yields control to the event loop
+ until the DOM has been updated with the new child items.
+ """
+ await_mount = self.mount(*items)
+ if len(self) == 1:
+ self.index = 0
+ return await_mount
+
def append(self, item: ListItem) -> AwaitMount:
"""Append a new ListItem to the end of the ListView.
@@ -182,10 +197,7 @@
An awaitable that yields control to the event loop
until the DOM has been updated with the new child item.
"""
- await_mount = self.mount(item)
- if len(self) == 1:
- self.index = 0
- return await_mount
+ return self.extend([item])
def clear(self) -> AwaitRemove:
"""Clear all items from the ListView.
|
{"golden_diff": "diff --git a/src/textual/widgets/_list_view.py b/src/textual/widgets/_list_view.py\n--- a/src/textual/widgets/_list_view.py\n+++ b/src/textual/widgets/_list_view.py\n@@ -1,6 +1,6 @@\n from __future__ import annotations\n \n-from typing import ClassVar, Optional\n+from typing import ClassVar, Iterable, Optional\n \n from textual.await_remove import AwaitRemove\n from textual.binding import Binding, BindingType\n@@ -172,6 +172,21 @@\n self._scroll_highlighted_region()\n self.post_message(self.Highlighted(self, new_child))\n \n+ def extend(self, items: Iterable[ListItem]) -> AwaitMount:\n+ \"\"\"Append multiple new ListItems to the end of the ListView.\n+\n+ Args:\n+ items: The ListItems to append.\n+\n+ Returns:\n+ An awaitable that yields control to the event loop\n+ until the DOM has been updated with the new child items.\n+ \"\"\"\n+ await_mount = self.mount(*items)\n+ if len(self) == 1:\n+ self.index = 0\n+ return await_mount\n+\n def append(self, item: ListItem) -> AwaitMount:\n \"\"\"Append a new ListItem to the end of the ListView.\n \n@@ -182,10 +197,7 @@\n An awaitable that yields control to the event loop\n until the DOM has been updated with the new child item.\n \"\"\"\n- await_mount = self.mount(item)\n- if len(self) == 1:\n- self.index = 0\n- return await_mount\n+ return self.extend([item])\n \n def clear(self) -> AwaitRemove:\n \"\"\"Clear all items from the ListView.\n", "issue": "ListView method to append many\nThere is a method on ListView to append a single item. We could also use a method to append many items.\n\nIf we're following the same conventions as a builtin list, this would be called `ListView.extend` and would accept an Iterable of ListItems.\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import ClassVar, Optional\n\nfrom textual.await_remove import AwaitRemove\nfrom textual.binding import Binding, BindingType\nfrom textual.containers import VerticalScroll\nfrom textual.events import Mount\nfrom textual.geometry import clamp\nfrom textual.message import Message\nfrom textual.reactive import reactive\nfrom textual.widget import AwaitMount, Widget\nfrom textual.widgets._list_item import ListItem\n\n\nclass ListView(VerticalScroll, can_focus=True, can_focus_children=False):\n \"\"\"A vertical list view widget.\n\n Displays a vertical list of `ListItem`s which can be highlighted and\n selected using the mouse or keyboard.\n\n Attributes:\n index: The index in the list that's currently highlighted.\n \"\"\"\n\n BINDINGS: ClassVar[list[BindingType]] = [\n Binding(\"enter\", \"select_cursor\", \"Select\", show=False),\n Binding(\"up\", \"cursor_up\", \"Cursor Up\", show=False),\n Binding(\"down\", \"cursor_down\", \"Cursor Down\", show=False),\n ]\n \"\"\"\n | Key(s) | Description |\n | :- | :- |\n | enter | Select the current item. |\n | up | Move the cursor up. |\n | down | Move the cursor down. 
|\n \"\"\"\n\n index = reactive[Optional[int]](0, always_update=True)\n\n class Highlighted(Message, bubble=True):\n \"\"\"Posted when the highlighted item changes.\n\n Highlighted item is controlled using up/down keys.\n Can be handled using `on_list_view_highlighted` in a subclass of `ListView`\n or in a parent widget in the DOM.\n \"\"\"\n\n ALLOW_SELECTOR_MATCH = {\"item\"}\n \"\"\"Additional message attributes that can be used with the [`on` decorator][textual.on].\"\"\"\n\n def __init__(self, list_view: ListView, item: ListItem | None) -> None:\n super().__init__()\n self.list_view: ListView = list_view\n \"\"\"The view that contains the item highlighted.\"\"\"\n self.item: ListItem | None = item\n \"\"\"The highlighted item, if there is one highlighted.\"\"\"\n\n @property\n def control(self) -> ListView:\n \"\"\"The view that contains the item highlighted.\n\n This is an alias for [`Highlighted.list_view`][textual.widgets.ListView.Highlighted.list_view]\n and is used by the [`on`][textual.on] decorator.\n \"\"\"\n return self.list_view\n\n class Selected(Message, bubble=True):\n \"\"\"Posted when a list item is selected, e.g. when you press the enter key on it.\n\n Can be handled using `on_list_view_selected` in a subclass of `ListView` or in\n a parent widget in the DOM.\n \"\"\"\n\n ALLOW_SELECTOR_MATCH = {\"item\"}\n \"\"\"Additional message attributes that can be used with the [`on` decorator][textual.on].\"\"\"\n\n def __init__(self, list_view: ListView, item: ListItem) -> None:\n super().__init__()\n self.list_view: ListView = list_view\n \"\"\"The view that contains the item selected.\"\"\"\n self.item: ListItem = item\n \"\"\"The selected item.\"\"\"\n\n @property\n def control(self) -> ListView:\n \"\"\"The view that contains the item selected.\n\n This is an alias for [`Selected.list_view`][textual.widgets.ListView.Selected.list_view]\n and is used by the [`on`][textual.on] decorator.\n \"\"\"\n return self.list_view\n\n def __init__(\n self,\n *children: ListItem,\n initial_index: int | None = 0,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n disabled: bool = False,\n ) -> None:\n \"\"\"\n Initialize a ListView.\n\n Args:\n *children: The ListItems to display in the list.\n initial_index: The index that should be highlighted when the list is first mounted.\n name: The name of the widget.\n id: The unique ID of the widget used in CSS/query selection.\n classes: The CSS classes of the widget.\n disabled: Whether the ListView is disabled or not.\n \"\"\"\n super().__init__(\n *children, name=name, id=id, classes=classes, disabled=disabled\n )\n self._index = initial_index\n\n def _on_mount(self, _: Mount) -> None:\n \"\"\"Ensure the ListView is fully-settled after mounting.\"\"\"\n self.index = self._index\n\n @property\n def highlighted_child(self) -> ListItem | None:\n \"\"\"The currently highlighted ListItem, or None if nothing is highlighted.\"\"\"\n if self.index is not None and 0 <= self.index < len(self._nodes):\n list_item = self._nodes[self.index]\n assert isinstance(list_item, ListItem)\n return list_item\n else:\n return None\n\n def validate_index(self, index: int | None) -> int | None:\n \"\"\"Clamp the index to the valid range, or set to None if there's nothing to highlight.\n\n Args:\n index: The index to clamp.\n\n Returns:\n The clamped index.\n \"\"\"\n if not self._nodes or index is None:\n return None\n return self._clamp_index(index)\n\n def _clamp_index(self, index: int) -> int:\n \"\"\"Clamp the index to a valid 
value given the current list of children\"\"\"\n last_index = max(len(self._nodes) - 1, 0)\n return clamp(index, 0, last_index)\n\n def _is_valid_index(self, index: int | None) -> bool:\n \"\"\"Return True if the current index is valid given the current list of children\"\"\"\n if index is None:\n return False\n return 0 <= index < len(self._nodes)\n\n def watch_index(self, old_index: int, new_index: int) -> None:\n \"\"\"Updates the highlighting when the index changes.\"\"\"\n if self._is_valid_index(old_index):\n old_child = self._nodes[old_index]\n assert isinstance(old_child, ListItem)\n old_child.highlighted = False\n\n new_child: Widget | None\n if self._is_valid_index(new_index):\n new_child = self._nodes[new_index]\n assert isinstance(new_child, ListItem)\n new_child.highlighted = True\n else:\n new_child = None\n\n self._scroll_highlighted_region()\n self.post_message(self.Highlighted(self, new_child))\n\n def append(self, item: ListItem) -> AwaitMount:\n \"\"\"Append a new ListItem to the end of the ListView.\n\n Args:\n item: The ListItem to append.\n\n Returns:\n An awaitable that yields control to the event loop\n until the DOM has been updated with the new child item.\n \"\"\"\n await_mount = self.mount(item)\n if len(self) == 1:\n self.index = 0\n return await_mount\n\n def clear(self) -> AwaitRemove:\n \"\"\"Clear all items from the ListView.\n\n Returns:\n An awaitable that yields control to the event loop until\n the DOM has been updated to reflect all children being removed.\n \"\"\"\n await_remove = self.query(\"ListView > ListItem\").remove()\n self.index = None\n return await_remove\n\n def action_select_cursor(self) -> None:\n \"\"\"Select the current item in the list.\"\"\"\n selected_child = self.highlighted_child\n if selected_child is None:\n return\n self.post_message(self.Selected(self, selected_child))\n\n def action_cursor_down(self) -> None:\n \"\"\"Highlight the next item in the list.\"\"\"\n if self.index is None:\n self.index = 0\n return\n self.index += 1\n\n def action_cursor_up(self) -> None:\n \"\"\"Highlight the previous item in the list.\"\"\"\n if self.index is None:\n self.index = 0\n return\n self.index -= 1\n\n def _on_list_item__child_clicked(self, event: ListItem._ChildClicked) -> None:\n self.focus()\n self.index = self._nodes.index(event.item)\n self.post_message(self.Selected(self, event.item))\n\n def _scroll_highlighted_region(self) -> None:\n \"\"\"Used to keep the highlighted index within vision\"\"\"\n if self.highlighted_child is not None:\n self.scroll_to_widget(self.highlighted_child, animate=False)\n\n def __len__(self):\n return len(self._nodes)\n", "path": "src/textual/widgets/_list_view.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import ClassVar, Iterable, Optional\n\nfrom textual.await_remove import AwaitRemove\nfrom textual.binding import Binding, BindingType\nfrom textual.containers import VerticalScroll\nfrom textual.events import Mount\nfrom textual.geometry import clamp\nfrom textual.message import Message\nfrom textual.reactive import reactive\nfrom textual.widget import AwaitMount, Widget\nfrom textual.widgets._list_item import ListItem\n\n\nclass ListView(VerticalScroll, can_focus=True, can_focus_children=False):\n \"\"\"A vertical list view widget.\n\n Displays a vertical list of `ListItem`s which can be highlighted and\n selected using the mouse or keyboard.\n\n Attributes:\n index: The index in the list that's currently highlighted.\n \"\"\"\n\n BINDINGS: 
ClassVar[list[BindingType]] = [\n Binding(\"enter\", \"select_cursor\", \"Select\", show=False),\n Binding(\"up\", \"cursor_up\", \"Cursor Up\", show=False),\n Binding(\"down\", \"cursor_down\", \"Cursor Down\", show=False),\n ]\n \"\"\"\n | Key(s) | Description |\n | :- | :- |\n | enter | Select the current item. |\n | up | Move the cursor up. |\n | down | Move the cursor down. |\n \"\"\"\n\n index = reactive[Optional[int]](0, always_update=True)\n\n class Highlighted(Message, bubble=True):\n \"\"\"Posted when the highlighted item changes.\n\n Highlighted item is controlled using up/down keys.\n Can be handled using `on_list_view_highlighted` in a subclass of `ListView`\n or in a parent widget in the DOM.\n \"\"\"\n\n ALLOW_SELECTOR_MATCH = {\"item\"}\n \"\"\"Additional message attributes that can be used with the [`on` decorator][textual.on].\"\"\"\n\n def __init__(self, list_view: ListView, item: ListItem | None) -> None:\n super().__init__()\n self.list_view: ListView = list_view\n \"\"\"The view that contains the item highlighted.\"\"\"\n self.item: ListItem | None = item\n \"\"\"The highlighted item, if there is one highlighted.\"\"\"\n\n @property\n def control(self) -> ListView:\n \"\"\"The view that contains the item highlighted.\n\n This is an alias for [`Highlighted.list_view`][textual.widgets.ListView.Highlighted.list_view]\n and is used by the [`on`][textual.on] decorator.\n \"\"\"\n return self.list_view\n\n class Selected(Message, bubble=True):\n \"\"\"Posted when a list item is selected, e.g. when you press the enter key on it.\n\n Can be handled using `on_list_view_selected` in a subclass of `ListView` or in\n a parent widget in the DOM.\n \"\"\"\n\n ALLOW_SELECTOR_MATCH = {\"item\"}\n \"\"\"Additional message attributes that can be used with the [`on` decorator][textual.on].\"\"\"\n\n def __init__(self, list_view: ListView, item: ListItem) -> None:\n super().__init__()\n self.list_view: ListView = list_view\n \"\"\"The view that contains the item selected.\"\"\"\n self.item: ListItem = item\n \"\"\"The selected item.\"\"\"\n\n @property\n def control(self) -> ListView:\n \"\"\"The view that contains the item selected.\n\n This is an alias for [`Selected.list_view`][textual.widgets.ListView.Selected.list_view]\n and is used by the [`on`][textual.on] decorator.\n \"\"\"\n return self.list_view\n\n def __init__(\n self,\n *children: ListItem,\n initial_index: int | None = 0,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n disabled: bool = False,\n ) -> None:\n \"\"\"\n Initialize a ListView.\n\n Args:\n *children: The ListItems to display in the list.\n initial_index: The index that should be highlighted when the list is first mounted.\n name: The name of the widget.\n id: The unique ID of the widget used in CSS/query selection.\n classes: The CSS classes of the widget.\n disabled: Whether the ListView is disabled or not.\n \"\"\"\n super().__init__(\n *children, name=name, id=id, classes=classes, disabled=disabled\n )\n self._index = initial_index\n\n def _on_mount(self, _: Mount) -> None:\n \"\"\"Ensure the ListView is fully-settled after mounting.\"\"\"\n self.index = self._index\n\n @property\n def highlighted_child(self) -> ListItem | None:\n \"\"\"The currently highlighted ListItem, or None if nothing is highlighted.\"\"\"\n if self.index is not None and 0 <= self.index < len(self._nodes):\n list_item = self._nodes[self.index]\n assert isinstance(list_item, ListItem)\n return list_item\n else:\n return None\n\n def 
validate_index(self, index: int | None) -> int | None:\n \"\"\"Clamp the index to the valid range, or set to None if there's nothing to highlight.\n\n Args:\n index: The index to clamp.\n\n Returns:\n The clamped index.\n \"\"\"\n if not self._nodes or index is None:\n return None\n return self._clamp_index(index)\n\n def _clamp_index(self, index: int) -> int:\n \"\"\"Clamp the index to a valid value given the current list of children\"\"\"\n last_index = max(len(self._nodes) - 1, 0)\n return clamp(index, 0, last_index)\n\n def _is_valid_index(self, index: int | None) -> bool:\n \"\"\"Return True if the current index is valid given the current list of children\"\"\"\n if index is None:\n return False\n return 0 <= index < len(self._nodes)\n\n def watch_index(self, old_index: int, new_index: int) -> None:\n \"\"\"Updates the highlighting when the index changes.\"\"\"\n if self._is_valid_index(old_index):\n old_child = self._nodes[old_index]\n assert isinstance(old_child, ListItem)\n old_child.highlighted = False\n\n new_child: Widget | None\n if self._is_valid_index(new_index):\n new_child = self._nodes[new_index]\n assert isinstance(new_child, ListItem)\n new_child.highlighted = True\n else:\n new_child = None\n\n self._scroll_highlighted_region()\n self.post_message(self.Highlighted(self, new_child))\n\n def extend(self, items: Iterable[ListItem]) -> AwaitMount:\n \"\"\"Append multiple new ListItems to the end of the ListView.\n\n Args:\n items: The ListItems to append.\n\n Returns:\n An awaitable that yields control to the event loop\n until the DOM has been updated with the new child items.\n \"\"\"\n await_mount = self.mount(*items)\n if len(self) == 1:\n self.index = 0\n return await_mount\n\n def append(self, item: ListItem) -> AwaitMount:\n \"\"\"Append a new ListItem to the end of the ListView.\n\n Args:\n item: The ListItem to append.\n\n Returns:\n An awaitable that yields control to the event loop\n until the DOM has been updated with the new child item.\n \"\"\"\n return self.extend([item])\n\n def clear(self) -> AwaitRemove:\n \"\"\"Clear all items from the ListView.\n\n Returns:\n An awaitable that yields control to the event loop until\n the DOM has been updated to reflect all children being removed.\n \"\"\"\n await_remove = self.query(\"ListView > ListItem\").remove()\n self.index = None\n return await_remove\n\n def action_select_cursor(self) -> None:\n \"\"\"Select the current item in the list.\"\"\"\n selected_child = self.highlighted_child\n if selected_child is None:\n return\n self.post_message(self.Selected(self, selected_child))\n\n def action_cursor_down(self) -> None:\n \"\"\"Highlight the next item in the list.\"\"\"\n if self.index is None:\n self.index = 0\n return\n self.index += 1\n\n def action_cursor_up(self) -> None:\n \"\"\"Highlight the previous item in the list.\"\"\"\n if self.index is None:\n self.index = 0\n return\n self.index -= 1\n\n def _on_list_item__child_clicked(self, event: ListItem._ChildClicked) -> None:\n self.focus()\n self.index = self._nodes.index(event.item)\n self.post_message(self.Selected(self, event.item))\n\n def _scroll_highlighted_region(self) -> None:\n \"\"\"Used to keep the highlighted index within vision\"\"\"\n if self.highlighted_child is not None:\n self.scroll_to_widget(self.highlighted_child, animate=False)\n\n def __len__(self):\n return len(self._nodes)\n", "path": "src/textual/widgets/_list_view.py"}]}
| 2,715 | 378 |
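
The `ListView.extend()` added by the diff above mounts every new item with a single `mount(*items)` call and returns one `AwaitMount`, with `append()` reduced to a one-item call into it. A minimal way to exercise it, assuming a Textual version that already ships the patched widget; the app name and labels are made up for the example:

```python
from textual.app import App, ComposeResult
from textual.widgets import Label, ListItem, ListView


class ExtendDemo(App):
    """Populate a ListView in bulk via the extend() method from the patch."""

    def compose(self) -> ComposeResult:
        yield ListView(ListItem(Label("item 1")))

    def on_mount(self) -> None:
        # One mount() call for all items instead of one append() per item.
        new_items = [ListItem(Label(f"item {n}")) for n in range(2, 6)]
        self.query_one(ListView).extend(new_items)


if __name__ == "__main__":
    ExtendDemo().run()
```
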
gh_patches_debug_2785
|
rasdani/github-patches
|
git_diff
|
dynaconf__dynaconf-769
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[RFC] Resolve deprecation warning for deprecated property kv
**Is your feature request related to a problem? Please describe.**
Yes, currently we are hitting the deprecation warning in hvac 0.11 since the kv property is deprecated and we are advised to use it from `Client.secrets`
Clear Warning:
DeprecationWarning: Call to deprecated property 'kv'. This property will be removed in version '0.9.0' Please use the 'kv' property on the 'Client.secrets' attribute moving forward
**Describe the solution you'd like**
Remove the usage of the kv property directly in dynaconf and use it from `Client.secrets`
**Describe alternatives you've considered**
The alternative is not required.
[RFC] Resolve deprecation warning for deprecated property kv
**Is your feature request related to a problem? Please describe.**
Yes, currently we are hitting the deprecation warning in hvac 0.11 since the kv property is deprecated and we are advised to use it from `Client.secrets`
Clear Warning:
DeprecationWarning: Call to deprecated property 'kv'. This property will be removed in version '0.9.0' Please use the 'kv' property on the 'Client.secrets' attribute moving forward
**Describe the solution you'd like**
Remove the usage of the kv property directly in dynaconf and use it from `Client.secrets`
**Describe alternatives you've considered**
The alternative is not required.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dynaconf/loaders/vault_loader.py`
Content:
```
1 # docker run -e 'VAULT_DEV_ROOT_TOKEN_ID=myroot' -p 8200:8200 vault
2 # pip install hvac
3 from __future__ import annotations
4
5 from dynaconf.utils import build_env_list
6 from dynaconf.utils.parse_conf import parse_conf_data
7
8 try:
9 import boto3
10 except ImportError:
11 boto3 = None
12
13 try:
14 from hvac import Client
15 from hvac.exceptions import InvalidPath
16 except ImportError:
17 raise ImportError(
18 "vault package is not installed in your environment. "
19 "`pip install dynaconf[vault]` or disable the vault loader with "
20 "export VAULT_ENABLED_FOR_DYNACONF=false"
21 )
22
23
24 IDENTIFIER = "vault"
25
26
27 # backwards compatibility
28 _get_env_list = build_env_list
29
30
31 def get_client(obj):
32 client = Client(
33 **{k: v for k, v in obj.VAULT_FOR_DYNACONF.items() if v is not None}
34 )
35 if obj.VAULT_ROLE_ID_FOR_DYNACONF is not None:
36 client.auth_approle(
37 role_id=obj.VAULT_ROLE_ID_FOR_DYNACONF,
38 secret_id=obj.get("VAULT_SECRET_ID_FOR_DYNACONF"),
39 )
40 elif obj.VAULT_ROOT_TOKEN_FOR_DYNACONF is not None:
41 client.token = obj.VAULT_ROOT_TOKEN_FOR_DYNACONF
42 elif obj.VAULT_AUTH_WITH_IAM_FOR_DYNACONF:
43 if boto3 is None:
44 raise ImportError(
45 "boto3 package is not installed in your environment. "
46 "`pip install boto3` or disable the VAULT_AUTH_WITH_IAM"
47 )
48
49 session = boto3.Session()
50 credentials = session.get_credentials()
51 client.auth.aws.iam_login(
52 credentials.access_key,
53 credentials.secret_key,
54 credentials.token,
55 role=obj.VAULT_AUTH_ROLE_FOR_DYNACONF,
56 )
57 assert client.is_authenticated(), (
58 "Vault authentication error: is VAULT_TOKEN_FOR_DYNACONF or "
59 "VAULT_ROLE_ID_FOR_DYNACONF defined?"
60 )
61 client.kv.default_kv_version = obj.VAULT_KV_VERSION_FOR_DYNACONF
62 return client
63
64
65 def load(obj, env=None, silent=None, key=None):
66 """Reads and loads in to "settings" a single key or all keys from vault
67
68 :param obj: the settings instance
69 :param env: settings env default='DYNACONF'
70 :param silent: if errors should raise
71 :param key: if defined load a single key, else load all in env
72 :return: None
73 """
74 client = get_client(obj)
75 try:
76 if obj.VAULT_KV_VERSION_FOR_DYNACONF == 2:
77 dirs = client.secrets.kv.v2.list_secrets(
78 path=obj.VAULT_PATH_FOR_DYNACONF,
79 mount_point=obj.VAULT_MOUNT_POINT_FOR_DYNACONF,
80 )["data"]["keys"]
81 else:
82 dirs = client.secrets.kv.v1.list_secrets(
83 path=obj.VAULT_PATH_FOR_DYNACONF,
84 mount_point=obj.VAULT_MOUNT_POINT_FOR_DYNACONF,
85 )["data"]["keys"]
86 except InvalidPath:
87 # The given path is not a directory
88 dirs = []
89 # First look for secrets into environments less store
90 if not obj.ENVIRONMENTS_FOR_DYNACONF:
91 # By adding '', dynaconf will now read secrets from environments-less
92 # store which are not written by `dynaconf write` to Vault store
93 env_list = [obj.MAIN_ENV_FOR_DYNACONF.lower(), ""]
94 # Finally, look for secret into all the environments
95 else:
96 env_list = dirs + build_env_list(obj, env)
97 for env in env_list:
98 path = "/".join([obj.VAULT_PATH_FOR_DYNACONF, env])
99 try:
100 if obj.VAULT_KV_VERSION_FOR_DYNACONF == 2:
101 data = client.secrets.kv.v2.read_secret_version(
102 path, mount_point=obj.VAULT_MOUNT_POINT_FOR_DYNACONF
103 )
104 else:
105 data = client.secrets.kv.read_secret(
106 "data/" + path,
107 mount_point=obj.VAULT_MOUNT_POINT_FOR_DYNACONF,
108 )
109 except InvalidPath:
110 # If the path doesn't exist, ignore it and set data to None
111 data = None
112 if data:
113 # There seems to be a data dict within a data dict,
114 # extract the inner data
115 data = data.get("data", {}).get("data", {})
116 try:
117 if (
118 obj.VAULT_KV_VERSION_FOR_DYNACONF == 2
119 and obj.ENVIRONMENTS_FOR_DYNACONF
120 and data
121 ):
122 data = data.get("data", {})
123 if data and key:
124 value = parse_conf_data(
125 data.get(key), tomlfy=True, box_settings=obj
126 )
127 if value:
128 obj.set(key, value)
129 elif data:
130 obj.update(data, loader_identifier=IDENTIFIER, tomlfy=True)
131 except Exception:
132 if silent:
133 return False
134 raise
135
136
137 def write(obj, data=None, **kwargs):
138 """Write a value in to loader source
139
140 :param obj: settings object
141 :param data: vars to be stored
142 :param kwargs: vars to be stored
143 :return:
144 """
145 if obj.VAULT_ENABLED_FOR_DYNACONF is False:
146 raise RuntimeError(
147 "Vault is not configured \n"
148 "export VAULT_ENABLED_FOR_DYNACONF=true\n"
149 "and configure the VAULT_FOR_DYNACONF_* variables"
150 )
151 data = data or {}
152 data.update(kwargs)
153 if not data:
154 raise AttributeError("Data must be provided")
155 data = {"data": data}
156 client = get_client(obj)
157 if obj.VAULT_KV_VERSION_FOR_DYNACONF == 1:
158 mount_point = obj.VAULT_MOUNT_POINT_FOR_DYNACONF + "/data"
159 else:
160 mount_point = obj.VAULT_MOUNT_POINT_FOR_DYNACONF
161 path = "/".join([obj.VAULT_PATH_FOR_DYNACONF, obj.current_env.lower()])
162 client.secrets.kv.create_or_update_secret(
163 path, secret=data, mount_point=mount_point
164 )
165 load(obj)
166
167
168 def list_envs(obj, path=""):
169 """
170 This function is a helper to get a list of all the existing envs in
171 the source of data, the use case is:
172 existing_envs = vault_loader.list_envs(settings)
173 for env in exiting_envs:
174 with settings.using_env(env): # switch to the env
175 # do something with a key of that env
176
177 :param obj: settings object
178 :param path: path to the vault secrets
179 :return: list containing all the keys at the given path
180 """
181 client = get_client(obj)
182 path = path or obj.get("VAULT_PATH_FOR_DYNACONF")
183 try:
184 return client.list(f"/secret/metadata/{path}")["data"]["keys"]
185 except TypeError:
186 return []
187
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/dynaconf/loaders/vault_loader.py b/dynaconf/loaders/vault_loader.py
--- a/dynaconf/loaders/vault_loader.py
+++ b/dynaconf/loaders/vault_loader.py
@@ -58,7 +58,7 @@
"Vault authentication error: is VAULT_TOKEN_FOR_DYNACONF or "
"VAULT_ROLE_ID_FOR_DYNACONF defined?"
)
- client.kv.default_kv_version = obj.VAULT_KV_VERSION_FOR_DYNACONF
+ client.secrets.kv.default_kv_version = obj.VAULT_KV_VERSION_FOR_DYNACONF
return client
|
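
The one-line change above matters because hvac routes the maintained KV accessor through `Client.secrets`; the deprecated top-level `client.kv` property is what triggers the warning quoted in the issue. A short sketch of the difference, assuming the dev-mode Vault from the loader's own comment (root token `myroot` on localhost:8200) and a made-up secret path:

```python
import hvac

client = hvac.Client(url="http://localhost:8200", token="myroot")

# Deprecated spelling that emits the DeprecationWarning from the issue:
# client.kv.default_kv_version = 2

# Spelling used by the patch:
client.secrets.kv.default_kv_version = 2

secret = client.secrets.kv.v2.read_secret_version(path="dynaconf/default")
print(secret["data"]["data"])
```
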
{"golden_diff": "diff --git a/dynaconf/loaders/vault_loader.py b/dynaconf/loaders/vault_loader.py\n--- a/dynaconf/loaders/vault_loader.py\n+++ b/dynaconf/loaders/vault_loader.py\n@@ -58,7 +58,7 @@\n \"Vault authentication error: is VAULT_TOKEN_FOR_DYNACONF or \"\n \"VAULT_ROLE_ID_FOR_DYNACONF defined?\"\n )\n- client.kv.default_kv_version = obj.VAULT_KV_VERSION_FOR_DYNACONF\n+ client.secrets.kv.default_kv_version = obj.VAULT_KV_VERSION_FOR_DYNACONF\n return client\n", "issue": "[RFC] Resolve depreciation warning for depreciated property kv\n**Is your feature request related to a problem? Please describe.**\r\nYes, Currently we are hitting the depreciation warning in hvac 0.11 since the kv property is depreciated and adviced to use from `Client.secrets`\r\n\r\nClear Warning:\r\nDeprecationWarning: Call to deprecated property 'kv'. This property will be removed in version '0.9.0' Please use the 'kv' property on the 'Client.secrets' attribute moving forward\r\n\r\n**Describe the solution you'd like**\r\nRemove the usage of kv property directly in dynaconf and use if from `Client.secrets`\r\n\r\n**Describe alternatives you've considered**\r\nThe alternative is not required.\r\n\r\n\n[RFC] Resolve depreciation warning for depreciated property kv\n**Is your feature request related to a problem? Please describe.**\r\nYes, Currently we are hitting the depreciation warning in hvac 0.11 since the kv property is depreciated and adviced to use from `Client.secrets`\r\n\r\nClear Warning:\r\nDeprecationWarning: Call to deprecated property 'kv'. This property will be removed in version '0.9.0' Please use the 'kv' property on the 'Client.secrets' attribute moving forward\r\n\r\n**Describe the solution you'd like**\r\nRemove the usage of kv property directly in dynaconf and use if from `Client.secrets`\r\n\r\n**Describe alternatives you've considered**\r\nThe alternative is not required.\r\n\r\n\n", "before_files": [{"content": "# docker run -e 'VAULT_DEV_ROOT_TOKEN_ID=myroot' -p 8200:8200 vault\n# pip install hvac\nfrom __future__ import annotations\n\nfrom dynaconf.utils import build_env_list\nfrom dynaconf.utils.parse_conf import parse_conf_data\n\ntry:\n import boto3\nexcept ImportError:\n boto3 = None\n\ntry:\n from hvac import Client\n from hvac.exceptions import InvalidPath\nexcept ImportError:\n raise ImportError(\n \"vault package is not installed in your environment. \"\n \"`pip install dynaconf[vault]` or disable the vault loader with \"\n \"export VAULT_ENABLED_FOR_DYNACONF=false\"\n )\n\n\nIDENTIFIER = \"vault\"\n\n\n# backwards compatibility\n_get_env_list = build_env_list\n\n\ndef get_client(obj):\n client = Client(\n **{k: v for k, v in obj.VAULT_FOR_DYNACONF.items() if v is not None}\n )\n if obj.VAULT_ROLE_ID_FOR_DYNACONF is not None:\n client.auth_approle(\n role_id=obj.VAULT_ROLE_ID_FOR_DYNACONF,\n secret_id=obj.get(\"VAULT_SECRET_ID_FOR_DYNACONF\"),\n )\n elif obj.VAULT_ROOT_TOKEN_FOR_DYNACONF is not None:\n client.token = obj.VAULT_ROOT_TOKEN_FOR_DYNACONF\n elif obj.VAULT_AUTH_WITH_IAM_FOR_DYNACONF:\n if boto3 is None:\n raise ImportError(\n \"boto3 package is not installed in your environment. 
\"\n \"`pip install boto3` or disable the VAULT_AUTH_WITH_IAM\"\n )\n\n session = boto3.Session()\n credentials = session.get_credentials()\n client.auth.aws.iam_login(\n credentials.access_key,\n credentials.secret_key,\n credentials.token,\n role=obj.VAULT_AUTH_ROLE_FOR_DYNACONF,\n )\n assert client.is_authenticated(), (\n \"Vault authentication error: is VAULT_TOKEN_FOR_DYNACONF or \"\n \"VAULT_ROLE_ID_FOR_DYNACONF defined?\"\n )\n client.kv.default_kv_version = obj.VAULT_KV_VERSION_FOR_DYNACONF\n return client\n\n\ndef load(obj, env=None, silent=None, key=None):\n \"\"\"Reads and loads in to \"settings\" a single key or all keys from vault\n\n :param obj: the settings instance\n :param env: settings env default='DYNACONF'\n :param silent: if errors should raise\n :param key: if defined load a single key, else load all in env\n :return: None\n \"\"\"\n client = get_client(obj)\n try:\n if obj.VAULT_KV_VERSION_FOR_DYNACONF == 2:\n dirs = client.secrets.kv.v2.list_secrets(\n path=obj.VAULT_PATH_FOR_DYNACONF,\n mount_point=obj.VAULT_MOUNT_POINT_FOR_DYNACONF,\n )[\"data\"][\"keys\"]\n else:\n dirs = client.secrets.kv.v1.list_secrets(\n path=obj.VAULT_PATH_FOR_DYNACONF,\n mount_point=obj.VAULT_MOUNT_POINT_FOR_DYNACONF,\n )[\"data\"][\"keys\"]\n except InvalidPath:\n # The given path is not a directory\n dirs = []\n # First look for secrets into environments less store\n if not obj.ENVIRONMENTS_FOR_DYNACONF:\n # By adding '', dynaconf will now read secrets from environments-less\n # store which are not written by `dynaconf write` to Vault store\n env_list = [obj.MAIN_ENV_FOR_DYNACONF.lower(), \"\"]\n # Finally, look for secret into all the environments\n else:\n env_list = dirs + build_env_list(obj, env)\n for env in env_list:\n path = \"/\".join([obj.VAULT_PATH_FOR_DYNACONF, env])\n try:\n if obj.VAULT_KV_VERSION_FOR_DYNACONF == 2:\n data = client.secrets.kv.v2.read_secret_version(\n path, mount_point=obj.VAULT_MOUNT_POINT_FOR_DYNACONF\n )\n else:\n data = client.secrets.kv.read_secret(\n \"data/\" + path,\n mount_point=obj.VAULT_MOUNT_POINT_FOR_DYNACONF,\n )\n except InvalidPath:\n # If the path doesn't exist, ignore it and set data to None\n data = None\n if data:\n # There seems to be a data dict within a data dict,\n # extract the inner data\n data = data.get(\"data\", {}).get(\"data\", {})\n try:\n if (\n obj.VAULT_KV_VERSION_FOR_DYNACONF == 2\n and obj.ENVIRONMENTS_FOR_DYNACONF\n and data\n ):\n data = data.get(\"data\", {})\n if data and key:\n value = parse_conf_data(\n data.get(key), tomlfy=True, box_settings=obj\n )\n if value:\n obj.set(key, value)\n elif data:\n obj.update(data, loader_identifier=IDENTIFIER, tomlfy=True)\n except Exception:\n if silent:\n return False\n raise\n\n\ndef write(obj, data=None, **kwargs):\n \"\"\"Write a value in to loader source\n\n :param obj: settings object\n :param data: vars to be stored\n :param kwargs: vars to be stored\n :return:\n \"\"\"\n if obj.VAULT_ENABLED_FOR_DYNACONF is False:\n raise RuntimeError(\n \"Vault is not configured \\n\"\n \"export VAULT_ENABLED_FOR_DYNACONF=true\\n\"\n \"and configure the VAULT_FOR_DYNACONF_* variables\"\n )\n data = data or {}\n data.update(kwargs)\n if not data:\n raise AttributeError(\"Data must be provided\")\n data = {\"data\": data}\n client = get_client(obj)\n if obj.VAULT_KV_VERSION_FOR_DYNACONF == 1:\n mount_point = obj.VAULT_MOUNT_POINT_FOR_DYNACONF + \"/data\"\n else:\n mount_point = obj.VAULT_MOUNT_POINT_FOR_DYNACONF\n path = \"/\".join([obj.VAULT_PATH_FOR_DYNACONF, obj.current_env.lower()])\n 
client.secrets.kv.create_or_update_secret(\n path, secret=data, mount_point=mount_point\n )\n load(obj)\n\n\ndef list_envs(obj, path=\"\"):\n \"\"\"\n This function is a helper to get a list of all the existing envs in\n the source of data, the use case is:\n existing_envs = vault_loader.list_envs(settings)\n for env in exiting_envs:\n with settings.using_env(env): # switch to the env\n # do something with a key of that env\n\n :param obj: settings object\n :param path: path to the vault secrets\n :return: list containing all the keys at the given path\n \"\"\"\n client = get_client(obj)\n path = path or obj.get(\"VAULT_PATH_FOR_DYNACONF\")\n try:\n return client.list(f\"/secret/metadata/{path}\")[\"data\"][\"keys\"]\n except TypeError:\n return []\n", "path": "dynaconf/loaders/vault_loader.py"}], "after_files": [{"content": "# docker run -e 'VAULT_DEV_ROOT_TOKEN_ID=myroot' -p 8200:8200 vault\n# pip install hvac\nfrom __future__ import annotations\n\nfrom dynaconf.utils import build_env_list\nfrom dynaconf.utils.parse_conf import parse_conf_data\n\ntry:\n import boto3\nexcept ImportError:\n boto3 = None\n\ntry:\n from hvac import Client\n from hvac.exceptions import InvalidPath\nexcept ImportError:\n raise ImportError(\n \"vault package is not installed in your environment. \"\n \"`pip install dynaconf[vault]` or disable the vault loader with \"\n \"export VAULT_ENABLED_FOR_DYNACONF=false\"\n )\n\n\nIDENTIFIER = \"vault\"\n\n\n# backwards compatibility\n_get_env_list = build_env_list\n\n\ndef get_client(obj):\n client = Client(\n **{k: v for k, v in obj.VAULT_FOR_DYNACONF.items() if v is not None}\n )\n if obj.VAULT_ROLE_ID_FOR_DYNACONF is not None:\n client.auth_approle(\n role_id=obj.VAULT_ROLE_ID_FOR_DYNACONF,\n secret_id=obj.get(\"VAULT_SECRET_ID_FOR_DYNACONF\"),\n )\n elif obj.VAULT_ROOT_TOKEN_FOR_DYNACONF is not None:\n client.token = obj.VAULT_ROOT_TOKEN_FOR_DYNACONF\n elif obj.VAULT_AUTH_WITH_IAM_FOR_DYNACONF:\n if boto3 is None:\n raise ImportError(\n \"boto3 package is not installed in your environment. 
\"\n \"`pip install boto3` or disable the VAULT_AUTH_WITH_IAM\"\n )\n\n session = boto3.Session()\n credentials = session.get_credentials()\n client.auth.aws.iam_login(\n credentials.access_key,\n credentials.secret_key,\n credentials.token,\n role=obj.VAULT_AUTH_ROLE_FOR_DYNACONF,\n )\n assert client.is_authenticated(), (\n \"Vault authentication error: is VAULT_TOKEN_FOR_DYNACONF or \"\n \"VAULT_ROLE_ID_FOR_DYNACONF defined?\"\n )\n client.secrets.kv.default_kv_version = obj.VAULT_KV_VERSION_FOR_DYNACONF\n return client\n\n\ndef load(obj, env=None, silent=None, key=None):\n \"\"\"Reads and loads in to \"settings\" a single key or all keys from vault\n\n :param obj: the settings instance\n :param env: settings env default='DYNACONF'\n :param silent: if errors should raise\n :param key: if defined load a single key, else load all in env\n :return: None\n \"\"\"\n client = get_client(obj)\n try:\n if obj.VAULT_KV_VERSION_FOR_DYNACONF == 2:\n dirs = client.secrets.kv.v2.list_secrets(\n path=obj.VAULT_PATH_FOR_DYNACONF,\n mount_point=obj.VAULT_MOUNT_POINT_FOR_DYNACONF,\n )[\"data\"][\"keys\"]\n else:\n dirs = client.secrets.kv.v1.list_secrets(\n path=obj.VAULT_PATH_FOR_DYNACONF,\n mount_point=obj.VAULT_MOUNT_POINT_FOR_DYNACONF,\n )[\"data\"][\"keys\"]\n except InvalidPath:\n # The given path is not a directory\n dirs = []\n # First look for secrets into environments less store\n if not obj.ENVIRONMENTS_FOR_DYNACONF:\n # By adding '', dynaconf will now read secrets from environments-less\n # store which are not written by `dynaconf write` to Vault store\n env_list = [obj.MAIN_ENV_FOR_DYNACONF.lower(), \"\"]\n # Finally, look for secret into all the environments\n else:\n env_list = dirs + build_env_list(obj, env)\n for env in env_list:\n path = \"/\".join([obj.VAULT_PATH_FOR_DYNACONF, env])\n try:\n if obj.VAULT_KV_VERSION_FOR_DYNACONF == 2:\n data = client.secrets.kv.v2.read_secret_version(\n path, mount_point=obj.VAULT_MOUNT_POINT_FOR_DYNACONF\n )\n else:\n data = client.secrets.kv.read_secret(\n \"data/\" + path,\n mount_point=obj.VAULT_MOUNT_POINT_FOR_DYNACONF,\n )\n except InvalidPath:\n # If the path doesn't exist, ignore it and set data to None\n data = None\n if data:\n # There seems to be a data dict within a data dict,\n # extract the inner data\n data = data.get(\"data\", {}).get(\"data\", {})\n try:\n if (\n obj.VAULT_KV_VERSION_FOR_DYNACONF == 2\n and obj.ENVIRONMENTS_FOR_DYNACONF\n and data\n ):\n data = data.get(\"data\", {})\n if data and key:\n value = parse_conf_data(\n data.get(key), tomlfy=True, box_settings=obj\n )\n if value:\n obj.set(key, value)\n elif data:\n obj.update(data, loader_identifier=IDENTIFIER, tomlfy=True)\n except Exception:\n if silent:\n return False\n raise\n\n\ndef write(obj, data=None, **kwargs):\n \"\"\"Write a value in to loader source\n\n :param obj: settings object\n :param data: vars to be stored\n :param kwargs: vars to be stored\n :return:\n \"\"\"\n if obj.VAULT_ENABLED_FOR_DYNACONF is False:\n raise RuntimeError(\n \"Vault is not configured \\n\"\n \"export VAULT_ENABLED_FOR_DYNACONF=true\\n\"\n \"and configure the VAULT_FOR_DYNACONF_* variables\"\n )\n data = data or {}\n data.update(kwargs)\n if not data:\n raise AttributeError(\"Data must be provided\")\n data = {\"data\": data}\n client = get_client(obj)\n if obj.VAULT_KV_VERSION_FOR_DYNACONF == 1:\n mount_point = obj.VAULT_MOUNT_POINT_FOR_DYNACONF + \"/data\"\n else:\n mount_point = obj.VAULT_MOUNT_POINT_FOR_DYNACONF\n path = \"/\".join([obj.VAULT_PATH_FOR_DYNACONF, 
obj.current_env.lower()])\n client.secrets.kv.create_or_update_secret(\n path, secret=data, mount_point=mount_point\n )\n load(obj)\n\n\ndef list_envs(obj, path=\"\"):\n \"\"\"\n This function is a helper to get a list of all the existing envs in\n the source of data, the use case is:\n existing_envs = vault_loader.list_envs(settings)\n for env in exiting_envs:\n with settings.using_env(env): # switch to the env\n # do something with a key of that env\n\n :param obj: settings object\n :param path: path to the vault secrets\n :return: list containing all the keys at the given path\n \"\"\"\n client = get_client(obj)\n path = path or obj.get(\"VAULT_PATH_FOR_DYNACONF\")\n try:\n return client.list(f\"/secret/metadata/{path}\")[\"data\"][\"keys\"]\n except TypeError:\n return []\n", "path": "dynaconf/loaders/vault_loader.py"}]}
| 2,636 | 146 |
gh_patches_debug_9131
|
rasdani/github-patches
|
git_diff
|
dask__dask-7623
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Note on `Add x.str.cat (#3028)` (lines 125-126)
It's not clear to me why `String.str.cat` isn't supported in Dask when `others == None`. Not having the ability to concat a single series into a string is a significant cost, I think. Unless there's a reason for this condition, I recommend deleting lines 126-126.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dask/dataframe/accessor.py`
Content:
```
1 from functools import partial
2
3 import numpy as np
4 import pandas as pd
5
6 from ..utils import derived_from
7
8
9 def maybe_wrap_pandas(obj, x):
10 if isinstance(x, np.ndarray):
11 if isinstance(obj, pd.Series):
12 return pd.Series(x, index=obj.index, dtype=x.dtype)
13 return pd.Index(x)
14 return x
15
16
17 class Accessor:
18 """
19 Base class for pandas Accessor objects cat, dt, and str.
20
21 Notes
22 -----
23 Subclasses should define ``_accessor_name``
24 """
25
26 _not_implemented = set()
27
28 def __init__(self, series):
29 from .core import Series
30
31 if not isinstance(series, Series):
32 raise ValueError("Accessor cannot be initialized")
33
34 series_meta = series._meta
35 if hasattr(series_meta, "to_series"): # is index-like
36 series_meta = series_meta.to_series()
37 meta = getattr(series_meta, self._accessor_name)
38
39 self._meta = meta
40 self._series = series
41
42 @staticmethod
43 def _delegate_property(obj, accessor, attr):
44 out = getattr(getattr(obj, accessor, obj), attr)
45 return maybe_wrap_pandas(obj, out)
46
47 @staticmethod
48 def _delegate_method(obj, accessor, attr, args, kwargs):
49 out = getattr(getattr(obj, accessor, obj), attr)(*args, **kwargs)
50 return maybe_wrap_pandas(obj, out)
51
52 def _property_map(self, attr):
53 meta = self._delegate_property(self._series._meta, self._accessor_name, attr)
54 token = "%s-%s" % (self._accessor_name, attr)
55 return self._series.map_partitions(
56 self._delegate_property, self._accessor_name, attr, token=token, meta=meta
57 )
58
59 def _function_map(self, attr, *args, **kwargs):
60 if "meta" in kwargs:
61 meta = kwargs.pop("meta")
62 else:
63 meta = self._delegate_method(
64 self._series._meta_nonempty, self._accessor_name, attr, args, kwargs
65 )
66 token = "%s-%s" % (self._accessor_name, attr)
67 return self._series.map_partitions(
68 self._delegate_method,
69 self._accessor_name,
70 attr,
71 args,
72 kwargs,
73 meta=meta,
74 token=token,
75 )
76
77 @property
78 def _delegates(self):
79 return set(dir(self._meta)) - self._not_implemented
80
81 def __dir__(self):
82 o = self._delegates
83 o.update(self.__dict__)
84 o.update(dir(type(self)))
85 return list(o)
86
87 def __getattr__(self, key):
88 if key in self._delegates:
89 if callable(getattr(self._meta, key)):
90 return partial(self._function_map, key)
91 else:
92 return self._property_map(key)
93 else:
94 raise AttributeError(key)
95
96
97 class DatetimeAccessor(Accessor):
98 """Accessor object for datetimelike properties of the Series values.
99
100 Examples
101 --------
102
103 >>> s.dt.microsecond # doctest: +SKIP
104 """
105
106 _accessor_name = "dt"
107
108
109 class StringAccessor(Accessor):
110 """Accessor object for string properties of the Series values.
111
112 Examples
113 --------
114
115 >>> s.str.lower() # doctest: +SKIP
116 """
117
118 _accessor_name = "str"
119 _not_implemented = {"get_dummies"}
120
121 @derived_from(pd.core.strings.StringMethods)
122 def split(self, pat=None, n=-1, expand=False):
123 if expand:
124 if n == -1:
125 raise NotImplementedError(
126 "To use the expand parameter you must specify the number of "
127 "expected splits with the n= parameter. Usually n splits result in n+1 output columns."
128 )
129 else:
130 delimiter = " " if pat is None else pat
131 meta = self._series._meta._constructor(
132 [delimiter.join(["a"] * (n + 1))],
133 index=self._series._meta_nonempty[:1].index,
134 )
135 meta = meta.str.split(n=n, expand=expand, pat=pat)
136 else:
137 meta = (self._series.name, object)
138 return self._function_map("split", pat=pat, n=n, expand=expand, meta=meta)
139
140 @derived_from(pd.core.strings.StringMethods)
141 def cat(self, others=None, sep=None, na_rep=None):
142 from .core import Index, Series
143
144 if others is None:
145 raise NotImplementedError("x.str.cat() with `others == None`")
146
147 valid_types = (Series, Index, pd.Series, pd.Index)
148 if isinstance(others, valid_types):
149 others = [others]
150 elif not all(isinstance(a, valid_types) for a in others):
151 raise TypeError("others must be Series/Index")
152
153 return self._series.map_partitions(
154 str_cat, *others, sep=sep, na_rep=na_rep, meta=self._series._meta
155 )
156
157 @derived_from(pd.core.strings.StringMethods)
158 def extractall(self, pat, flags=0):
159 return self._series.map_partitions(
160 str_extractall, pat, flags, token="str-extractall"
161 )
162
163 def __getitem__(self, index):
164 return self._series.map_partitions(str_get, index, meta=self._series._meta)
165
166
167 def str_extractall(series, pat, flags):
168 return series.str.extractall(pat, flags=flags)
169
170
171 def str_get(series, index):
172 """ Implements series.str[index] """
173 return series.str[index]
174
175
176 def str_cat(self, *others, **kwargs):
177 return self.str.cat(others=others, **kwargs)
178
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/dask/dataframe/accessor.py b/dask/dataframe/accessor.py
--- a/dask/dataframe/accessor.py
+++ b/dask/dataframe/accessor.py
@@ -142,7 +142,15 @@
from .core import Index, Series
if others is None:
- raise NotImplementedError("x.str.cat() with `others == None`")
+
+ def str_cat_none(x):
+
+ if isinstance(x, (Series, Index)):
+ x = x.compute()
+
+ return x.str.cat(sep=sep, na_rep=na_rep)
+
+ return self._series.reduction(chunk=str_cat_none, aggregate=str_cat_none)
valid_types = (Series, Index, pd.Series, pd.Index)
if isinstance(others, valid_types):
|
{"golden_diff": "diff --git a/dask/dataframe/accessor.py b/dask/dataframe/accessor.py\n--- a/dask/dataframe/accessor.py\n+++ b/dask/dataframe/accessor.py\n@@ -142,7 +142,15 @@\n from .core import Index, Series\n \n if others is None:\n- raise NotImplementedError(\"x.str.cat() with `others == None`\")\n+\n+ def str_cat_none(x):\n+\n+ if isinstance(x, (Series, Index)):\n+ x = x.compute()\n+\n+ return x.str.cat(sep=sep, na_rep=na_rep)\n+\n+ return self._series.reduction(chunk=str_cat_none, aggregate=str_cat_none)\n \n valid_types = (Series, Index, pd.Series, pd.Index)\n if isinstance(others, valid_types):\n", "issue": "Note on `Add x.str.cat (#3028)` (lines 125-126)\nIt's not clear to me why `String.str.cat` isn't supported in Dask when `others == None`. Not having the ability to concat a single series into a string is a significant cost, I think. Unless there's a reason for this condition, I recommend deleting lines 126-126.\r\n\n", "before_files": [{"content": "from functools import partial\n\nimport numpy as np\nimport pandas as pd\n\nfrom ..utils import derived_from\n\n\ndef maybe_wrap_pandas(obj, x):\n if isinstance(x, np.ndarray):\n if isinstance(obj, pd.Series):\n return pd.Series(x, index=obj.index, dtype=x.dtype)\n return pd.Index(x)\n return x\n\n\nclass Accessor:\n \"\"\"\n Base class for pandas Accessor objects cat, dt, and str.\n\n Notes\n -----\n Subclasses should define ``_accessor_name``\n \"\"\"\n\n _not_implemented = set()\n\n def __init__(self, series):\n from .core import Series\n\n if not isinstance(series, Series):\n raise ValueError(\"Accessor cannot be initialized\")\n\n series_meta = series._meta\n if hasattr(series_meta, \"to_series\"): # is index-like\n series_meta = series_meta.to_series()\n meta = getattr(series_meta, self._accessor_name)\n\n self._meta = meta\n self._series = series\n\n @staticmethod\n def _delegate_property(obj, accessor, attr):\n out = getattr(getattr(obj, accessor, obj), attr)\n return maybe_wrap_pandas(obj, out)\n\n @staticmethod\n def _delegate_method(obj, accessor, attr, args, kwargs):\n out = getattr(getattr(obj, accessor, obj), attr)(*args, **kwargs)\n return maybe_wrap_pandas(obj, out)\n\n def _property_map(self, attr):\n meta = self._delegate_property(self._series._meta, self._accessor_name, attr)\n token = \"%s-%s\" % (self._accessor_name, attr)\n return self._series.map_partitions(\n self._delegate_property, self._accessor_name, attr, token=token, meta=meta\n )\n\n def _function_map(self, attr, *args, **kwargs):\n if \"meta\" in kwargs:\n meta = kwargs.pop(\"meta\")\n else:\n meta = self._delegate_method(\n self._series._meta_nonempty, self._accessor_name, attr, args, kwargs\n )\n token = \"%s-%s\" % (self._accessor_name, attr)\n return self._series.map_partitions(\n self._delegate_method,\n self._accessor_name,\n attr,\n args,\n kwargs,\n meta=meta,\n token=token,\n )\n\n @property\n def _delegates(self):\n return set(dir(self._meta)) - self._not_implemented\n\n def __dir__(self):\n o = self._delegates\n o.update(self.__dict__)\n o.update(dir(type(self)))\n return list(o)\n\n def __getattr__(self, key):\n if key in self._delegates:\n if callable(getattr(self._meta, key)):\n return partial(self._function_map, key)\n else:\n return self._property_map(key)\n else:\n raise AttributeError(key)\n\n\nclass DatetimeAccessor(Accessor):\n \"\"\"Accessor object for datetimelike properties of the Series values.\n\n Examples\n --------\n\n >>> s.dt.microsecond # doctest: +SKIP\n \"\"\"\n\n _accessor_name = \"dt\"\n\n\nclass 
StringAccessor(Accessor):\n \"\"\"Accessor object for string properties of the Series values.\n\n Examples\n --------\n\n >>> s.str.lower() # doctest: +SKIP\n \"\"\"\n\n _accessor_name = \"str\"\n _not_implemented = {\"get_dummies\"}\n\n @derived_from(pd.core.strings.StringMethods)\n def split(self, pat=None, n=-1, expand=False):\n if expand:\n if n == -1:\n raise NotImplementedError(\n \"To use the expand parameter you must specify the number of \"\n \"expected splits with the n= parameter. Usually n splits result in n+1 output columns.\"\n )\n else:\n delimiter = \" \" if pat is None else pat\n meta = self._series._meta._constructor(\n [delimiter.join([\"a\"] * (n + 1))],\n index=self._series._meta_nonempty[:1].index,\n )\n meta = meta.str.split(n=n, expand=expand, pat=pat)\n else:\n meta = (self._series.name, object)\n return self._function_map(\"split\", pat=pat, n=n, expand=expand, meta=meta)\n\n @derived_from(pd.core.strings.StringMethods)\n def cat(self, others=None, sep=None, na_rep=None):\n from .core import Index, Series\n\n if others is None:\n raise NotImplementedError(\"x.str.cat() with `others == None`\")\n\n valid_types = (Series, Index, pd.Series, pd.Index)\n if isinstance(others, valid_types):\n others = [others]\n elif not all(isinstance(a, valid_types) for a in others):\n raise TypeError(\"others must be Series/Index\")\n\n return self._series.map_partitions(\n str_cat, *others, sep=sep, na_rep=na_rep, meta=self._series._meta\n )\n\n @derived_from(pd.core.strings.StringMethods)\n def extractall(self, pat, flags=0):\n return self._series.map_partitions(\n str_extractall, pat, flags, token=\"str-extractall\"\n )\n\n def __getitem__(self, index):\n return self._series.map_partitions(str_get, index, meta=self._series._meta)\n\n\ndef str_extractall(series, pat, flags):\n return series.str.extractall(pat, flags=flags)\n\n\ndef str_get(series, index):\n \"\"\" Implements series.str[index] \"\"\"\n return series.str[index]\n\n\ndef str_cat(self, *others, **kwargs):\n return self.str.cat(others=others, **kwargs)\n", "path": "dask/dataframe/accessor.py"}], "after_files": [{"content": "from functools import partial\n\nimport numpy as np\nimport pandas as pd\n\nfrom ..utils import derived_from\n\n\ndef maybe_wrap_pandas(obj, x):\n if isinstance(x, np.ndarray):\n if isinstance(obj, pd.Series):\n return pd.Series(x, index=obj.index, dtype=x.dtype)\n return pd.Index(x)\n return x\n\n\nclass Accessor:\n \"\"\"\n Base class for pandas Accessor objects cat, dt, and str.\n\n Notes\n -----\n Subclasses should define ``_accessor_name``\n \"\"\"\n\n _not_implemented = set()\n\n def __init__(self, series):\n from .core import Series\n\n if not isinstance(series, Series):\n raise ValueError(\"Accessor cannot be initialized\")\n\n series_meta = series._meta\n if hasattr(series_meta, \"to_series\"): # is index-like\n series_meta = series_meta.to_series()\n meta = getattr(series_meta, self._accessor_name)\n\n self._meta = meta\n self._series = series\n\n @staticmethod\n def _delegate_property(obj, accessor, attr):\n out = getattr(getattr(obj, accessor, obj), attr)\n return maybe_wrap_pandas(obj, out)\n\n @staticmethod\n def _delegate_method(obj, accessor, attr, args, kwargs):\n out = getattr(getattr(obj, accessor, obj), attr)(*args, **kwargs)\n return maybe_wrap_pandas(obj, out)\n\n def _property_map(self, attr):\n meta = self._delegate_property(self._series._meta, self._accessor_name, attr)\n token = \"%s-%s\" % (self._accessor_name, attr)\n return self._series.map_partitions(\n 
self._delegate_property, self._accessor_name, attr, token=token, meta=meta\n )\n\n def _function_map(self, attr, *args, **kwargs):\n if \"meta\" in kwargs:\n meta = kwargs.pop(\"meta\")\n else:\n meta = self._delegate_method(\n self._series._meta_nonempty, self._accessor_name, attr, args, kwargs\n )\n token = \"%s-%s\" % (self._accessor_name, attr)\n return self._series.map_partitions(\n self._delegate_method,\n self._accessor_name,\n attr,\n args,\n kwargs,\n meta=meta,\n token=token,\n )\n\n @property\n def _delegates(self):\n return set(dir(self._meta)) - self._not_implemented\n\n def __dir__(self):\n o = self._delegates\n o.update(self.__dict__)\n o.update(dir(type(self)))\n return list(o)\n\n def __getattr__(self, key):\n if key in self._delegates:\n if callable(getattr(self._meta, key)):\n return partial(self._function_map, key)\n else:\n return self._property_map(key)\n else:\n raise AttributeError(key)\n\n\nclass DatetimeAccessor(Accessor):\n \"\"\"Accessor object for datetimelike properties of the Series values.\n\n Examples\n --------\n\n >>> s.dt.microsecond # doctest: +SKIP\n \"\"\"\n\n _accessor_name = \"dt\"\n\n\nclass StringAccessor(Accessor):\n \"\"\"Accessor object for string properties of the Series values.\n\n Examples\n --------\n\n >>> s.str.lower() # doctest: +SKIP\n \"\"\"\n\n _accessor_name = \"str\"\n _not_implemented = {\"get_dummies\"}\n\n @derived_from(pd.core.strings.StringMethods)\n def split(self, pat=None, n=-1, expand=False):\n if expand:\n if n == -1:\n raise NotImplementedError(\n \"To use the expand parameter you must specify the number of \"\n \"expected splits with the n= parameter. Usually n splits result in n+1 output columns.\"\n )\n else:\n delimiter = \" \" if pat is None else pat\n meta = self._series._meta._constructor(\n [delimiter.join([\"a\"] * (n + 1))],\n index=self._series._meta_nonempty[:1].index,\n )\n meta = meta.str.split(n=n, expand=expand, pat=pat)\n else:\n meta = (self._series.name, object)\n return self._function_map(\"split\", pat=pat, n=n, expand=expand, meta=meta)\n\n @derived_from(pd.core.strings.StringMethods)\n def cat(self, others=None, sep=None, na_rep=None):\n from .core import Index, Series\n\n if others is None:\n\n def str_cat_none(x):\n\n if isinstance(x, (Series, Index)):\n x = x.compute()\n\n return x.str.cat(sep=sep, na_rep=na_rep)\n\n return self._series.reduction(chunk=str_cat_none, aggregate=str_cat_none)\n\n valid_types = (Series, Index, pd.Series, pd.Index)\n if isinstance(others, valid_types):\n others = [others]\n elif not all(isinstance(a, valid_types) for a in others):\n raise TypeError(\"others must be Series/Index\")\n\n return self._series.map_partitions(\n str_cat, *others, sep=sep, na_rep=na_rep, meta=self._series._meta\n )\n\n @derived_from(pd.core.strings.StringMethods)\n def extractall(self, pat, flags=0):\n return self._series.map_partitions(\n str_extractall, pat, flags, token=\"str-extractall\"\n )\n\n def __getitem__(self, index):\n return self._series.map_partitions(str_get, index, meta=self._series._meta)\n\n\ndef str_extractall(series, pat, flags):\n return series.str.extractall(pat, flags=flags)\n\n\ndef str_get(series, index):\n \"\"\" Implements series.str[index] \"\"\"\n return series.str[index]\n\n\ndef str_cat(self, *others, **kwargs):\n return self.str.cat(others=others, **kwargs)\n", "path": "dask/dataframe/accessor.py"}]}
| 2,032 | 176 |
gh_patches_debug_43
|
rasdani/github-patches
|
git_diff
|
python-discord__site-268
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Ugly prefix on all ID links.
Currently, all the headers that are created by the wiki will have id's that are prefixed with `wiki-toc`. As such, when you want to link a header, the link will look something like https://pythondiscord.com/pages/contributing/site/#wiki-toc-development-environment.
It would be better if this simply said `#development-environment`, so let's change that.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pydis_site/__init__.py`
Content:
```
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pydis_site/__init__.py b/pydis_site/__init__.py
--- a/pydis_site/__init__.py
+++ b/pydis_site/__init__.py
@@ -0,0 +1,4 @@
+from wiki.plugins.macros.mdx import toc
+
+# Remove the toc header prefix. There's no option for this, so we gotta monkey patch it.
+toc.HEADER_ID_PREFIX = ''
|
{"golden_diff": "diff --git a/pydis_site/__init__.py b/pydis_site/__init__.py\n--- a/pydis_site/__init__.py\n+++ b/pydis_site/__init__.py\n@@ -0,0 +1,4 @@\n+from wiki.plugins.macros.mdx import toc\n+\n+# Remove the toc header prefix. There's no option for this, so we gotta monkey patch it.\n+toc.HEADER_ID_PREFIX = ''\n", "issue": "Ugly prefix on all ID links.\nCurrently, all the headers that are created by the wiki will have id's that are prefixed with `wiki-toc`. As such, when you want to link a header, the link will look something like https://pythondiscord.com/pages/contributing/site/#wiki-toc-development-environment.\r\n\r\nIt would be better if this simply said `#development-environment`, so let's change that.\n", "before_files": [{"content": "", "path": "pydis_site/__init__.py"}], "after_files": [{"content": "from wiki.plugins.macros.mdx import toc\n\n# Remove the toc header prefix. There's no option for this, so we gotta monkey patch it.\ntoc.HEADER_ID_PREFIX = ''\n", "path": "pydis_site/__init__.py"}]}
| 350 | 91 |
gh_patches_debug_36586
|
rasdani/github-patches
|
git_diff
|
ESMCI__cime-1055
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support some interface to the shell in our XML
Users like @amametjanov are asking for access to the shell within our XML files. Something like:
```
<env name="NETCDF_PATH">$SHELL{which nc-config | xargs dirname | xargs dirname}</env>
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `utils/python/CIME/XML/generic_xml.py`
Content:
```
1 """
2 Common interface to XML files, this is an abstract class and is expected to
3 be used by other XML interface modules and not directly.
4 """
5 from CIME.XML.standard_module_setup import *
6 from distutils.spawn import find_executable
7 from xml.dom import minidom
8 from CIME.utils import expect, get_cime_root
9
10 logger = logging.getLogger(__name__)
11
12 class GenericXML(object):
13
14 def __init__(self, infile=None, schema=None):
15 """
16 Initialize an object
17 """
18
19 logger.debug("Initializing %s" , infile)
20 self.tree = None
21
22 if infile == None:
23 # if file is not defined just return
24 self.filename = None
25 return
26
27 if os.path.isfile(infile) and os.access(infile, os.R_OK):
28 # If file is defined and exists, read it
29 self.filename = infile
30 self.read(infile, schema)
31 else:
32 # if file does not exist create a root xml element
33 # and set it's id to file
34
35 logger.debug("File %s does not exists." , infile)
36 expect("$" not in infile,"File path not fully resolved %s"%infile)
37
38 self.filename = infile
39 root = ET.Element("xml")
40 self.root = ET.SubElement(root, "file")
41 self.root.set("id", os.path.basename(infile))
42 self.tree = ET.ElementTree(root)
43
44 def read(self, infile, schema=None):
45 """
46 Read and parse an xml file into the object
47 """
48 logger.debug("read: " + infile)
49 if self.tree:
50 self.root.append(ET.parse(infile).getroot())
51 else:
52 self.tree = ET.parse(infile)
53 self.root = self.tree.getroot()
54
55 if schema is not None and self.get_version() > 1.0:
56 self.validate_xml_file(infile, schema)
57
58 logger.debug("File version is %s"%str(self.get_version()))
59
60 def get_version(self):
61 version = self.root.get("version")
62 version = 1.0 if version is None else float(version)
63 return version
64
65 def write(self, outfile=None):
66 """
67 Write an xml file from data in self
68 """
69 if outfile is None:
70 outfile = self.filename
71
72 logger.debug("write: " + outfile)
73
74 xmlstr = self.get_raw_record()
75
76 # xmllint provides a better format option for the output file
77 xmllint = find_executable("xmllint")
78 if xmllint is not None:
79 run_cmd_no_fail("%s --format --output %s -"%(xmllint,outfile), input_str=xmlstr)
80 else:
81 doc = minidom.parseString(xmlstr)
82 with open(outfile,'w') as xmlout:
83 doc.writexml(xmlout,addindent=' ')
84
85 def get_node(self, nodename, attributes=None, root=None, xpath=None):
86 """
87 Get an xml element matching nodename with optional attributes.
88
89 Error unless exactly one match.
90 """
91
92 nodes = self.get_nodes(nodename, attributes=attributes, root=root, xpath=xpath)
93
94 expect(len(nodes) == 1, "Incorrect number of matches, %d, for nodename '%s' and attrs '%s' in file '%s'" %
95 (len(nodes), nodename, attributes, self.filename))
96 return nodes[0]
97
98 def get_optional_node(self, nodename, attributes=None, root=None, xpath=None):
99 """
100 Get an xml element matching nodename with optional attributes.
101
102 Return None if no match.
103 """
104 nodes = self.get_nodes(nodename, attributes=attributes, root=root, xpath=xpath)
105
106 expect(len(nodes) <= 1, "Multiple matches for nodename '%s' and attrs '%s' in file '%s'" %
107 (nodename, attributes, self.filename))
108 return nodes[0] if nodes else None
109
110 def get_nodes(self, nodename, attributes=None, root=None, xpath=None):
111
112 logger.debug("(get_nodes) Input values: %s , %s , %s , %s , %s" , self.__class__.__name__ , nodename , attributes , root , xpath)
113
114 if root is None:
115 root = self.root
116 nodes = []
117
118 expect(attributes is None or xpath is None,
119 " Arguments attributes and xpath are exclusive")
120 if xpath is None:
121 xpath = ".//"+nodename
122
123 if attributes:
124 # xml.etree has limited support for xpath and does not allow more than
125 # one attribute in an xpath query so we query seperately for each attribute
126 # and create a result with the intersection of those lists
127
128 for key, value in attributes.iteritems():
129 if value is not None:
130 expect(isinstance(value, basestring),
131 " Bad value passed for key %s"%key)
132 xpath = ".//%s[@%s=\'%s\']" % (nodename, key, value)
133 logger.debug("xpath is %s"%xpath)
134
135 try:
136 newnodes = root.findall(xpath)
137 except Exception as e:
138 expect(False, "Bad xpath search term '%s', error: %s" % (xpath, e))
139
140 if not nodes:
141 nodes = newnodes
142 else:
143 for node in nodes[:]:
144 if node not in newnodes:
145 nodes.remove(node)
146 if not nodes:
147 return []
148
149 else:
150 logger.debug("xpath: %s" , xpath)
151 nodes = root.findall(xpath)
152
153 logger.debug("Returning %s nodes (%s)" , len(nodes), nodes)
154
155 return nodes
156
157 def add_child(self, node, root=None):
158 """
159 Add element node to self at root
160 """
161 if root is None:
162 root = self.root
163 self.root.append(node)
164
165 def get_value(self, item, attribute=None, resolved=True, subgroup=None): # pylint: disable=unused-argument
166 """
167 get_value is expected to be defined by the derived classes, if you get here
168 the value was not found in the class.
169 """
170 logger.debug("Get Value for " + item)
171 return None
172
173 def get_values(self, vid, attribute=None, resolved=True, subgroup=None):# pylint: disable=unused-argument
174 logger.debug("Get Values for " + vid)
175 return []
176
177 def set_value(self, vid, value, subgroup=None, ignore_type=True): # pylint: disable=unused-argument
178 """
179 ignore_type is not used in this flavor
180 """
181 valnodes = self.get_nodes(vid)
182 if valnodes:
183 for node in valnodes:
184 node.text = value
185
186 def get_resolved_value(self, raw_value):
187 """
188 A value in the xml file may contain references to other xml
189 variables or to environment variables. These are refered to in
190 the perl style with $name and $ENV{name}.
191
192 >>> obj = GenericXML()
193 >>> os.environ["FOO"] = "BAR"
194 >>> os.environ["BAZ"] = "BARF"
195 >>> obj.get_resolved_value("one $ENV{FOO} two $ENV{BAZ} three")
196 'one BAR two BARF three'
197 >>> obj.get_resolved_value("2 + 3 - 1")
198 '4'
199 >>> obj.get_resolved_value("0001-01-01")
200 '0001-01-01'
201 """
202 logger.debug("raw_value %s" % raw_value)
203 reference_re = re.compile(r'\${?(\w+)}?')
204 env_ref_re = re.compile(r'\$ENV\{(\w+)\}')
205 math_re = re.compile(r'\s[+-/*]\s')
206 item_data = raw_value
207
208 if item_data is None:
209 return None
210
211 if type(item_data) is not str:
212 return item_data
213
214 for m in env_ref_re.finditer(item_data):
215 logger.debug("look for %s in env" % item_data)
216 env_var = m.groups()[0]
217 expect(env_var in os.environ, "Undefined env var '%s'" % env_var)
218 item_data = item_data.replace(m.group(), os.environ[env_var])
219
220 for m in reference_re.finditer(item_data):
221 var = m.groups()[0]
222 logger.debug("find: %s" % var)
223 ref = self.get_value(var)
224 if ref is not None:
225 logger.debug("resolve: " + str(ref))
226 item_data = item_data.replace(m.group(), self.get_resolved_value(str(ref)))
227 elif var == "CIMEROOT":
228 cimeroot = get_cime_root()
229 item_data = item_data.replace(m.group(), cimeroot)
230 elif var == "SRCROOT":
231 srcroot = os.path.join(get_cime_root(),"..")
232 item_data = item_data.replace(m.group(), srcroot)
233 elif var in os.environ:
234 # this is a list of suppressed warnings (things normally expected to be resolved in env)
235 if var not in ("USER",):
236 logging.debug("Resolved from env: " + var)
237 item_data = item_data.replace(m.group(), os.environ[var])
238 if math_re.search(item_data):
239 try:
240 tmp = eval(item_data)
241 except:
242 tmp = item_data
243 item_data = str(tmp)
244
245 return item_data
246
247 def add_sub_node(self, node, subnode_name, subnode_text):
248 expect(node is not None," Bad value passed")
249 subnode = ET.Element(subnode_name)
250 subnode.text = subnode_text
251 node.append(subnode)
252 return node
253
254 def validate_xml_file(self, filename, schema):
255 """
256 validate an XML file against a provided schema file using pylint
257 """
258 expect(os.path.isfile(filename),"xml file not found %s"%filename)
259 expect(os.path.isfile(schema),"schema file not found %s"%schema)
260 xmllint = find_executable("xmllint")
261 if xmllint is not None:
262 logger.debug("Checking file %s against schema %s"%(filename, schema))
263 run_cmd_no_fail("%s --noout --schema %s %s"%(xmllint, schema, filename))
264 else:
265 logger.warn("xmllint not found, could not validate file %s"%filename)
266
267 def get_element_text(self, element_name, attributes=None, root=None, xpath=None):
268 element_node = self.get_optional_node(element_name, attributes, root, xpath)
269 if element_node is not None:
270 return element_node.text
271 return None
272
273 def set_element_text(self, element_name, new_text, attributes=None, root=None, xpath=None):
274 element_node = self.get_optional_node(element_name, attributes, root, xpath)
275 if element_node is not None:
276 element_node.text = new_text
277 return new_text
278 return None
279
280 def get_raw_record(self, root=None):
281 if root is None:
282 root = self.root
283 try:
284 xmlstr = ET.tostring(root)
285 except ET.ParseError as e:
286 ET.dump(root)
287 expect(False, "Could not write file %s, xml formatting error '%s'" % (self.filename, e))
288 return xmlstr
289
290
291
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/utils/python/CIME/XML/generic_xml.py b/utils/python/CIME/XML/generic_xml.py
--- a/utils/python/CIME/XML/generic_xml.py
+++ b/utils/python/CIME/XML/generic_xml.py
@@ -7,6 +7,8 @@
from xml.dom import minidom
from CIME.utils import expect, get_cime_root
+import getpass
+
logger = logging.getLogger(__name__)
class GenericXML(object):
@@ -198,10 +200,13 @@
'4'
>>> obj.get_resolved_value("0001-01-01")
'0001-01-01'
+ >>> obj.get_resolved_value("$SHELL{echo hi}")
+ 'hi'
"""
logger.debug("raw_value %s" % raw_value)
reference_re = re.compile(r'\${?(\w+)}?')
env_ref_re = re.compile(r'\$ENV\{(\w+)\}')
+ shell_ref_re = re.compile(r'\$SHELL\{([^}]+)\}')
math_re = re.compile(r'\s[+-/*]\s')
item_data = raw_value
@@ -217,6 +222,11 @@
expect(env_var in os.environ, "Undefined env var '%s'" % env_var)
item_data = item_data.replace(m.group(), os.environ[env_var])
+ for s in shell_ref_re.finditer(item_data):
+ logger.debug("execute %s in shell" % item_data)
+ shell_cmd = s.groups()[0]
+ item_data = item_data.replace(s.group(), run_cmd_no_fail(shell_cmd))
+
for m in reference_re.finditer(item_data):
var = m.groups()[0]
logger.debug("find: %s" % var)
@@ -230,11 +240,9 @@
elif var == "SRCROOT":
srcroot = os.path.join(get_cime_root(),"..")
item_data = item_data.replace(m.group(), srcroot)
- elif var in os.environ:
- # this is a list of suppressed warnings (things normally expected to be resolved in env)
- if var not in ("USER",):
- logging.debug("Resolved from env: " + var)
- item_data = item_data.replace(m.group(), os.environ[var])
+ elif var == "USER":
+ item_data = item_data.replace(m.group(), getpass.getuser())
+
if math_re.search(item_data):
try:
tmp = eval(item_data)
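
A standalone sketch of the `$SHELL{...}` expansion idea follows; it is illustrative only and shells out with subprocess directly, whereas the patch routes the command through `run_cmd_no_fail`.

```python
# Illustrative sketch of $SHELL{...} expansion; not CIME's actual helper.
import re
import subprocess

shell_ref_re = re.compile(r'\$SHELL\{([^}]+)\}')

def resolve_shell_refs(text):
    # Replace each $SHELL{cmd} with the stripped stdout of running cmd.
    for m in shell_ref_re.finditer(text):
        out = subprocess.check_output(m.group(1), shell=True).decode().strip()
        text = text.replace(m.group(), out)
    return text

print(resolve_shell_refs("NETCDF_PATH=$SHELL{echo /usr/local}"))  # -> "NETCDF_PATH=/usr/local"
```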
|
{"golden_diff": "diff --git a/utils/python/CIME/XML/generic_xml.py b/utils/python/CIME/XML/generic_xml.py\n--- a/utils/python/CIME/XML/generic_xml.py\n+++ b/utils/python/CIME/XML/generic_xml.py\n@@ -7,6 +7,8 @@\n from xml.dom import minidom\n from CIME.utils import expect, get_cime_root\n \n+import getpass\n+\n logger = logging.getLogger(__name__)\n \n class GenericXML(object):\n@@ -198,10 +200,13 @@\n '4'\n >>> obj.get_resolved_value(\"0001-01-01\")\n '0001-01-01'\n+ >>> obj.get_resolved_value(\"$SHELL{echo hi}\")\n+ 'hi'\n \"\"\"\n logger.debug(\"raw_value %s\" % raw_value)\n reference_re = re.compile(r'\\${?(\\w+)}?')\n env_ref_re = re.compile(r'\\$ENV\\{(\\w+)\\}')\n+ shell_ref_re = re.compile(r'\\$SHELL\\{([^}]+)\\}')\n math_re = re.compile(r'\\s[+-/*]\\s')\n item_data = raw_value\n \n@@ -217,6 +222,11 @@\n expect(env_var in os.environ, \"Undefined env var '%s'\" % env_var)\n item_data = item_data.replace(m.group(), os.environ[env_var])\n \n+ for s in shell_ref_re.finditer(item_data):\n+ logger.debug(\"execute %s in shell\" % item_data)\n+ shell_cmd = s.groups()[0]\n+ item_data = item_data.replace(s.group(), run_cmd_no_fail(shell_cmd))\n+\n for m in reference_re.finditer(item_data):\n var = m.groups()[0]\n logger.debug(\"find: %s\" % var)\n@@ -230,11 +240,9 @@\n elif var == \"SRCROOT\":\n srcroot = os.path.join(get_cime_root(),\"..\")\n item_data = item_data.replace(m.group(), srcroot)\n- elif var in os.environ:\n- # this is a list of suppressed warnings (things normally expected to be resolved in env)\n- if var not in (\"USER\",):\n- logging.debug(\"Resolved from env: \" + var)\n- item_data = item_data.replace(m.group(), os.environ[var])\n+ elif var == \"USER\":\n+ item_data = item_data.replace(m.group(), getpass.getuser())\n+\n if math_re.search(item_data):\n try:\n tmp = eval(item_data)\n", "issue": "Support some interface to the shell in our XML\nUsers like @amametjanov are asking for access to the shell within our XML files. 
Something like:\r\n\r\n```\r\n<env name=\"NETCDF_PATH\">$SHELL{which nc-config | xargs dirname | xargs dirname}</env>\r\n```\n", "before_files": [{"content": "\"\"\"\nCommon interface to XML files, this is an abstract class and is expected to\nbe used by other XML interface modules and not directly.\n\"\"\"\nfrom CIME.XML.standard_module_setup import *\nfrom distutils.spawn import find_executable\nfrom xml.dom import minidom\nfrom CIME.utils import expect, get_cime_root\n\nlogger = logging.getLogger(__name__)\n\nclass GenericXML(object):\n\n def __init__(self, infile=None, schema=None):\n \"\"\"\n Initialize an object\n \"\"\"\n\n logger.debug(\"Initializing %s\" , infile)\n self.tree = None\n\n if infile == None:\n # if file is not defined just return\n self.filename = None\n return\n\n if os.path.isfile(infile) and os.access(infile, os.R_OK):\n # If file is defined and exists, read it\n self.filename = infile\n self.read(infile, schema)\n else:\n # if file does not exist create a root xml element\n # and set it's id to file\n\n logger.debug(\"File %s does not exists.\" , infile)\n expect(\"$\" not in infile,\"File path not fully resolved %s\"%infile)\n\n self.filename = infile\n root = ET.Element(\"xml\")\n self.root = ET.SubElement(root, \"file\")\n self.root.set(\"id\", os.path.basename(infile))\n self.tree = ET.ElementTree(root)\n\n def read(self, infile, schema=None):\n \"\"\"\n Read and parse an xml file into the object\n \"\"\"\n logger.debug(\"read: \" + infile)\n if self.tree:\n self.root.append(ET.parse(infile).getroot())\n else:\n self.tree = ET.parse(infile)\n self.root = self.tree.getroot()\n\n if schema is not None and self.get_version() > 1.0:\n self.validate_xml_file(infile, schema)\n\n logger.debug(\"File version is %s\"%str(self.get_version()))\n\n def get_version(self):\n version = self.root.get(\"version\")\n version = 1.0 if version is None else float(version)\n return version\n\n def write(self, outfile=None):\n \"\"\"\n Write an xml file from data in self\n \"\"\"\n if outfile is None:\n outfile = self.filename\n\n logger.debug(\"write: \" + outfile)\n\n xmlstr = self.get_raw_record()\n\n # xmllint provides a better format option for the output file\n xmllint = find_executable(\"xmllint\")\n if xmllint is not None:\n run_cmd_no_fail(\"%s --format --output %s -\"%(xmllint,outfile), input_str=xmlstr)\n else:\n doc = minidom.parseString(xmlstr)\n with open(outfile,'w') as xmlout:\n doc.writexml(xmlout,addindent=' ')\n\n def get_node(self, nodename, attributes=None, root=None, xpath=None):\n \"\"\"\n Get an xml element matching nodename with optional attributes.\n\n Error unless exactly one match.\n \"\"\"\n\n nodes = self.get_nodes(nodename, attributes=attributes, root=root, xpath=xpath)\n\n expect(len(nodes) == 1, \"Incorrect number of matches, %d, for nodename '%s' and attrs '%s' in file '%s'\" %\n (len(nodes), nodename, attributes, self.filename))\n return nodes[0]\n\n def get_optional_node(self, nodename, attributes=None, root=None, xpath=None):\n \"\"\"\n Get an xml element matching nodename with optional attributes.\n\n Return None if no match.\n \"\"\"\n nodes = self.get_nodes(nodename, attributes=attributes, root=root, xpath=xpath)\n\n expect(len(nodes) <= 1, \"Multiple matches for nodename '%s' and attrs '%s' in file '%s'\" %\n (nodename, attributes, self.filename))\n return nodes[0] if nodes else None\n\n def get_nodes(self, nodename, attributes=None, root=None, xpath=None):\n\n logger.debug(\"(get_nodes) Input values: %s , %s , %s , %s , %s\" , 
self.__class__.__name__ , nodename , attributes , root , xpath)\n\n if root is None:\n root = self.root\n nodes = []\n\n expect(attributes is None or xpath is None,\n \" Arguments attributes and xpath are exclusive\")\n if xpath is None:\n xpath = \".//\"+nodename\n\n if attributes:\n # xml.etree has limited support for xpath and does not allow more than\n # one attribute in an xpath query so we query seperately for each attribute\n # and create a result with the intersection of those lists\n\n for key, value in attributes.iteritems():\n if value is not None:\n expect(isinstance(value, basestring),\n \" Bad value passed for key %s\"%key)\n xpath = \".//%s[@%s=\\'%s\\']\" % (nodename, key, value)\n logger.debug(\"xpath is %s\"%xpath)\n\n try:\n newnodes = root.findall(xpath)\n except Exception as e:\n expect(False, \"Bad xpath search term '%s', error: %s\" % (xpath, e))\n\n if not nodes:\n nodes = newnodes\n else:\n for node in nodes[:]:\n if node not in newnodes:\n nodes.remove(node)\n if not nodes:\n return []\n\n else:\n logger.debug(\"xpath: %s\" , xpath)\n nodes = root.findall(xpath)\n\n logger.debug(\"Returning %s nodes (%s)\" , len(nodes), nodes)\n\n return nodes\n\n def add_child(self, node, root=None):\n \"\"\"\n Add element node to self at root\n \"\"\"\n if root is None:\n root = self.root\n self.root.append(node)\n\n def get_value(self, item, attribute=None, resolved=True, subgroup=None): # pylint: disable=unused-argument\n \"\"\"\n get_value is expected to be defined by the derived classes, if you get here\n the value was not found in the class.\n \"\"\"\n logger.debug(\"Get Value for \" + item)\n return None\n\n def get_values(self, vid, attribute=None, resolved=True, subgroup=None):# pylint: disable=unused-argument\n logger.debug(\"Get Values for \" + vid)\n return []\n\n def set_value(self, vid, value, subgroup=None, ignore_type=True): # pylint: disable=unused-argument\n \"\"\"\n ignore_type is not used in this flavor\n \"\"\"\n valnodes = self.get_nodes(vid)\n if valnodes:\n for node in valnodes:\n node.text = value\n\n def get_resolved_value(self, raw_value):\n \"\"\"\n A value in the xml file may contain references to other xml\n variables or to environment variables. 
These are refered to in\n the perl style with $name and $ENV{name}.\n\n >>> obj = GenericXML()\n >>> os.environ[\"FOO\"] = \"BAR\"\n >>> os.environ[\"BAZ\"] = \"BARF\"\n >>> obj.get_resolved_value(\"one $ENV{FOO} two $ENV{BAZ} three\")\n 'one BAR two BARF three'\n >>> obj.get_resolved_value(\"2 + 3 - 1\")\n '4'\n >>> obj.get_resolved_value(\"0001-01-01\")\n '0001-01-01'\n \"\"\"\n logger.debug(\"raw_value %s\" % raw_value)\n reference_re = re.compile(r'\\${?(\\w+)}?')\n env_ref_re = re.compile(r'\\$ENV\\{(\\w+)\\}')\n math_re = re.compile(r'\\s[+-/*]\\s')\n item_data = raw_value\n\n if item_data is None:\n return None\n\n if type(item_data) is not str:\n return item_data\n\n for m in env_ref_re.finditer(item_data):\n logger.debug(\"look for %s in env\" % item_data)\n env_var = m.groups()[0]\n expect(env_var in os.environ, \"Undefined env var '%s'\" % env_var)\n item_data = item_data.replace(m.group(), os.environ[env_var])\n\n for m in reference_re.finditer(item_data):\n var = m.groups()[0]\n logger.debug(\"find: %s\" % var)\n ref = self.get_value(var)\n if ref is not None:\n logger.debug(\"resolve: \" + str(ref))\n item_data = item_data.replace(m.group(), self.get_resolved_value(str(ref)))\n elif var == \"CIMEROOT\":\n cimeroot = get_cime_root()\n item_data = item_data.replace(m.group(), cimeroot)\n elif var == \"SRCROOT\":\n srcroot = os.path.join(get_cime_root(),\"..\")\n item_data = item_data.replace(m.group(), srcroot)\n elif var in os.environ:\n # this is a list of suppressed warnings (things normally expected to be resolved in env)\n if var not in (\"USER\",):\n logging.debug(\"Resolved from env: \" + var)\n item_data = item_data.replace(m.group(), os.environ[var])\n if math_re.search(item_data):\n try:\n tmp = eval(item_data)\n except:\n tmp = item_data\n item_data = str(tmp)\n\n return item_data\n\n def add_sub_node(self, node, subnode_name, subnode_text):\n expect(node is not None,\" Bad value passed\")\n subnode = ET.Element(subnode_name)\n subnode.text = subnode_text\n node.append(subnode)\n return node\n\n def validate_xml_file(self, filename, schema):\n \"\"\"\n validate an XML file against a provided schema file using pylint\n \"\"\"\n expect(os.path.isfile(filename),\"xml file not found %s\"%filename)\n expect(os.path.isfile(schema),\"schema file not found %s\"%schema)\n xmllint = find_executable(\"xmllint\")\n if xmllint is not None:\n logger.debug(\"Checking file %s against schema %s\"%(filename, schema))\n run_cmd_no_fail(\"%s --noout --schema %s %s\"%(xmllint, schema, filename))\n else:\n logger.warn(\"xmllint not found, could not validate file %s\"%filename)\n\n def get_element_text(self, element_name, attributes=None, root=None, xpath=None):\n element_node = self.get_optional_node(element_name, attributes, root, xpath)\n if element_node is not None:\n return element_node.text\n return None\n\n def set_element_text(self, element_name, new_text, attributes=None, root=None, xpath=None):\n element_node = self.get_optional_node(element_name, attributes, root, xpath)\n if element_node is not None:\n element_node.text = new_text\n return new_text\n return None\n\n def get_raw_record(self, root=None):\n if root is None:\n root = self.root\n try:\n xmlstr = ET.tostring(root)\n except ET.ParseError as e:\n ET.dump(root)\n expect(False, \"Could not write file %s, xml formatting error '%s'\" % (self.filename, e))\n return xmlstr\n\n\n", "path": "utils/python/CIME/XML/generic_xml.py"}], "after_files": [{"content": "\"\"\"\nCommon interface to XML files, this is an abstract class 
and is expected to\nbe used by other XML interface modules and not directly.\n\"\"\"\nfrom CIME.XML.standard_module_setup import *\nfrom distutils.spawn import find_executable\nfrom xml.dom import minidom\nfrom CIME.utils import expect, get_cime_root\n\nimport getpass\n\nlogger = logging.getLogger(__name__)\n\nclass GenericXML(object):\n\n def __init__(self, infile=None, schema=None):\n \"\"\"\n Initialize an object\n \"\"\"\n\n logger.debug(\"Initializing %s\" , infile)\n self.tree = None\n\n if infile == None:\n # if file is not defined just return\n self.filename = None\n return\n\n if os.path.isfile(infile) and os.access(infile, os.R_OK):\n # If file is defined and exists, read it\n self.filename = infile\n self.read(infile, schema)\n else:\n # if file does not exist create a root xml element\n # and set it's id to file\n\n logger.debug(\"File %s does not exists.\" , infile)\n expect(\"$\" not in infile,\"File path not fully resolved %s\"%infile)\n\n self.filename = infile\n root = ET.Element(\"xml\")\n self.root = ET.SubElement(root, \"file\")\n self.root.set(\"id\", os.path.basename(infile))\n self.tree = ET.ElementTree(root)\n\n def read(self, infile, schema=None):\n \"\"\"\n Read and parse an xml file into the object\n \"\"\"\n logger.debug(\"read: \" + infile)\n if self.tree:\n self.root.append(ET.parse(infile).getroot())\n else:\n self.tree = ET.parse(infile)\n self.root = self.tree.getroot()\n\n if schema is not None and self.get_version() > 1.0:\n self.validate_xml_file(infile, schema)\n\n logger.debug(\"File version is %s\"%str(self.get_version()))\n\n def get_version(self):\n version = self.root.get(\"version\")\n version = 1.0 if version is None else float(version)\n return version\n\n def write(self, outfile=None):\n \"\"\"\n Write an xml file from data in self\n \"\"\"\n if outfile is None:\n outfile = self.filename\n\n logger.debug(\"write: \" + outfile)\n\n xmlstr = self.get_raw_record()\n\n # xmllint provides a better format option for the output file\n xmllint = find_executable(\"xmllint\")\n if xmllint is not None:\n run_cmd_no_fail(\"%s --format --output %s -\"%(xmllint,outfile), input_str=xmlstr)\n else:\n doc = minidom.parseString(xmlstr)\n with open(outfile,'w') as xmlout:\n doc.writexml(xmlout,addindent=' ')\n\n def get_node(self, nodename, attributes=None, root=None, xpath=None):\n \"\"\"\n Get an xml element matching nodename with optional attributes.\n\n Error unless exactly one match.\n \"\"\"\n\n nodes = self.get_nodes(nodename, attributes=attributes, root=root, xpath=xpath)\n\n expect(len(nodes) == 1, \"Incorrect number of matches, %d, for nodename '%s' and attrs '%s' in file '%s'\" %\n (len(nodes), nodename, attributes, self.filename))\n return nodes[0]\n\n def get_optional_node(self, nodename, attributes=None, root=None, xpath=None):\n \"\"\"\n Get an xml element matching nodename with optional attributes.\n\n Return None if no match.\n \"\"\"\n nodes = self.get_nodes(nodename, attributes=attributes, root=root, xpath=xpath)\n\n expect(len(nodes) <= 1, \"Multiple matches for nodename '%s' and attrs '%s' in file '%s'\" %\n (nodename, attributes, self.filename))\n return nodes[0] if nodes else None\n\n def get_nodes(self, nodename, attributes=None, root=None, xpath=None):\n\n logger.debug(\"(get_nodes) Input values: %s , %s , %s , %s , %s\" , self.__class__.__name__ , nodename , attributes , root , xpath)\n\n if root is None:\n root = self.root\n nodes = []\n\n expect(attributes is None or xpath is None,\n \" Arguments attributes and xpath are exclusive\")\n 
if xpath is None:\n xpath = \".//\"+nodename\n\n if attributes:\n # xml.etree has limited support for xpath and does not allow more than\n # one attribute in an xpath query so we query seperately for each attribute\n # and create a result with the intersection of those lists\n\n for key, value in attributes.iteritems():\n if value is not None:\n expect(isinstance(value, basestring),\n \" Bad value passed for key %s\"%key)\n xpath = \".//%s[@%s=\\'%s\\']\" % (nodename, key, value)\n logger.debug(\"xpath is %s\"%xpath)\n\n try:\n newnodes = root.findall(xpath)\n except Exception as e:\n expect(False, \"Bad xpath search term '%s', error: %s\" % (xpath, e))\n\n if not nodes:\n nodes = newnodes\n else:\n for node in nodes[:]:\n if node not in newnodes:\n nodes.remove(node)\n if not nodes:\n return []\n\n else:\n logger.debug(\"xpath: %s\" , xpath)\n nodes = root.findall(xpath)\n\n logger.debug(\"Returning %s nodes (%s)\" , len(nodes), nodes)\n\n return nodes\n\n def add_child(self, node, root=None):\n \"\"\"\n Add element node to self at root\n \"\"\"\n if root is None:\n root = self.root\n self.root.append(node)\n\n def get_value(self, item, attribute=None, resolved=True, subgroup=None): # pylint: disable=unused-argument\n \"\"\"\n get_value is expected to be defined by the derived classes, if you get here\n the value was not found in the class.\n \"\"\"\n logger.debug(\"Get Value for \" + item)\n return None\n\n def get_values(self, vid, attribute=None, resolved=True, subgroup=None):# pylint: disable=unused-argument\n logger.debug(\"Get Values for \" + vid)\n return []\n\n def set_value(self, vid, value, subgroup=None, ignore_type=True): # pylint: disable=unused-argument\n \"\"\"\n ignore_type is not used in this flavor\n \"\"\"\n valnodes = self.get_nodes(vid)\n if valnodes:\n for node in valnodes:\n node.text = value\n\n def get_resolved_value(self, raw_value):\n \"\"\"\n A value in the xml file may contain references to other xml\n variables or to environment variables. 
These are refered to in\n the perl style with $name and $ENV{name}.\n\n >>> obj = GenericXML()\n >>> os.environ[\"FOO\"] = \"BAR\"\n >>> os.environ[\"BAZ\"] = \"BARF\"\n >>> obj.get_resolved_value(\"one $ENV{FOO} two $ENV{BAZ} three\")\n 'one BAR two BARF three'\n >>> obj.get_resolved_value(\"2 + 3 - 1\")\n '4'\n >>> obj.get_resolved_value(\"0001-01-01\")\n '0001-01-01'\n >>> obj.get_resolved_value(\"$SHELL{echo hi}\")\n 'hi'\n \"\"\"\n logger.debug(\"raw_value %s\" % raw_value)\n reference_re = re.compile(r'\\${?(\\w+)}?')\n env_ref_re = re.compile(r'\\$ENV\\{(\\w+)\\}')\n shell_ref_re = re.compile(r'\\$SHELL\\{([^}]+)\\}')\n math_re = re.compile(r'\\s[+-/*]\\s')\n item_data = raw_value\n\n if item_data is None:\n return None\n\n if type(item_data) is not str:\n return item_data\n\n for m in env_ref_re.finditer(item_data):\n logger.debug(\"look for %s in env\" % item_data)\n env_var = m.groups()[0]\n expect(env_var in os.environ, \"Undefined env var '%s'\" % env_var)\n item_data = item_data.replace(m.group(), os.environ[env_var])\n\n for s in shell_ref_re.finditer(item_data):\n logger.debug(\"execute %s in shell\" % item_data)\n shell_cmd = s.groups()[0]\n item_data = item_data.replace(s.group(), run_cmd_no_fail(shell_cmd))\n\n for m in reference_re.finditer(item_data):\n var = m.groups()[0]\n logger.debug(\"find: %s\" % var)\n ref = self.get_value(var)\n if ref is not None:\n logger.debug(\"resolve: \" + str(ref))\n item_data = item_data.replace(m.group(), self.get_resolved_value(str(ref)))\n elif var == \"CIMEROOT\":\n cimeroot = get_cime_root()\n item_data = item_data.replace(m.group(), cimeroot)\n elif var == \"SRCROOT\":\n srcroot = os.path.join(get_cime_root(),\"..\")\n item_data = item_data.replace(m.group(), srcroot)\n elif var == \"USER\":\n item_data = item_data.replace(m.group(), getpass.getuser())\n\n if math_re.search(item_data):\n try:\n tmp = eval(item_data)\n except:\n tmp = item_data\n item_data = str(tmp)\n\n return item_data\n\n def add_sub_node(self, node, subnode_name, subnode_text):\n expect(node is not None,\" Bad value passed\")\n subnode = ET.Element(subnode_name)\n subnode.text = subnode_text\n node.append(subnode)\n return node\n\n def validate_xml_file(self, filename, schema):\n \"\"\"\n validate an XML file against a provided schema file using pylint\n \"\"\"\n expect(os.path.isfile(filename),\"xml file not found %s\"%filename)\n expect(os.path.isfile(schema),\"schema file not found %s\"%schema)\n xmllint = find_executable(\"xmllint\")\n if xmllint is not None:\n logger.debug(\"Checking file %s against schema %s\"%(filename, schema))\n run_cmd_no_fail(\"%s --noout --schema %s %s\"%(xmllint, schema, filename))\n else:\n logger.warn(\"xmllint not found, could not validate file %s\"%filename)\n\n def get_element_text(self, element_name, attributes=None, root=None, xpath=None):\n element_node = self.get_optional_node(element_name, attributes, root, xpath)\n if element_node is not None:\n return element_node.text\n return None\n\n def set_element_text(self, element_name, new_text, attributes=None, root=None, xpath=None):\n element_node = self.get_optional_node(element_name, attributes, root, xpath)\n if element_node is not None:\n element_node.text = new_text\n return new_text\n return None\n\n def get_raw_record(self, root=None):\n if root is None:\n root = self.root\n try:\n xmlstr = ET.tostring(root)\n except ET.ParseError as e:\n ET.dump(root)\n expect(False, \"Could not write file %s, xml formatting error '%s'\" % (self.filename, e))\n return xmlstr\n\n\n", 
"path": "utils/python/CIME/XML/generic_xml.py"}]}
| 3,522 | 558 |
gh_patches_debug_38107
|
rasdani/github-patches
|
git_diff
|
pytorch__text-139
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unintuitive behavior of Iterator when sort is False
Currently, the following line is executed regardless of the `sort` value.
https://github.com/pytorch/text/blob/2980f1bc39ba6af332c5c2783da8bee109796d4c/torchtext/data/iterator.py#L162
It can result in counter-intuitive behavior when `sort` is `False`, since one would probably expect the order of the data to be kept intact when `sort` is `False`.
I think this should be executed only when `sort` is `True`.
Is it by design, or a bug?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchtext/data/iterator.py`
Content:
```
1 from __future__ import division
2
3 import math
4 import random
5 from contextlib import contextmanager
6 from copy import deepcopy
7
8 from .batch import Batch
9 from .dataset import Dataset
10
11
12 class RandomShuffler(object):
13 """Use random functions while keeping track of the random state to make it
14 reproducible and deterministic."""
15
16 def __init__(self, random_state=None):
17 self._random_state = random_state
18 if self._random_state is None:
19 self._random_state = random.getstate()
20
21 @contextmanager
22 def use_internal_state(self):
23 """Use a specific RNG state."""
24 old_state = random.getstate()
25 random.setstate(self._random_state)
26 yield
27 self._random_state = random.getstate()
28 random.setstate(old_state)
29
30 @property
31 def random_state(self):
32 return deepcopy(self._random_state)
33
34 @random_state.setter
35 def random_state(self, s):
36 self._random_state = s
37
38 def __call__(self, data):
39 """Shuffle and return a new list."""
40 with self.use_internal_state():
41 return random.sample(data, len(data))
42
43
44 class Iterator(object):
45 """Defines an iterator that loads batches of data from a Dataset.
46
47 Attributes:
48 dataset: The Dataset object to load Examples from.
49 batch_size: Batch size.
50 batch_size_fn: Function of three arguments (new example to add, current
51 count of examples in the batch, and current effective batch size)
52 that returns the new effective batch size resulting from adding
53 that example to a batch. This is useful for dynamic batching, where
54 this function would add to the current effective batch size the
55 number of tokens in the new example.
56 sort_key: A key to use for sorting examples in order to batch together
57 examples with similar lengths and minimize padding. The sort_key
58 provided to the Iterator constructor overrides the sort_key
59 attribute of the Dataset, or defers to it if None.
60 train: Whether the iterator represents a train set.
61 repeat: Whether to repeat the iterator for multiple epochs.
62 shuffle: Whether to shuffle examples between epochs.
63 sort: Whether to sort examples according to self.sort_key.
64 Note that repeat, shuffle, and sort default to train, train, and
65 (not train).
66 device: Device to create batches on. Use -1 for CPU and None for the
67 currently active GPU device.
68 """
69
70 def __init__(self, dataset, batch_size, sort_key=None, device=None,
71 batch_size_fn=lambda new, count, sofar: count, train=True,
72 repeat=None, shuffle=None, sort=None):
73 self.batch_size, self.train, self.dataset = batch_size, train, dataset
74 self.batch_size_fn = batch_size_fn
75 self.iterations = 0
76 self.repeat = train if repeat is None else repeat
77 self.shuffle = train if shuffle is None else shuffle
78 self.sort = not train if sort is None else sort
79 if sort_key is None:
80 self.sort_key = dataset.sort_key
81 else:
82 self.sort_key = sort_key
83 self.device = device
84
85 self.random_shuffler = RandomShuffler()
86
87 # For state loading/saving only
88 self._iterations_this_epoch = 0
89 self._random_state_this_epoch = None
90 self._restored_from_state = False
91
92 @classmethod
93 def splits(cls, datasets, batch_sizes=None, **kwargs):
94 """Create Iterator objects for multiple splits of a dataset.
95
96 Arguments:
97 datasets: Tuple of Dataset objects corresponding to the splits. The
98 first such object should be the train set.
99 batch_sizes: Tuple of batch sizes to use for the different splits,
100 or None to use the same batch_size for all splits.
101 Remaining keyword arguments: Passed to the constructor of the
102 iterator class being used.
103 """
104 if batch_sizes is None:
105 batch_sizes = [kwargs.pop('batch_size')] * len(datasets)
106 ret = []
107 for i in range(len(datasets)):
108 train = i == 0
109 ret.append(cls(
110 datasets[i], batch_size=batch_sizes[i], train=train, **kwargs))
111 return tuple(ret)
112
113 def data(self):
114 """Return the examples in the dataset in order, sorted, or shuffled."""
115 if self.sort:
116 xs = sorted(self.dataset, key=self.sort_key)
117 elif self.shuffle:
118 xs = [self.dataset[i] for i in self.random_shuffler(range(len(self.dataset)))]
119 else:
120 xs = self.dataset
121 return xs
122
123 def init_epoch(self):
124 """Set up the batch generator for a new epoch."""
125
126 if self._restored_from_state:
127 self.random_shuffler.random_state = self._random_state_this_epoch
128 else:
129 self._random_state_this_epoch = self.random_shuffler.random_state
130
131 self.create_batches()
132
133 if self._restored_from_state:
134 self._restored_from_state = False
135 else:
136 self._iterations_this_epoch = 0
137
138 if not self.repeat:
139 self.iterations = 0
140
141 def create_batches(self):
142 self.batches = batch(self.data(), self.batch_size, self.batch_size_fn)
143
144 @property
145 def epoch(self):
146 return self.iterations / len(self)
147
148 def __len__(self):
149 return math.ceil(len(self.dataset) / self.batch_size)
150
151 def __iter__(self):
152 while True:
153 self.init_epoch()
154 for idx, minibatch in enumerate(self.batches):
155 # fast-forward if loaded from state
156 if self._iterations_this_epoch > idx:
157 continue
158 self.iterations += 1
159 self._iterations_this_epoch += 1
160 # NOTE: `rnn.pack_padded_sequence` requires that a minibatch be sorted by
161 # decreasing order, which requires reversing relative to typical sort keys
162 minibatch.reverse()
163 yield Batch(minibatch, self.dataset, self.device,
164 self.train)
165 if not self.repeat:
166 raise StopIteration
167
168 def state_dict(self):
169 return {
170 "iterations": self.iterations,
171 "iterations_this_epoch": self._iterations_this_epoch,
172 "random_state_this_epoch": self._random_state_this_epoch}
173
174 def load_state_dict(self, state_dict):
175 self.iterations = state_dict["iterations"]
176 self._iterations_this_epoch = state_dict["iterations_this_epoch"]
177 self._random_state_this_epoch = state_dict["random_state_this_epoch"]
178 self._restored_from_state = True
179
180
181 class BPTTIterator(Iterator):
182 """Defines an iterator for language modeling tasks that use BPTT.
183
184 Provides contiguous streams of examples together with targets that are
185 one timestep further forward, for language modeling training with
186 backpropagation through time (BPTT). Expects a Dataset with a single
187 example and a single field called 'text' and produces Batches with text and
188 target attributes.
189
190 Attributes:
191 dataset: The Dataset object to load Examples from.
192 batch_size: Batch size.
193 bptt_len: Length of sequences for backpropagation through time.
194 sort_key: A key to use for sorting examples in order to batch together
195 examples with similar lengths and minimize padding. The sort_key
196 provided to the Iterator constructor overrides the sort_key
197 attribute of the Dataset, or defers to it if None.
198 train: Whether the iterator represents a train set.
199 repeat: Whether to repeat the iterator for multiple epochs.
200 shuffle: Whether to shuffle examples between epochs.
201 sort: Whether to sort examples according to self.sort_key.
202 Note that repeat, shuffle, and sort default to train, train, and
203 (not train).
204 device: Device to create batches on. Use -1 for CPU and None for the
205 currently active GPU device.
206 """
207
208 def __init__(self, dataset, batch_size, bptt_len, **kwargs):
209 self.bptt_len = bptt_len
210 super(BPTTIterator, self).__init__(dataset, batch_size, **kwargs)
211
212 def __len__(self):
213 return math.ceil(len(self.dataset[0].text) /
214 (self.batch_size * self.bptt_len))
215
216 def __iter__(self):
217 text = self.dataset[0].text
218 TEXT = self.dataset.fields['text']
219 TEXT.eos_token = None
220 text = text + ([TEXT.pad_token] * int(math.ceil(len(text) / self.batch_size) *
221 self.batch_size - len(text)))
222 data = TEXT.numericalize(
223 [text], device=self.device, train=self.train)
224 data = data.view(self.batch_size, -1).t().contiguous()
225 dataset = Dataset(examples=self.dataset.examples, fields=[
226 ('text', TEXT), ('target', TEXT)])
227 while True:
228 for i in range(0, len(self) * self.bptt_len, self.bptt_len):
229 seq_len = min(self.bptt_len, len(data) - 1 - i)
230 yield Batch.fromvars(
231 dataset, self.batch_size, train=self.train,
232 text=data[i:i + seq_len],
233 target=data[i + 1:i + 1 + seq_len])
234 if not self.repeat:
235 raise StopIteration
236
237
238 class BucketIterator(Iterator):
239 """Defines an iterator that batches examples of similar lengths together.
240
241 Minimizes amount of padding needed while producing freshly shuffled
242 batches for each new epoch. See pool for the bucketing procedure used.
243 """
244
245 def create_batches(self):
246 if self.sort:
247 self.batches = batch(self.data(), self.batch_size,
248 self.batch_size_fn)
249 else:
250 self.batches = pool(self.data(), self.batch_size,
251 self.sort_key, self.batch_size_fn,
252 random_shuffler=self.random_shuffler)
253
254
255 def batch(data, batch_size, batch_size_fn=lambda new, count, sofar: count):
256 """Yield elements from data in chunks of batch_size."""
257 minibatch, size_so_far = [], 0
258 for ex in data:
259 minibatch.append(ex)
260 size_so_far = batch_size_fn(ex, len(minibatch), size_so_far)
261 if size_so_far == batch_size:
262 yield minibatch
263 minibatch, size_so_far = [], 0
264 elif size_so_far > batch_size:
265 yield minibatch[:-1]
266 minibatch, size_so_far = minibatch[-1:], batch_size_fn(ex, 1, 0)
267 if minibatch:
268 yield minibatch
269
270
271 def pool(data, batch_size, key, batch_size_fn=lambda new, count, sofar: count,
272 random_shuffler=None):
273 """Sort within buckets, then batch, then shuffle batches.
274
275 Partitions data into chunks of size 100*batch_size, sorts examples within
276 each chunk using sort_key, then batch these examples and shuffle the
277 batches.
278 """
279 if random_shuffler is None:
280 random_shuffler = random.shuffle
281 for p in batch(data, batch_size * 100, batch_size_fn):
282 p_batch = batch(sorted(p, key=key), batch_size, batch_size_fn)
283 for b in random_shuffler(list(p_batch)):
284 yield b
285
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/torchtext/data/iterator.py b/torchtext/data/iterator.py
--- a/torchtext/data/iterator.py
+++ b/torchtext/data/iterator.py
@@ -63,19 +63,28 @@
sort: Whether to sort examples according to self.sort_key.
Note that repeat, shuffle, and sort default to train, train, and
(not train).
+ sort_within_batch: Whether to sort (in descending order according to
+ self.sort_key) within each batch. If None, defaults to self.sort.
+ If self.sort is True and this is False, the batch is left in the
+ original (ascending) sorted order.
device: Device to create batches on. Use -1 for CPU and None for the
currently active GPU device.
"""
def __init__(self, dataset, batch_size, sort_key=None, device=None,
batch_size_fn=lambda new, count, sofar: count, train=True,
- repeat=None, shuffle=None, sort=None):
+ repeat=None, shuffle=None, sort=None,
+ sort_within_batch=None):
self.batch_size, self.train, self.dataset = batch_size, train, dataset
self.batch_size_fn = batch_size_fn
self.iterations = 0
self.repeat = train if repeat is None else repeat
self.shuffle = train if shuffle is None else shuffle
self.sort = not train if sort is None else sort
+ if sort_within_batch is None:
+ self.sort_within_batch = self.sort
+ else:
+ self.sort_within_batch = sort_within_batch
if sort_key is None:
self.sort_key = dataset.sort_key
else:
@@ -157,9 +166,14 @@
continue
self.iterations += 1
self._iterations_this_epoch += 1
- # NOTE: `rnn.pack_padded_sequence` requires that a minibatch be sorted by
- # decreasing order, which requires reversing relative to typical sort keys
- minibatch.reverse()
+ if self.sort_within_batch:
+ # NOTE: `rnn.pack_padded_sequence` requires that a minibatch
+ # be sorted by decreasing order, which requires reversing
+ # relative to typical sort keys
+ if self.sort:
+ minibatch.reverse()
+ else:
+ minibatch.sort(key=self.sort_key, reverse=True)
yield Batch(minibatch, self.dataset, self.device,
self.train)
if not self.repeat:
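
For illustration, a minimal usage sketch of the patched iterator (not part of the diff itself). It assumes an existing `torchtext.data.Dataset` called `train_dataset` whose `sort_key` orders examples by length, and shows the new `sort_within_batch` flag keeping the epoch-level order while still sorting each minibatch for `pack_padded_sequence`:

```python
from torchtext.data import Iterator

# `train_dataset` is assumed to exist and to define a sort_key (e.g. by length);
# it is not constructed in this sketch.
train_iter = Iterator(
    train_dataset,
    batch_size=32,
    train=True,
    sort=False,              # keep the shuffled epoch-level order instead of a global sort
    sort_within_batch=True,  # new flag: sort each minibatch by decreasing sort_key
    repeat=False,
)

for batch in train_iter:
    pass  # each minibatch arrives sorted in decreasing order, ready for rnn.pack_padded_sequence
```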
|
{"golden_diff": "diff --git a/torchtext/data/iterator.py b/torchtext/data/iterator.py\n--- a/torchtext/data/iterator.py\n+++ b/torchtext/data/iterator.py\n@@ -63,19 +63,28 @@\n sort: Whether to sort examples according to self.sort_key.\n Note that repeat, shuffle, and sort default to train, train, and\n (not train).\n+ sort_within_batch: Whether to sort (in descending order according to\n+ self.sort_key) within each batch. If None, defaults to self.sort.\n+ If self.sort is True and this is False, the batch is left in the\n+ original (ascending) sorted order.\n device: Device to create batches on. Use -1 for CPU and None for the\n currently active GPU device.\n \"\"\"\n \n def __init__(self, dataset, batch_size, sort_key=None, device=None,\n batch_size_fn=lambda new, count, sofar: count, train=True,\n- repeat=None, shuffle=None, sort=None):\n+ repeat=None, shuffle=None, sort=None,\n+ sort_within_batch=None):\n self.batch_size, self.train, self.dataset = batch_size, train, dataset\n self.batch_size_fn = batch_size_fn\n self.iterations = 0\n self.repeat = train if repeat is None else repeat\n self.shuffle = train if shuffle is None else shuffle\n self.sort = not train if sort is None else sort\n+ if sort_within_batch is None:\n+ self.sort_within_batch = self.sort\n+ else:\n+ self.sort_within_batch = sort_within_batch\n if sort_key is None:\n self.sort_key = dataset.sort_key\n else:\n@@ -157,9 +166,14 @@\n continue\n self.iterations += 1\n self._iterations_this_epoch += 1\n- # NOTE: `rnn.pack_padded_sequence` requires that a minibatch be sorted by\n- # decreasing order, which requires reversing relative to typical sort keys\n- minibatch.reverse()\n+ if self.sort_within_batch:\n+ # NOTE: `rnn.pack_padded_sequence` requires that a minibatch\n+ # be sorted by decreasing order, which requires reversing\n+ # relative to typical sort keys\n+ if self.sort:\n+ minibatch.reverse()\n+ else:\n+ minibatch.sort(key=self.sort_key, reverse=True)\n yield Batch(minibatch, self.dataset, self.device,\n self.train)\n if not self.repeat:\n", "issue": "Unintuitive behavior of Iterator when sort is False\nCurrently, the following line is executed regardless of `sort` value.\r\nhttps://github.com/pytorch/text/blob/2980f1bc39ba6af332c5c2783da8bee109796d4c/torchtext/data/iterator.py#L162\r\n\r\nIt could result in a counter-intuitive behavior when `sort is False`, since one would probably expect that the order of data is kept intact when `sort` is `False`.\r\n\r\nI think this should be executed only when `sort` is `True`.\r\nIs it by design, or a bug?\n", "before_files": [{"content": "from __future__ import division\n\nimport math\nimport random\nfrom contextlib import contextmanager\nfrom copy import deepcopy\n\nfrom .batch import Batch\nfrom .dataset import Dataset\n\n\nclass RandomShuffler(object):\n \"\"\"Use random functions while keeping track of the random state to make it\n reproducible and deterministic.\"\"\"\n\n def __init__(self, random_state=None):\n self._random_state = random_state\n if self._random_state is None:\n self._random_state = random.getstate()\n\n @contextmanager\n def use_internal_state(self):\n \"\"\"Use a specific RNG state.\"\"\"\n old_state = random.getstate()\n random.setstate(self._random_state)\n yield\n self._random_state = random.getstate()\n random.setstate(old_state)\n\n @property\n def random_state(self):\n return deepcopy(self._random_state)\n\n @random_state.setter\n def random_state(self, s):\n self._random_state = s\n\n def __call__(self, data):\n \"\"\"Shuffle and return a new 
list.\"\"\"\n with self.use_internal_state():\n return random.sample(data, len(data))\n\n\nclass Iterator(object):\n \"\"\"Defines an iterator that loads batches of data from a Dataset.\n\n Attributes:\n dataset: The Dataset object to load Examples from.\n batch_size: Batch size.\n batch_size_fn: Function of three arguments (new example to add, current\n count of examples in the batch, and current effective batch size)\n that returns the new effective batch size resulting from adding\n that example to a batch. This is useful for dynamic batching, where\n this function would add to the current effective batch size the\n number of tokens in the new example.\n sort_key: A key to use for sorting examples in order to batch together\n examples with similar lengths and minimize padding. The sort_key\n provided to the Iterator constructor overrides the sort_key\n attribute of the Dataset, or defers to it if None.\n train: Whether the iterator represents a train set.\n repeat: Whether to repeat the iterator for multiple epochs.\n shuffle: Whether to shuffle examples between epochs.\n sort: Whether to sort examples according to self.sort_key.\n Note that repeat, shuffle, and sort default to train, train, and\n (not train).\n device: Device to create batches on. Use -1 for CPU and None for the\n currently active GPU device.\n \"\"\"\n\n def __init__(self, dataset, batch_size, sort_key=None, device=None,\n batch_size_fn=lambda new, count, sofar: count, train=True,\n repeat=None, shuffle=None, sort=None):\n self.batch_size, self.train, self.dataset = batch_size, train, dataset\n self.batch_size_fn = batch_size_fn\n self.iterations = 0\n self.repeat = train if repeat is None else repeat\n self.shuffle = train if shuffle is None else shuffle\n self.sort = not train if sort is None else sort\n if sort_key is None:\n self.sort_key = dataset.sort_key\n else:\n self.sort_key = sort_key\n self.device = device\n\n self.random_shuffler = RandomShuffler()\n\n # For state loading/saving only\n self._iterations_this_epoch = 0\n self._random_state_this_epoch = None\n self._restored_from_state = False\n\n @classmethod\n def splits(cls, datasets, batch_sizes=None, **kwargs):\n \"\"\"Create Iterator objects for multiple splits of a dataset.\n\n Arguments:\n datasets: Tuple of Dataset objects corresponding to the splits. 
The\n first such object should be the train set.\n batch_sizes: Tuple of batch sizes to use for the different splits,\n or None to use the same batch_size for all splits.\n Remaining keyword arguments: Passed to the constructor of the\n iterator class being used.\n \"\"\"\n if batch_sizes is None:\n batch_sizes = [kwargs.pop('batch_size')] * len(datasets)\n ret = []\n for i in range(len(datasets)):\n train = i == 0\n ret.append(cls(\n datasets[i], batch_size=batch_sizes[i], train=train, **kwargs))\n return tuple(ret)\n\n def data(self):\n \"\"\"Return the examples in the dataset in order, sorted, or shuffled.\"\"\"\n if self.sort:\n xs = sorted(self.dataset, key=self.sort_key)\n elif self.shuffle:\n xs = [self.dataset[i] for i in self.random_shuffler(range(len(self.dataset)))]\n else:\n xs = self.dataset\n return xs\n\n def init_epoch(self):\n \"\"\"Set up the batch generator for a new epoch.\"\"\"\n\n if self._restored_from_state:\n self.random_shuffler.random_state = self._random_state_this_epoch\n else:\n self._random_state_this_epoch = self.random_shuffler.random_state\n\n self.create_batches()\n\n if self._restored_from_state:\n self._restored_from_state = False\n else:\n self._iterations_this_epoch = 0\n\n if not self.repeat:\n self.iterations = 0\n\n def create_batches(self):\n self.batches = batch(self.data(), self.batch_size, self.batch_size_fn)\n\n @property\n def epoch(self):\n return self.iterations / len(self)\n\n def __len__(self):\n return math.ceil(len(self.dataset) / self.batch_size)\n\n def __iter__(self):\n while True:\n self.init_epoch()\n for idx, minibatch in enumerate(self.batches):\n # fast-forward if loaded from state\n if self._iterations_this_epoch > idx:\n continue\n self.iterations += 1\n self._iterations_this_epoch += 1\n # NOTE: `rnn.pack_padded_sequence` requires that a minibatch be sorted by\n # decreasing order, which requires reversing relative to typical sort keys\n minibatch.reverse()\n yield Batch(minibatch, self.dataset, self.device,\n self.train)\n if not self.repeat:\n raise StopIteration\n\n def state_dict(self):\n return {\n \"iterations\": self.iterations,\n \"iterations_this_epoch\": self._iterations_this_epoch,\n \"random_state_this_epoch\": self._random_state_this_epoch}\n\n def load_state_dict(self, state_dict):\n self.iterations = state_dict[\"iterations\"]\n self._iterations_this_epoch = state_dict[\"iterations_this_epoch\"]\n self._random_state_this_epoch = state_dict[\"random_state_this_epoch\"]\n self._restored_from_state = True\n\n\nclass BPTTIterator(Iterator):\n \"\"\"Defines an iterator for language modeling tasks that use BPTT.\n\n Provides contiguous streams of examples together with targets that are\n one timestep further forward, for language modeling training with\n backpropagation through time (BPTT). Expects a Dataset with a single\n example and a single field called 'text' and produces Batches with text and\n target attributes.\n\n Attributes:\n dataset: The Dataset object to load Examples from.\n batch_size: Batch size.\n bptt_len: Length of sequences for backpropagation through time.\n sort_key: A key to use for sorting examples in order to batch together\n examples with similar lengths and minimize padding. 
The sort_key\n provided to the Iterator constructor overrides the sort_key\n attribute of the Dataset, or defers to it if None.\n train: Whether the iterator represents a train set.\n repeat: Whether to repeat the iterator for multiple epochs.\n shuffle: Whether to shuffle examples between epochs.\n sort: Whether to sort examples according to self.sort_key.\n Note that repeat, shuffle, and sort default to train, train, and\n (not train).\n device: Device to create batches on. Use -1 for CPU and None for the\n currently active GPU device.\n \"\"\"\n\n def __init__(self, dataset, batch_size, bptt_len, **kwargs):\n self.bptt_len = bptt_len\n super(BPTTIterator, self).__init__(dataset, batch_size, **kwargs)\n\n def __len__(self):\n return math.ceil(len(self.dataset[0].text) /\n (self.batch_size * self.bptt_len))\n\n def __iter__(self):\n text = self.dataset[0].text\n TEXT = self.dataset.fields['text']\n TEXT.eos_token = None\n text = text + ([TEXT.pad_token] * int(math.ceil(len(text) / self.batch_size) *\n self.batch_size - len(text)))\n data = TEXT.numericalize(\n [text], device=self.device, train=self.train)\n data = data.view(self.batch_size, -1).t().contiguous()\n dataset = Dataset(examples=self.dataset.examples, fields=[\n ('text', TEXT), ('target', TEXT)])\n while True:\n for i in range(0, len(self) * self.bptt_len, self.bptt_len):\n seq_len = min(self.bptt_len, len(data) - 1 - i)\n yield Batch.fromvars(\n dataset, self.batch_size, train=self.train,\n text=data[i:i + seq_len],\n target=data[i + 1:i + 1 + seq_len])\n if not self.repeat:\n raise StopIteration\n\n\nclass BucketIterator(Iterator):\n \"\"\"Defines an iterator that batches examples of similar lengths together.\n\n Minimizes amount of padding needed while producing freshly shuffled\n batches for each new epoch. 
See pool for the bucketing procedure used.\n \"\"\"\n\n def create_batches(self):\n if self.sort:\n self.batches = batch(self.data(), self.batch_size,\n self.batch_size_fn)\n else:\n self.batches = pool(self.data(), self.batch_size,\n self.sort_key, self.batch_size_fn,\n random_shuffler=self.random_shuffler)\n\n\ndef batch(data, batch_size, batch_size_fn=lambda new, count, sofar: count):\n \"\"\"Yield elements from data in chunks of batch_size.\"\"\"\n minibatch, size_so_far = [], 0\n for ex in data:\n minibatch.append(ex)\n size_so_far = batch_size_fn(ex, len(minibatch), size_so_far)\n if size_so_far == batch_size:\n yield minibatch\n minibatch, size_so_far = [], 0\n elif size_so_far > batch_size:\n yield minibatch[:-1]\n minibatch, size_so_far = minibatch[-1:], batch_size_fn(ex, 1, 0)\n if minibatch:\n yield minibatch\n\n\ndef pool(data, batch_size, key, batch_size_fn=lambda new, count, sofar: count,\n random_shuffler=None):\n \"\"\"Sort within buckets, then batch, then shuffle batches.\n\n Partitions data into chunks of size 100*batch_size, sorts examples within\n each chunk using sort_key, then batch these examples and shuffle the\n batches.\n \"\"\"\n if random_shuffler is None:\n random_shuffler = random.shuffle\n for p in batch(data, batch_size * 100, batch_size_fn):\n p_batch = batch(sorted(p, key=key), batch_size, batch_size_fn)\n for b in random_shuffler(list(p_batch)):\n yield b\n", "path": "torchtext/data/iterator.py"}], "after_files": [{"content": "from __future__ import division\n\nimport math\nimport random\nfrom contextlib import contextmanager\nfrom copy import deepcopy\n\nfrom .batch import Batch\nfrom .dataset import Dataset\n\n\nclass RandomShuffler(object):\n \"\"\"Use random functions while keeping track of the random state to make it\n reproducible and deterministic.\"\"\"\n\n def __init__(self, random_state=None):\n self._random_state = random_state\n if self._random_state is None:\n self._random_state = random.getstate()\n\n @contextmanager\n def use_internal_state(self):\n \"\"\"Use a specific RNG state.\"\"\"\n old_state = random.getstate()\n random.setstate(self._random_state)\n yield\n self._random_state = random.getstate()\n random.setstate(old_state)\n\n @property\n def random_state(self):\n return deepcopy(self._random_state)\n\n @random_state.setter\n def random_state(self, s):\n self._random_state = s\n\n def __call__(self, data):\n \"\"\"Shuffle and return a new list.\"\"\"\n with self.use_internal_state():\n return random.sample(data, len(data))\n\n\nclass Iterator(object):\n \"\"\"Defines an iterator that loads batches of data from a Dataset.\n\n Attributes:\n dataset: The Dataset object to load Examples from.\n batch_size: Batch size.\n batch_size_fn: Function of three arguments (new example to add, current\n count of examples in the batch, and current effective batch size)\n that returns the new effective batch size resulting from adding\n that example to a batch. This is useful for dynamic batching, where\n this function would add to the current effective batch size the\n number of tokens in the new example.\n sort_key: A key to use for sorting examples in order to batch together\n examples with similar lengths and minimize padding. 
The sort_key\n provided to the Iterator constructor overrides the sort_key\n attribute of the Dataset, or defers to it if None.\n train: Whether the iterator represents a train set.\n repeat: Whether to repeat the iterator for multiple epochs.\n shuffle: Whether to shuffle examples between epochs.\n sort: Whether to sort examples according to self.sort_key.\n Note that repeat, shuffle, and sort default to train, train, and\n (not train).\n sort_within_batch: Whether to sort (in descending order according to\n self.sort_key) within each batch. If None, defaults to self.sort.\n If self.sort is True and this is False, the batch is left in the\n original (ascending) sorted order.\n device: Device to create batches on. Use -1 for CPU and None for the\n currently active GPU device.\n \"\"\"\n\n def __init__(self, dataset, batch_size, sort_key=None, device=None,\n batch_size_fn=lambda new, count, sofar: count, train=True,\n repeat=None, shuffle=None, sort=None,\n sort_within_batch=None):\n self.batch_size, self.train, self.dataset = batch_size, train, dataset\n self.batch_size_fn = batch_size_fn\n self.iterations = 0\n self.repeat = train if repeat is None else repeat\n self.shuffle = train if shuffle is None else shuffle\n self.sort = not train if sort is None else sort\n if sort_within_batch is None:\n self.sort_within_batch = self.sort\n else:\n self.sort_within_batch = sort_within_batch\n if sort_key is None:\n self.sort_key = dataset.sort_key\n else:\n self.sort_key = sort_key\n self.device = device\n\n self.random_shuffler = RandomShuffler()\n\n # For state loading/saving only\n self._iterations_this_epoch = 0\n self._random_state_this_epoch = None\n self._restored_from_state = False\n\n @classmethod\n def splits(cls, datasets, batch_sizes=None, **kwargs):\n \"\"\"Create Iterator objects for multiple splits of a dataset.\n\n Arguments:\n datasets: Tuple of Dataset objects corresponding to the splits. 
The\n first such object should be the train set.\n batch_sizes: Tuple of batch sizes to use for the different splits,\n or None to use the same batch_size for all splits.\n Remaining keyword arguments: Passed to the constructor of the\n iterator class being used.\n \"\"\"\n if batch_sizes is None:\n batch_sizes = [kwargs.pop('batch_size')] * len(datasets)\n ret = []\n for i in range(len(datasets)):\n train = i == 0\n ret.append(cls(\n datasets[i], batch_size=batch_sizes[i], train=train, **kwargs))\n return tuple(ret)\n\n def data(self):\n \"\"\"Return the examples in the dataset in order, sorted, or shuffled.\"\"\"\n if self.sort:\n xs = sorted(self.dataset, key=self.sort_key)\n elif self.shuffle:\n xs = [self.dataset[i] for i in self.random_shuffler(range(len(self.dataset)))]\n else:\n xs = self.dataset\n return xs\n\n def init_epoch(self):\n \"\"\"Set up the batch generator for a new epoch.\"\"\"\n\n if self._restored_from_state:\n self.random_shuffler.random_state = self._random_state_this_epoch\n else:\n self._random_state_this_epoch = self.random_shuffler.random_state\n\n self.create_batches()\n\n if self._restored_from_state:\n self._restored_from_state = False\n else:\n self._iterations_this_epoch = 0\n\n if not self.repeat:\n self.iterations = 0\n\n def create_batches(self):\n self.batches = batch(self.data(), self.batch_size, self.batch_size_fn)\n\n @property\n def epoch(self):\n return self.iterations / len(self)\n\n def __len__(self):\n return math.ceil(len(self.dataset) / self.batch_size)\n\n def __iter__(self):\n while True:\n self.init_epoch()\n for idx, minibatch in enumerate(self.batches):\n # fast-forward if loaded from state\n if self._iterations_this_epoch > idx:\n continue\n self.iterations += 1\n self._iterations_this_epoch += 1\n if self.sort_within_batch:\n # NOTE: `rnn.pack_padded_sequence` requires that a minibatch\n # be sorted by decreasing order, which requires reversing\n # relative to typical sort keys\n if self.sort:\n minibatch.reverse()\n else:\n minibatch.sort(key=self.sort_key, reverse=True)\n yield Batch(minibatch, self.dataset, self.device,\n self.train)\n if not self.repeat:\n raise StopIteration\n\n def state_dict(self):\n return {\n \"iterations\": self.iterations,\n \"iterations_this_epoch\": self._iterations_this_epoch,\n \"random_state_this_epoch\": self._random_state_this_epoch}\n\n def load_state_dict(self, state_dict):\n self.iterations = state_dict[\"iterations\"]\n self._iterations_this_epoch = state_dict[\"iterations_this_epoch\"]\n self._random_state_this_epoch = state_dict[\"random_state_this_epoch\"]\n self._restored_from_state = True\n\n\nclass BPTTIterator(Iterator):\n \"\"\"Defines an iterator for language modeling tasks that use BPTT.\n\n Provides contiguous streams of examples together with targets that are\n one timestep further forward, for language modeling training with\n backpropagation through time (BPTT). Expects a Dataset with a single\n example and a single field called 'text' and produces Batches with text and\n target attributes.\n\n Attributes:\n dataset: The Dataset object to load Examples from.\n batch_size: Batch size.\n bptt_len: Length of sequences for backpropagation through time.\n sort_key: A key to use for sorting examples in order to batch together\n examples with similar lengths and minimize padding. 
The sort_key\n provided to the Iterator constructor overrides the sort_key\n attribute of the Dataset, or defers to it if None.\n train: Whether the iterator represents a train set.\n repeat: Whether to repeat the iterator for multiple epochs.\n shuffle: Whether to shuffle examples between epochs.\n sort: Whether to sort examples according to self.sort_key.\n Note that repeat, shuffle, and sort default to train, train, and\n (not train).\n device: Device to create batches on. Use -1 for CPU and None for the\n currently active GPU device.\n \"\"\"\n\n def __init__(self, dataset, batch_size, bptt_len, **kwargs):\n self.bptt_len = bptt_len\n super(BPTTIterator, self).__init__(dataset, batch_size, **kwargs)\n\n def __len__(self):\n return math.ceil(len(self.dataset[0].text) /\n (self.batch_size * self.bptt_len))\n\n def __iter__(self):\n text = self.dataset[0].text\n TEXT = self.dataset.fields['text']\n TEXT.eos_token = None\n text = text + ([TEXT.pad_token] * int(math.ceil(len(text) / self.batch_size) *\n self.batch_size - len(text)))\n data = TEXT.numericalize(\n [text], device=self.device, train=self.train)\n data = data.view(self.batch_size, -1).t().contiguous()\n dataset = Dataset(examples=self.dataset.examples, fields=[\n ('text', TEXT), ('target', TEXT)])\n while True:\n for i in range(0, len(self) * self.bptt_len, self.bptt_len):\n seq_len = min(self.bptt_len, len(data) - 1 - i)\n yield Batch.fromvars(\n dataset, self.batch_size, train=self.train,\n text=data[i:i + seq_len],\n target=data[i + 1:i + 1 + seq_len])\n if not self.repeat:\n raise StopIteration\n\n\nclass BucketIterator(Iterator):\n \"\"\"Defines an iterator that batches examples of similar lengths together.\n\n Minimizes amount of padding needed while producing freshly shuffled\n batches for each new epoch. See pool for the bucketing procedure used.\n \"\"\"\n\n def create_batches(self):\n if self.sort:\n self.batches = batch(self.data(), self.batch_size,\n self.batch_size_fn)\n else:\n self.batches = pool(self.data(), self.batch_size,\n self.sort_key, self.batch_size_fn,\n random_shuffler=self.random_shuffler)\n\n\ndef batch(data, batch_size, batch_size_fn=lambda new, count, sofar: count):\n \"\"\"Yield elements from data in chunks of batch_size.\"\"\"\n minibatch, size_so_far = [], 0\n for ex in data:\n minibatch.append(ex)\n size_so_far = batch_size_fn(ex, len(minibatch), size_so_far)\n if size_so_far == batch_size:\n yield minibatch\n minibatch, size_so_far = [], 0\n elif size_so_far > batch_size:\n yield minibatch[:-1]\n minibatch, size_so_far = minibatch[-1:], batch_size_fn(ex, 1, 0)\n if minibatch:\n yield minibatch\n\n\ndef pool(data, batch_size, key, batch_size_fn=lambda new, count, sofar: count,\n random_shuffler=None):\n \"\"\"Sort within buckets, then batch, then shuffle batches.\n\n Partitions data into chunks of size 100*batch_size, sorts examples within\n each chunk using sort_key, then batch these examples and shuffle the\n batches.\n \"\"\"\n if random_shuffler is None:\n random_shuffler = random.shuffle\n for p in batch(data, batch_size * 100, batch_size_fn):\n p_batch = batch(sorted(p, key=key), batch_size, batch_size_fn)\n for b in random_shuffler(list(p_batch)):\n yield b\n", "path": "torchtext/data/iterator.py"}]}
| 3,578 | 550 |
gh_patches_debug_17717
|
rasdani/github-patches
|
git_diff
|
nautobot__nautobot-763
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Concurrency issues(?) (with task workers?)
<!--
NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.
This form is only for reporting reproducible bugs. If you need assistance
with Nautobot installation, or if you have a general question, please start a
discussion instead: https://github.com/nautobot/nautobot/discussions
Please describe the environment in which you are running Nautobot. Be sure
that you are running an unmodified instance of the latest stable release
before submitting a bug report, and that any plugins have been disabled.
-->
### Environment
* Python version: 3.9
* Nautobot version: 1.1.0
* Redis: redis:6.2.5-alpine
* PSQL: postgres:13.3-alpine
Docker-compose extract:
```
services:
nautobot: &nautobot
image: networktocode/nautobot:1.1.0-py3.9
depends_on:
- postgres
- redis
volumes:
- ./volumes/media:/opt/nautobot/media:z,rw
- ./volumes/git:/opt/nautobot/git:z,rw
- ./volumes/jobs:/opt/nautobot/jobs:z,rw
environment:
NAUTOBOT_DB_HOST: postgres
NAUTOBOT_DB_USER: nautobot
NAUTOBOT_DB_PASSWORD: nautobot
NAUTOBOT_DB_NAME: nautobot
NAUTOBOT_REDIS_HOST: redis
NAUTOBOT_SECRET_KEY: "*****"
NAUTOBOT_MAX_PAGE_SIZE: "50000"
NAUTOBOT_CHANGELOG_RETENTION: "366"
NAUTOBOT_METRICS_ENABLED: "true"
#NAUTOBOT_CACHEOPS_ENABLED: "false"
celery_worker:
<<: *nautobot
entrypoint: "nautobot-server celery worker -B -l INFO"
networks:
- default
labels: []
depends_on:
- nautobot
healthcheck:
interval: 5s
timeout: 5s
start_period: 5s
retries: 3
test: ["CMD", "nautobot-server", "health_check"]
rq_worker:
<<: *nautobot
entrypoint: "nautobot-server rqworker"
networks:
- default
labels: []
depends_on:
- nautobot
healthcheck:
interval: 5s
timeout: 5s
start_period: 5s
retries: 3
test: ["CMD", "nautobot-server", "health_check"]
# postgres - https://hub.docker.com/_/postgres
postgres:
image: postgres:13.3-alpine
volumes:
- ./volumes/pgsql:/var/lib/postgresql/data
environment:
POSTGRES_USER: nautobot
POSTGRES_PASSWORD: nautobot
POSTGRES_DB: nautobot
# redis - https://hub.docker.com/_/redis
redis:
image: redis:6.2.5-alpine
```
<!--
Describe in detail the exact steps that someone else can take to reproduce
this bug using the current stable release of Nautobot. Begin with the
creation of any necessary database objects and call out every operation
being performed explicitly. If reporting a bug in the REST API, be sure to
reconstruct the raw HTTP request(s) being made: Don't rely on a client
library such as pynautobot.
-->
### Steps to Reproduce
1. Stop the Celery worker
2. Create jobs (doing many queries?):
```py
from nautobot.extras.jobs import Job
from nautobot.dcim.models import Device, Interface
import time
class TestA(Job):
devices = Device.objects
class Meta:
read_only = True
def test_a(self):
j = 0
for i, device in enumerate(self.devices.all()):
for j, interface in enumerate(device.interfaces.all(), start=j):
if j > 50:
break
self.log_info(obj=interface, message=f'Iteration {i}/{j}, name={device.name}/{interface.name}, {interface.connected_endpoint}')
time.sleep(0.1)
class TestB(Job):
devices = Device.objects
class Meta:
read_only = True
def test_b(self):
j = 0
for i, device in enumerate(self.devices.all()):
for j, interface in enumerate(device.interfaces.all(), start=j):
if j > 50:
break
self.log_info(obj=interface, message=f'Iteration {i}/{j}, name={device.name}/{interface.name}, {interface.connected_endpoint}')
time.sleep(0.1)
```
3. Start multiple instances of each (using the API if not allowed in the UI)
4. Start the worker
<!-- What did you expect to happen? -->
### Expected Behavior
Jobs run to completion, in the order in which they were started
<!-- What happened instead? -->
### Observed Behavior
Some jobs are stuck in either the pending or the running state, with errors in the worker logs
### Additional information
This is the best repro I could find after many hours of really weird and random errors. I noticed that setting `NAUTOBOT_CACHEOPS_ENABLED: 'false'` seemed to make the errors appear more often.
In that case (no cacheops), right after an instance is started, listing the job results (which seems to imply `page_size` git refreshes?) and then loading another page (like the job list or a git list/detail view) is also a good way to crash the web container with weird errors, like:
* `<class 'AttributeError'>, 'datetime.date' object has no attribute 'encode'`
* `<class 'RuntimeError'>, generator raised StopIteration`
* `<class 'AttributeError'>, 'NoneType' object has no attribute 'DoesNotExist'`
And in the logs about the git refresh:
```
nautobot_1 | 18:42:23.628 INFO nautobot.jobs :
nautobot_1 | Repository successfully refreshed
nautobot_1 | 18:42:23.655 INFO nautobot.jobs :
nautobot_1 | Repository successfully refreshed
nautobot_1 | 18:42:23.677 INFO nautobot.jobs :
nautobot_1 | Repository successfully refreshed
[..]
```
So I'm not sure the issue is restricted to the jobs part.
Some more random errors observed:
* `<class 'django.contrib.contenttypes.models.ContentType.DoesNotExist'> ContentType matching query does not exist.`
* `<class 'django.db.utils.DatabaseError'> error with status PGRES_TUPLES_OK and no message from the libpq`
* `<class 'ValueError'> Field 'id' expected a number but got 'ryws2lq3ihs******md9auxf1ua3'.`
For the job part, a possible workaround seems to be to set the pool implementation to solo (https://docs.celeryproject.org/en/stable/reference/cli.html#cmdoption-celery-worker-P):
```yaml
entrypoint: "nautobot-server celery worker -B -l INFO-P solo"
```
This really feels like I'm doing something wrong (especially since nobody else seems to be complaining?). I really hope not, but if that's the case I can't point out what it is.
Best, Alexandre
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nautobot/core/celery.py`
Content:
```
1 import json
2 import logging
3
4 import nautobot
5
6 from celery import Celery, shared_task
7 from django.core.serializers.json import DjangoJSONEncoder
8 from django.utils.module_loading import import_string
9 from kombu.serialization import register
10
11 logger = logging.getLogger(__name__)
12
13 # The Celery documentation tells us to call setup on the app to initialize
14 # settings, but we will NOT be doing that because of a chicken-and-egg problem
15 # when bootstrapping the Django settings with `nautobot-server`.
16 #
17 # Note this would normally set the `DJANGO_SETTINGS_MODULE` environment variable
18 # which Celery and its workers need under the hood.The Celery docs and examples
19 # normally have you set it here, but because of our custom settings bootstrapping
20 # it is handled in the `nautobot.setup() call, and we have implemented a
21 # `nautobot-server celery` command to provide the correct context so this does
22 # NOT need to be called here.
23 # nautobot.setup()
24
25 app = Celery("nautobot")
26
27 # Using a string here means the worker doesn't have to serialize
28 # the configuration object to child processes. Again, this is possible
29 # only after calling `nautobot.setup()` which sets `DJANGO_SETTINGS_MODULE`.
30 # - namespace='CELERY' means all celery-related configuration keys
31 # should have a `CELERY_` prefix.
32 app.config_from_object("django.conf:settings", namespace="CELERY")
33
34 # Load task modules from all registered Django apps.
35 app.autodiscover_tasks()
36
37
38 class NautobotKombuJSONEncoder(DjangoJSONEncoder):
39 """
40 Custom json encoder based on DjangoJSONEncoder that serializes objects that implement
41 the `nautobot_serialize()` method via the `__nautobot_type__` interface. This is useful
42 in passing special objects to and from Celery tasks.
43
44 This pattern should generally be avoided by passing pointers to persisted objects to the
45 Celery tasks and retrieving them from within the task execution. While this is always possible
46 for model instances (which covers 99% of use cases), for rare instances where it does not,
47 and the actual object must be passed, this pattern allows for encoding and decoding
48 of such objects.
49
50 It requires a conforming class to implement the instance method `nautobot_serialize()` which
51 returns a json serializable dictionary of the object representation. The class must also implement
52 the `nautobot_deserialize()` class method which takes the dictionary representation and returns
53 an actual instance of the class.
54 """
55
56 def default(self, obj):
57 if hasattr(obj, "nautobot_serialize"):
58 cls = obj.__class__
59 module = cls.__module__
60 qual_name = ".".join([module, cls.__qualname__]) # fully qualified dotted import path
61 logger.debug("Performing nautobot serialization on %s for type %s", obj, qual_name)
62 data = {"__nautobot_type__": qual_name}
63 data.update(obj.nautobot_serialize())
64 return data
65
66 elif isinstance(obj, set):
67 # Convert a set to a list for passing to and from a task
68 return list(obj)
69
70 else:
71 return DjangoJSONEncoder.default(self, obj)
72
73
74 def nautobot_kombu_json_loads_hook(data):
75 """
76 In concert with the NautobotKombuJSONEncoder json encoder, this object hook method decodes
77 objects that implement the `__nautobot_type__` interface via the `nautobot_deserialize()` class method.
78 """
79 if "__nautobot_type__" in data:
80 qual_name = data.pop("__nautobot_type__")
81 logger.debug("Performing nautobot deserialization for type %s", qual_name)
82 cls = import_string(qual_name) # fully qualified dotted import path
83 if cls:
84 return cls.nautobot_deserialize(data)
85 else:
86 raise TypeError(f"Unable to import {qual_name} during nautobot deserialization")
87 else:
88 return data
89
90
91 # Encoder function
92 def _dumps(obj):
93 return json.dumps(obj, cls=NautobotKombuJSONEncoder)
94
95
96 # Decoder function
97 def _loads(obj):
98 return json.loads(obj, object_hook=nautobot_kombu_json_loads_hook)
99
100
101 # Register the custom serialization type
102 register("nautobot_json", _dumps, _loads, content_type="application/x-nautobot-json", content_encoding="utf-8")
103
104
105 #
106 # nautobot_task
107 #
108 # By exposing `shared_task` within our own namespace, we leave the door open to
109 # extending and expanding the usage and meaning of shared_task without having
110 # to undergo further refactoring of task's decorators. We could also transparently
111 # swap out shared_task to a custom base task.
112 #
113
114 nautobot_task = shared_task
115
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nautobot/core/celery.py b/nautobot/core/celery.py
--- a/nautobot/core/celery.py
+++ b/nautobot/core/celery.py
@@ -4,6 +4,7 @@
import nautobot
from celery import Celery, shared_task
+from celery.fixups.django import DjangoFixup
from django.core.serializers.json import DjangoJSONEncoder
from django.utils.module_loading import import_string
from kombu.serialization import register
@@ -31,6 +32,11 @@
# should have a `CELERY_` prefix.
app.config_from_object("django.conf:settings", namespace="CELERY")
+# Because of the chicken-and-egg Django settings bootstrapping issue,
+# Celery doesn't automatically install its Django-specific patches.
+# So we need to explicitly do so ourselves:
+DjangoFixup(app).install()
+
# Load task modules from all registered Django apps.
app.autodiscover_tasks()
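
For readability, a consolidated sketch of the relevant part of `nautobot/core/celery.py` after the patch above (docstrings, the custom Kombu JSON serializer registration, and `nautobot_task` from the original file are omitted):

```python
from celery import Celery
from celery.fixups.django import DjangoFixup

app = Celery("nautobot")

# Configuration is still read from Django settings with the CELERY_ prefix,
# relying on `nautobot-server` having bootstrapped DJANGO_SETTINGS_MODULE.
app.config_from_object("django.conf:settings", namespace="CELERY")

# Explicitly install Celery's Django-specific patches (e.g. database connection
# handling around task execution), since the deferred settings bootstrapping
# means they are not installed automatically.
DjangoFixup(app).install()

# Load task modules from all registered Django apps.
app.autodiscover_tasks()
```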
|
{"golden_diff": "diff --git a/nautobot/core/celery.py b/nautobot/core/celery.py\n--- a/nautobot/core/celery.py\n+++ b/nautobot/core/celery.py\n@@ -4,6 +4,7 @@\n import nautobot\n \n from celery import Celery, shared_task\n+from celery.fixups.django import DjangoFixup\n from django.core.serializers.json import DjangoJSONEncoder\n from django.utils.module_loading import import_string\n from kombu.serialization import register\n@@ -31,6 +32,11 @@\n # should have a `CELERY_` prefix.\n app.config_from_object(\"django.conf:settings\", namespace=\"CELERY\")\n \n+# Because of the chicken-and-egg Django settings bootstrapping issue,\n+# Celery doesn't automatically install its Django-specific patches.\n+# So we need to explicitly do so ourselves:\n+DjangoFixup(app).install()\n+\n # Load task modules from all registered Django apps.\n app.autodiscover_tasks()\n", "issue": "Concurrency issues(?) (with tasks workers?)\n<!--\r\n NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.\r\n\r\n This form is only for reporting reproducible bugs. If you need assistance\r\n with Nautobot installation, or if you have a general question, please start a\r\n discussion instead: https://github.com/nautobot/nautobot/discussions\r\n\r\n Please describe the environment in which you are running Nautobot. Be sure\r\n that you are running an unmodified instance of the latest stable release\r\n before submitting a bug report, and that any plugins have been disabled.\r\n-->\r\n### Environment\r\n* Python version: 3.9\r\n* Nautobot version: 1.1.0\r\n* Redis: redis:6.2.5-alpine\r\n* PSQL: postgres:13.3-alpine\r\n\r\nDocker-compose extract:\r\n```\r\nservices:\r\n nautobot: &nautobot\r\n image: networktocode/nautobot:1.1.0-py3.9\r\n depends_on:\r\n - postgres\r\n - redis\r\n volumes:\r\n - ./volumes/media:/opt/nautobot/media:z,rw\r\n - ./volumes/git:/opt/nautobot/git:z,rw\r\n - ./volumes/jobs:/opt/nautobot/jobs:z,rw\r\n environment:\r\n NAUTOBOT_DB_HOST: postgres\r\n NAUTOBOT_DB_USER: nautobot\r\n NAUTOBOT_DB_PASSWORD: nautobot\r\n NAUTOBOT_DB_NAME: nautobot\r\n NAUTOBOT_REDIS_HOST: redis\r\n NAUTOBOT_SECRET_KEY: \"*****\"\r\n NAUTOBOT_MAX_PAGE_SIZE: \"50000\"\r\n NAUTOBOT_CHANGELOG_RETENTION: \"366\"\r\n NAUTOBOT_METRICS_ENABLED: \"true\"\r\n #NAUTOBOT_CACHEOPS_ENABLED: \"false\"\r\n\r\n celery_worker:\r\n <<: *nautobot\r\n entrypoint: \"nautobot-server celery worker -B -l INFO\"\r\n networks:\r\n - default\r\n labels: []\r\n depends_on:\r\n - nautobot\r\n healthcheck:\r\n interval: 5s\r\n timeout: 5s\r\n start_period: 5s\r\n retries: 3\r\n test: [\"CMD\", \"nautobot-server\", \"health_check\"]\r\n\r\n rq_worker:\r\n <<: *nautobot\r\n entrypoint: \"nautobot-server rqworker\"\r\n networks:\r\n - default\r\n labels: []\r\n depends_on:\r\n - nautobot\r\n healthcheck:\r\n interval: 5s\r\n timeout: 5s\r\n start_period: 5s\r\n retries: 3\r\n test: [\"CMD\", \"nautobot-server\", \"health_check\"]\r\n\r\n # postgres - https://hub.docker.com/_/postgres\r\n postgres:\r\n image: postgres:13.3-alpine\r\n volumes:\r\n - ./volumes/pgsql:/var/lib/postgresql/data\r\n environment:\r\n POSTGRES_USER: nautobot\r\n POSTGRES_PASSWORD: nautobot\r\n POSTGRES_DB: nautobot\r\n\r\n # redis - https://hub.docker.com/_/redis\r\n redis:\r\n image: redis:6.2.5-alpine\r\n```\r\n\r\n<!--\r\n Describe in detail the exact steps that someone else can take to reproduce\r\n this bug using the current stable release of Nautobot. 
Begin with the\r\n creation of any necessary database objects and call out every operation\r\n being performed explicitly. If reporting a bug in the REST API, be sure to\r\n reconstruct the raw HTTP request(s) being made: Don't rely on a client\r\n library such as pynautobot.\r\n-->\r\n### Steps to Reproduce\r\n1. Stop the Celery worker\r\n2. Create jobs (doing many queries?):\r\n```py\r\nfrom nautobot.extras.jobs import Job\r\nfrom nautobot.dcim.models import Device, Interface\r\nimport time\r\n\r\n\r\nclass TestA(Job):\r\n\r\n devices = Device.objects\r\n\r\n class Meta:\r\n read_only = True\r\n\r\n def test_a(self):\r\n j = 0\r\n for i, device in enumerate(self.devices.all()):\r\n for j, interface in enumerate(device.interfaces.all(), start=j):\r\n if j > 50:\r\n break\r\n self.log_info(obj=interface, message=f'Iteration {i}/{j}, name={device.name}/{interface.name}, {interface.connected_endpoint}')\r\n time.sleep(0.1)\r\n\r\n\r\nclass TestB(Job):\r\n\r\n devices = Device.objects\r\n\r\n class Meta:\r\n read_only = True\r\n\r\n def test_b(self):\r\n j = 0\r\n for i, device in enumerate(self.devices.all()):\r\n for j, interface in enumerate(device.interfaces.all(), start=j):\r\n if j > 50:\r\n break\r\n self.log_info(obj=interface, message=f'Iteration {i}/{j}, name={device.name}/{interface.name}, {interface.connected_endpoint}')\r\n time.sleep(0.1)\r\n```\r\n\r\n2. Start multiple instances of each (using the API if not allowed in the UI)\r\n3. Start the worker\r\n\r\n<!-- What did you expect to happen? -->\r\n### Expected Behavior\r\nJobs are fully ran in the start order\r\n\r\n<!-- What happened instead? -->\r\n### Observed Behavior\r\nSome jobs are stuck in either the pending or the running state, with errors in the worker logs\r\n\r\n### Additional informations\r\n\r\nThis is the best repro I could find in many hours of really weird and random errors. I noticed setting `NAUTOBOT_CACHEOPS_ENABLED: 'false'` could help getting errors more often.\r\n\r\nAnd in that case (no cacheops), right after an instance is started, listing the job results (which seems to imply `page_size` times git refresh ?) 
+ loading another page (like job list or git list/detail) is also a good way to crash the web container with weird errors too, like:\r\n* `<class 'AttributeError'>, 'datetime.date' object has no attribute 'encode'`\r\n* `<class 'RuntimeError'>, generator raised StopIteration`\r\n* `<class 'AttributeError'>, 'NoneType' object has no attribute 'DoesNotExist'`\r\n\r\nAnd in the logs about the git refresh:\r\n```\r\nnautobot_1 | 18:42:23.628 INFO nautobot.jobs :\r\nnautobot_1 | Repository successfully refreshed\r\nnautobot_1 | 18:42:23.655 INFO nautobot.jobs :\r\nnautobot_1 | Repository successfully refreshed\r\nnautobot_1 | 18:42:23.677 INFO nautobot.jobs :\r\nnautobot_1 | Repository successfully refreshed\r\n[..]\r\n```\r\n\r\nSo I'm not sure the issue is restricted to the jobs part.\r\n\r\nSome more random errors observed:\r\n* `<class 'django.contrib.contenttypes.models.ContentType.DoesNotExist'> ContentType matching query does not exist.`\r\n* `<class 'django.db.utils.DatabaseError'> error with status PGRES_TUPLES_OK and no message from the libpq`\r\n* `<class 'ValueError'> Field 'id' expected a number but got 'ryws2lq3ihs******md9auxf1ua3'.`\r\n\r\nFor the job part, a possible workaround seems to be to set the pool implementation to solo (https://docs.celeryproject.org/en/stable/reference/cli.html#cmdoption-celery-worker-P): \r\n```yaml\r\n entrypoint: \"nautobot-server celery worker -B -l INFO-P solo\"\r\n```\r\n\r\nThis really feels like I'm doing something wrong (especially since nobody else seems complaining?), I really hope not, but if that's the case I can't point out what it is.\r\n\r\nBest, Alexandre\n", "before_files": [{"content": "import json\nimport logging\n\nimport nautobot\n\nfrom celery import Celery, shared_task\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.utils.module_loading import import_string\nfrom kombu.serialization import register\n\nlogger = logging.getLogger(__name__)\n\n# The Celery documentation tells us to call setup on the app to initialize\n# settings, but we will NOT be doing that because of a chicken-and-egg problem\n# when bootstrapping the Django settings with `nautobot-server`.\n#\n# Note this would normally set the `DJANGO_SETTINGS_MODULE` environment variable\n# which Celery and its workers need under the hood.The Celery docs and examples\n# normally have you set it here, but because of our custom settings bootstrapping\n# it is handled in the `nautobot.setup() call, and we have implemented a\n# `nautobot-server celery` command to provide the correct context so this does\n# NOT need to be called here.\n# nautobot.setup()\n\napp = Celery(\"nautobot\")\n\n# Using a string here means the worker doesn't have to serialize\n# the configuration object to child processes. Again, this is possible\n# only after calling `nautobot.setup()` which sets `DJANGO_SETTINGS_MODULE`.\n# - namespace='CELERY' means all celery-related configuration keys\n# should have a `CELERY_` prefix.\napp.config_from_object(\"django.conf:settings\", namespace=\"CELERY\")\n\n# Load task modules from all registered Django apps.\napp.autodiscover_tasks()\n\n\nclass NautobotKombuJSONEncoder(DjangoJSONEncoder):\n \"\"\"\n Custom json encoder based on DjangoJSONEncoder that serializes objects that implement\n the `nautobot_serialize()` method via the `__nautobot_type__` interface. 
This is useful\n in passing special objects to and from Celery tasks.\n\n This pattern should generally be avoided by passing pointers to persisted objects to the\n Celery tasks and retrieving them from within the task execution. While this is always possible\n for model instances (which covers 99% of use cases), for rare instances where it does not,\n and the actual object must be passed, this pattern allows for encoding and decoding\n of such objects.\n\n It requires a conforming class to implement the instance method `nautobot_serialize()` which\n returns a json serializable dictionary of the object representation. The class must also implement\n the `nautobot_deserialize()` class method which takes the dictionary representation and returns\n an actual instance of the class.\n \"\"\"\n\n def default(self, obj):\n if hasattr(obj, \"nautobot_serialize\"):\n cls = obj.__class__\n module = cls.__module__\n qual_name = \".\".join([module, cls.__qualname__]) # fully qualified dotted import path\n logger.debug(\"Performing nautobot serialization on %s for type %s\", obj, qual_name)\n data = {\"__nautobot_type__\": qual_name}\n data.update(obj.nautobot_serialize())\n return data\n\n elif isinstance(obj, set):\n # Convert a set to a list for passing to and from a task\n return list(obj)\n\n else:\n return DjangoJSONEncoder.default(self, obj)\n\n\ndef nautobot_kombu_json_loads_hook(data):\n \"\"\"\n In concert with the NautobotKombuJSONEncoder json encoder, this object hook method decodes\n objects that implement the `__nautobot_type__` interface via the `nautobot_deserialize()` class method.\n \"\"\"\n if \"__nautobot_type__\" in data:\n qual_name = data.pop(\"__nautobot_type__\")\n logger.debug(\"Performing nautobot deserialization for type %s\", qual_name)\n cls = import_string(qual_name) # fully qualified dotted import path\n if cls:\n return cls.nautobot_deserialize(data)\n else:\n raise TypeError(f\"Unable to import {qual_name} during nautobot deserialization\")\n else:\n return data\n\n\n# Encoder function\ndef _dumps(obj):\n return json.dumps(obj, cls=NautobotKombuJSONEncoder)\n\n\n# Decoder function\ndef _loads(obj):\n return json.loads(obj, object_hook=nautobot_kombu_json_loads_hook)\n\n\n# Register the custom serialization type\nregister(\"nautobot_json\", _dumps, _loads, content_type=\"application/x-nautobot-json\", content_encoding=\"utf-8\")\n\n\n#\n# nautobot_task\n#\n# By exposing `shared_task` within our own namespace, we leave the door open to\n# extending and expanding the usage and meaning of shared_task without having\n# to undergo further refactoring of task's decorators. 
We could also transparently\n# swap out shared_task to a custom base task.\n#\n\nnautobot_task = shared_task\n", "path": "nautobot/core/celery.py"}], "after_files": [{"content": "import json\nimport logging\n\nimport nautobot\n\nfrom celery import Celery, shared_task\nfrom celery.fixups.django import DjangoFixup\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.utils.module_loading import import_string\nfrom kombu.serialization import register\n\nlogger = logging.getLogger(__name__)\n\n# The Celery documentation tells us to call setup on the app to initialize\n# settings, but we will NOT be doing that because of a chicken-and-egg problem\n# when bootstrapping the Django settings with `nautobot-server`.\n#\n# Note this would normally set the `DJANGO_SETTINGS_MODULE` environment variable\n# which Celery and its workers need under the hood.The Celery docs and examples\n# normally have you set it here, but because of our custom settings bootstrapping\n# it is handled in the `nautobot.setup() call, and we have implemented a\n# `nautobot-server celery` command to provide the correct context so this does\n# NOT need to be called here.\n# nautobot.setup()\n\napp = Celery(\"nautobot\")\n\n# Using a string here means the worker doesn't have to serialize\n# the configuration object to child processes. Again, this is possible\n# only after calling `nautobot.setup()` which sets `DJANGO_SETTINGS_MODULE`.\n# - namespace='CELERY' means all celery-related configuration keys\n# should have a `CELERY_` prefix.\napp.config_from_object(\"django.conf:settings\", namespace=\"CELERY\")\n\n# Because of the chicken-and-egg Django settings bootstrapping issue,\n# Celery doesn't automatically install its Django-specific patches.\n# So we need to explicitly do so ourselves:\nDjangoFixup(app).install()\n\n# Load task modules from all registered Django apps.\napp.autodiscover_tasks()\n\n\nclass NautobotKombuJSONEncoder(DjangoJSONEncoder):\n \"\"\"\n Custom json encoder based on DjangoJSONEncoder that serializes objects that implement\n the `nautobot_serialize()` method via the `__nautobot_type__` interface. This is useful\n in passing special objects to and from Celery tasks.\n\n This pattern should generally be avoided by passing pointers to persisted objects to the\n Celery tasks and retrieving them from within the task execution. While this is always possible\n for model instances (which covers 99% of use cases), for rare instances where it does not,\n and the actual object must be passed, this pattern allows for encoding and decoding\n of such objects.\n\n It requires a conforming class to implement the instance method `nautobot_serialize()` which\n returns a json serializable dictionary of the object representation. 
The class must also implement\n the `nautobot_deserialize()` class method which takes the dictionary representation and returns\n an actual instance of the class.\n \"\"\"\n\n def default(self, obj):\n if hasattr(obj, \"nautobot_serialize\"):\n cls = obj.__class__\n module = cls.__module__\n qual_name = \".\".join([module, cls.__qualname__]) # fully qualified dotted import path\n logger.debug(\"Performing nautobot serialization on %s for type %s\", obj, qual_name)\n data = {\"__nautobot_type__\": qual_name}\n data.update(obj.nautobot_serialize())\n return data\n\n elif isinstance(obj, set):\n # Convert a set to a list for passing to and from a task\n return list(obj)\n\n else:\n return DjangoJSONEncoder.default(self, obj)\n\n\ndef nautobot_kombu_json_loads_hook(data):\n \"\"\"\n In concert with the NautobotKombuJSONEncoder json encoder, this object hook method decodes\n objects that implement the `__nautobot_type__` interface via the `nautobot_deserialize()` class method.\n \"\"\"\n if \"__nautobot_type__\" in data:\n qual_name = data.pop(\"__nautobot_type__\")\n logger.debug(\"Performing nautobot deserialization for type %s\", qual_name)\n cls = import_string(qual_name) # fully qualified dotted import path\n if cls:\n return cls.nautobot_deserialize(data)\n else:\n raise TypeError(f\"Unable to import {qual_name} during nautobot deserialization\")\n else:\n return data\n\n\n# Encoder function\ndef _dumps(obj):\n return json.dumps(obj, cls=NautobotKombuJSONEncoder)\n\n\n# Decoder function\ndef _loads(obj):\n return json.loads(obj, object_hook=nautobot_kombu_json_loads_hook)\n\n\n# Register the custom serialization type\nregister(\"nautobot_json\", _dumps, _loads, content_type=\"application/x-nautobot-json\", content_encoding=\"utf-8\")\n\n\n#\n# nautobot_task\n#\n# By exposing `shared_task` within our own namespace, we leave the door open to\n# extending and expanding the usage and meaning of shared_task without having\n# to undergo further refactoring of task's decorators. We could also transparently\n# swap out shared_task to a custom base task.\n#\n\nnautobot_task = shared_task\n", "path": "nautobot/core/celery.py"}]}
| 3,200 | 212 |
gh_patches_debug_8884
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-3421
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Audit log crash on IPv6
Affected version: 8.5.0
I was just saving some preferences and faced this error:
```
DataError: ERREUR: syntaxe en entrée invalide pour le type inet : « 2001 »
LINE 1: [email protected]', 2, NULL, 1, NULL, 11, '2001', 'e...
^
SQL: INSERT INTO "sentry_auditlogentry" ("organization_id", "actor_label", "actor_id", "actor_key_id", "target_object", "target_user_id", "event", "ip_address", "data", "datetime") VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s) RETURNING "sentry_auditlogentry"."id"
```
Looks like IPv6 addresses are not handled properly.
Also reproduced by deleting a project.
Oddly this wasn't triggered on project creation and my IPv4 was logged instead of my IPv6.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sentry/middleware/proxy.py`
Content:
```
1 from __future__ import absolute_import
2
3
4 class SetRemoteAddrFromForwardedFor(object):
5 def process_request(self, request):
6 try:
7 real_ip = request.META['HTTP_X_FORWARDED_FOR']
8 except KeyError:
9 pass
10 else:
11 # HTTP_X_FORWARDED_FOR can be a comma-separated list of IPs.
12 # Take just the first one.
13 real_ip = real_ip.split(",")[0]
14 if ':' in real_ip:
15 real_ip = real_ip.split(':', 1)[0]
16 request.META['REMOTE_ADDR'] = real_ip
17
18
19 class ContentLengthHeaderMiddleware(object):
20 """
21 Ensure that we have a proper Content-Length/Transfer-Encoding header
22 """
23
24 def process_response(self, request, response):
25 if 'Transfer-Encoding' in response or 'Content-Length' in response:
26 return response
27
28 if not response.streaming:
29 response['Content-Length'] = str(len(response.content))
30
31 return response
32
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/sentry/middleware/proxy.py b/src/sentry/middleware/proxy.py
--- a/src/sentry/middleware/proxy.py
+++ b/src/sentry/middleware/proxy.py
@@ -11,7 +11,8 @@
# HTTP_X_FORWARDED_FOR can be a comma-separated list of IPs.
# Take just the first one.
real_ip = real_ip.split(",")[0]
- if ':' in real_ip:
+ if ':' in real_ip and '.' in real_ip:
+ # Strip the port number off of an IPv4 FORWARDED_FOR entry.
real_ip = real_ip.split(':', 1)[0]
request.META['REMOTE_ADDR'] = real_ip
|
{"golden_diff": "diff --git a/src/sentry/middleware/proxy.py b/src/sentry/middleware/proxy.py\n--- a/src/sentry/middleware/proxy.py\n+++ b/src/sentry/middleware/proxy.py\n@@ -11,7 +11,8 @@\n # HTTP_X_FORWARDED_FOR can be a comma-separated list of IPs.\n # Take just the first one.\n real_ip = real_ip.split(\",\")[0]\n- if ':' in real_ip:\n+ if ':' in real_ip and '.' in real_ip:\n+ # Strip the port number off of an IPv4 FORWARDED_FOR entry.\n real_ip = real_ip.split(':', 1)[0]\n request.META['REMOTE_ADDR'] = real_ip\n", "issue": "Audit log crash on IPv6\nAffected version: 8.5.0\n\nI was just saving some preferences and faced this error:\n\n```\nDataError: ERREUR: syntaxe en entr\u00e9e invalide pour le type inet : \u00ab 2001 \u00bb\nLINE 1: [email protected]', 2, NULL, 1, NULL, 11, '2001', 'e...\n ^\n\nSQL: INSERT INTO \"sentry_auditlogentry\" (\"organization_id\", \"actor_label\", \"actor_id\", \"actor_key_id\", \"target_object\", \"target_user_id\", \"event\", \"ip_address\", \"data\", \"datetime\") VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s) RETURNING \"sentry_auditlogentry\".\"id\"\n```\n\nLooks like IPv6 addresses are not handled properly.\n\nAlso reproduced by deleting a project.\nOddly this wasn't triggered on project creation and my IPv4 was logged instead of my IPv6.\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\n\nclass SetRemoteAddrFromForwardedFor(object):\n def process_request(self, request):\n try:\n real_ip = request.META['HTTP_X_FORWARDED_FOR']\n except KeyError:\n pass\n else:\n # HTTP_X_FORWARDED_FOR can be a comma-separated list of IPs.\n # Take just the first one.\n real_ip = real_ip.split(\",\")[0]\n if ':' in real_ip:\n real_ip = real_ip.split(':', 1)[0]\n request.META['REMOTE_ADDR'] = real_ip\n\n\nclass ContentLengthHeaderMiddleware(object):\n \"\"\"\n Ensure that we have a proper Content-Length/Transfer-Encoding header\n \"\"\"\n\n def process_response(self, request, response):\n if 'Transfer-Encoding' in response or 'Content-Length' in response:\n return response\n\n if not response.streaming:\n response['Content-Length'] = str(len(response.content))\n\n return response\n", "path": "src/sentry/middleware/proxy.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\n\nclass SetRemoteAddrFromForwardedFor(object):\n def process_request(self, request):\n try:\n real_ip = request.META['HTTP_X_FORWARDED_FOR']\n except KeyError:\n pass\n else:\n # HTTP_X_FORWARDED_FOR can be a comma-separated list of IPs.\n # Take just the first one.\n real_ip = real_ip.split(\",\")[0]\n if ':' in real_ip and '.' in real_ip:\n # Strip the port number off of an IPv4 FORWARDED_FOR entry.\n real_ip = real_ip.split(':', 1)[0]\n request.META['REMOTE_ADDR'] = real_ip\n\n\nclass ContentLengthHeaderMiddleware(object):\n \"\"\"\n Ensure that we have a proper Content-Length/Transfer-Encoding header\n \"\"\"\n\n def process_response(self, request, response):\n if 'Transfer-Encoding' in response or 'Content-Length' in response:\n return response\n\n if not response.streaming:\n response['Content-Length'] = str(len(response.content))\n\n return response\n", "path": "src/sentry/middleware/proxy.py"}]}
| 750 | 154 |
gh_patches_debug_36367
|
rasdani/github-patches
|
git_diff
|
searx__searx-335
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Flickr engine is broken
The html seems to have changed, but it's seems there is [REST API](https://api.flickr.com/services/rest?sort=relevance&parse_tags=1&content_type=7&extras=can_comment%2Ccount_comments%2Ccount_faves%2Cisfavorite%2Clicense%2Cmedia%2Cneeds_interstitial%2Cowner_name%2Cpath_alias%2Crealname%2Crotation%2Curl_c%2Curl_l%2Curl_m%2Curl_n%2Curl_q%2Curl_s%2Curl_sq%2Curl_t%2Curl_z&per_page=25&page=1&lang=en-US&rb=1&text=proxy&viewerNSID=&method=flickr.photos.search&csrf=&api_key=3e5918155f464baad83cce2efcf8b57e&format=json&hermes=1&hermesClient=1&reqId=rgb38n1&nojsoncallback=1)
In all parameters there is an api_key : I don't know how long it is valid, in which condition.
The call to this URL is trigger inside another minified javascript.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searx/engines/flickr_noapi.py`
Content:
```
1 #!/usr/bin/env python
2
3 """
4 Flickr (Images)
5
6 @website https://www.flickr.com
7 @provide-api yes (https://secure.flickr.com/services/api/flickr.photos.search.html)
8
9 @using-api no
10 @results HTML
11 @stable no
12 @parse url, title, thumbnail, img_src
13 """
14
15 from urllib import urlencode
16 from json import loads
17 import re
18 from searx.engines import logger
19
20
21 logger = logger.getChild('flickr-noapi')
22
23 categories = ['images']
24
25 url = 'https://www.flickr.com/'
26 search_url = url + 'search?{query}&page={page}'
27 photo_url = 'https://www.flickr.com/photos/{userid}/{photoid}'
28 regex = re.compile(r"\"search-photos-models\",\"photos\":(.*}),\"totalItems\":", re.DOTALL)
29 image_sizes = ('o', 'k', 'h', 'b', 'c', 'z', 'n', 'm', 't', 'q', 's')
30
31 paging = True
32
33
34 def build_flickr_url(user_id, photo_id):
35 return photo_url.format(userid=user_id, photoid=photo_id)
36
37
38 def request(query, params):
39 params['url'] = search_url.format(query=urlencode({'text': query}),
40 page=params['pageno'])
41 return params
42
43
44 def response(resp):
45 results = []
46
47 matches = regex.search(resp.text)
48
49 if matches is None:
50 return results
51
52 match = matches.group(1)
53 search_results = loads(match)
54
55 if '_data' not in search_results:
56 return []
57
58 photos = search_results['_data']
59
60 for photo in photos:
61
62 # In paged configuration, the first pages' photos
63 # are represented by a None object
64 if photo is None:
65 continue
66
67 img_src = None
68 # From the biggest to the lowest format
69 for image_size in image_sizes:
70 if image_size in photo['sizes']:
71 img_src = photo['sizes'][image_size]['url']
72 break
73
74 if not img_src:
75 logger.debug('cannot find valid image size: {0}'.format(repr(photo)))
76 continue
77
78 if 'id' not in photo['owner']:
79 continue
80
81 # For a bigger thumbnail, keep only the url_z, not the url_n
82 if 'n' in photo['sizes']:
83 thumbnail_src = photo['sizes']['n']['url']
84 elif 'z' in photo['sizes']:
85 thumbnail_src = photo['sizes']['z']['url']
86 else:
87 thumbnail_src = img_src
88
89 url = build_flickr_url(photo['owner']['id'], photo['id'])
90
91 title = photo.get('title', '')
92
93 content = '<span class="photo-author">' +\
94 photo['owner']['username'] +\
95 '</span><br />'
96
97 if 'description' in photo:
98 content = content +\
99 '<span class="description">' +\
100 photo['description'] +\
101 '</span>'
102
103 # append result
104 results.append({'url': url,
105 'title': title,
106 'img_src': img_src,
107 'thumbnail_src': thumbnail_src,
108 'content': content,
109 'template': 'images.html'})
110
111 return results
112
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/searx/engines/flickr_noapi.py b/searx/engines/flickr_noapi.py
--- a/searx/engines/flickr_noapi.py
+++ b/searx/engines/flickr_noapi.py
@@ -25,7 +25,7 @@
url = 'https://www.flickr.com/'
search_url = url + 'search?{query}&page={page}'
photo_url = 'https://www.flickr.com/photos/{userid}/{photoid}'
-regex = re.compile(r"\"search-photos-models\",\"photos\":(.*}),\"totalItems\":", re.DOTALL)
+regex = re.compile(r"\"search-photos-lite-models\",\"photos\":(.*}),\"totalItems\":", re.DOTALL)
image_sizes = ('o', 'k', 'h', 'b', 'c', 'z', 'n', 'm', 't', 'q', 's')
paging = True
@@ -38,6 +38,7 @@
def request(query, params):
params['url'] = search_url.format(query=urlencode({'text': query}),
page=params['pageno'])
+
return params
@@ -75,10 +76,10 @@
logger.debug('cannot find valid image size: {0}'.format(repr(photo)))
continue
- if 'id' not in photo['owner']:
+ if 'ownerNsid' not in photo:
continue
-# For a bigger thumbnail, keep only the url_z, not the url_n
+ # For a bigger thumbnail, keep only the url_z, not the url_n
if 'n' in photo['sizes']:
thumbnail_src = photo['sizes']['n']['url']
elif 'z' in photo['sizes']:
@@ -86,20 +87,14 @@
else:
thumbnail_src = img_src
- url = build_flickr_url(photo['owner']['id'], photo['id'])
+ url = build_flickr_url(photo['ownerNsid'], photo['id'])
title = photo.get('title', '')
content = '<span class="photo-author">' +\
- photo['owner']['username'] +\
+ photo['username'] +\
'</span><br />'
- if 'description' in photo:
- content = content +\
- '<span class="description">' +\
- photo['description'] +\
- '</span>'
-
# append result
results.append({'url': url,
'title': title,
|
{"golden_diff": "diff --git a/searx/engines/flickr_noapi.py b/searx/engines/flickr_noapi.py\n--- a/searx/engines/flickr_noapi.py\n+++ b/searx/engines/flickr_noapi.py\n@@ -25,7 +25,7 @@\n url = 'https://www.flickr.com/'\n search_url = url + 'search?{query}&page={page}'\n photo_url = 'https://www.flickr.com/photos/{userid}/{photoid}'\n-regex = re.compile(r\"\\\"search-photos-models\\\",\\\"photos\\\":(.*}),\\\"totalItems\\\":\", re.DOTALL)\n+regex = re.compile(r\"\\\"search-photos-lite-models\\\",\\\"photos\\\":(.*}),\\\"totalItems\\\":\", re.DOTALL)\n image_sizes = ('o', 'k', 'h', 'b', 'c', 'z', 'n', 'm', 't', 'q', 's')\n \n paging = True\n@@ -38,6 +38,7 @@\n def request(query, params):\n params['url'] = search_url.format(query=urlencode({'text': query}),\n page=params['pageno'])\n+\n return params\n \n \n@@ -75,10 +76,10 @@\n logger.debug('cannot find valid image size: {0}'.format(repr(photo)))\n continue\n \n- if 'id' not in photo['owner']:\n+ if 'ownerNsid' not in photo:\n continue\n \n-# For a bigger thumbnail, keep only the url_z, not the url_n\n+ # For a bigger thumbnail, keep only the url_z, not the url_n\n if 'n' in photo['sizes']:\n thumbnail_src = photo['sizes']['n']['url']\n elif 'z' in photo['sizes']:\n@@ -86,20 +87,14 @@\n else:\n thumbnail_src = img_src\n \n- url = build_flickr_url(photo['owner']['id'], photo['id'])\n+ url = build_flickr_url(photo['ownerNsid'], photo['id'])\n \n title = photo.get('title', '')\n \n content = '<span class=\"photo-author\">' +\\\n- photo['owner']['username'] +\\\n+ photo['username'] +\\\n '</span><br />'\n \n- if 'description' in photo:\n- content = content +\\\n- '<span class=\"description\">' +\\\n- photo['description'] +\\\n- '</span>'\n-\n # append result\n results.append({'url': url,\n 'title': title,\n", "issue": "Flickr engine is broken\nThe html seems to have changed, but it's seems there is [REST API](https://api.flickr.com/services/rest?sort=relevance&parse_tags=1&content_type=7&extras=can_comment%2Ccount_comments%2Ccount_faves%2Cisfavorite%2Clicense%2Cmedia%2Cneeds_interstitial%2Cowner_name%2Cpath_alias%2Crealname%2Crotation%2Curl_c%2Curl_l%2Curl_m%2Curl_n%2Curl_q%2Curl_s%2Curl_sq%2Curl_t%2Curl_z&per_page=25&page=1&lang=en-US&rb=1&text=proxy&viewerNSID=&method=flickr.photos.search&csrf=&api_key=3e5918155f464baad83cce2efcf8b57e&format=json&hermes=1&hermesClient=1&reqId=rgb38n1&nojsoncallback=1)\n\nIn all parameters there is an api_key : I don't know how long it is valid, in which condition.\nThe call to this URL is trigger inside another minified javascript.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\n Flickr (Images)\n\n @website https://www.flickr.com\n @provide-api yes (https://secure.flickr.com/services/api/flickr.photos.search.html)\n\n @using-api no\n @results HTML\n @stable no\n @parse url, title, thumbnail, img_src\n\"\"\"\n\nfrom urllib import urlencode\nfrom json import loads\nimport re\nfrom searx.engines import logger\n\n\nlogger = logger.getChild('flickr-noapi')\n\ncategories = ['images']\n\nurl = 'https://www.flickr.com/'\nsearch_url = url + 'search?{query}&page={page}'\nphoto_url = 'https://www.flickr.com/photos/{userid}/{photoid}'\nregex = re.compile(r\"\\\"search-photos-models\\\",\\\"photos\\\":(.*}),\\\"totalItems\\\":\", re.DOTALL)\nimage_sizes = ('o', 'k', 'h', 'b', 'c', 'z', 'n', 'm', 't', 'q', 's')\n\npaging = True\n\n\ndef build_flickr_url(user_id, photo_id):\n return photo_url.format(userid=user_id, photoid=photo_id)\n\n\ndef request(query, params):\n params['url'] = 
search_url.format(query=urlencode({'text': query}),\n page=params['pageno'])\n return params\n\n\ndef response(resp):\n results = []\n\n matches = regex.search(resp.text)\n\n if matches is None:\n return results\n\n match = matches.group(1)\n search_results = loads(match)\n\n if '_data' not in search_results:\n return []\n\n photos = search_results['_data']\n\n for photo in photos:\n\n # In paged configuration, the first pages' photos\n # are represented by a None object\n if photo is None:\n continue\n\n img_src = None\n # From the biggest to the lowest format\n for image_size in image_sizes:\n if image_size in photo['sizes']:\n img_src = photo['sizes'][image_size]['url']\n break\n\n if not img_src:\n logger.debug('cannot find valid image size: {0}'.format(repr(photo)))\n continue\n\n if 'id' not in photo['owner']:\n continue\n\n# For a bigger thumbnail, keep only the url_z, not the url_n\n if 'n' in photo['sizes']:\n thumbnail_src = photo['sizes']['n']['url']\n elif 'z' in photo['sizes']:\n thumbnail_src = photo['sizes']['z']['url']\n else:\n thumbnail_src = img_src\n\n url = build_flickr_url(photo['owner']['id'], photo['id'])\n\n title = photo.get('title', '')\n\n content = '<span class=\"photo-author\">' +\\\n photo['owner']['username'] +\\\n '</span><br />'\n\n if 'description' in photo:\n content = content +\\\n '<span class=\"description\">' +\\\n photo['description'] +\\\n '</span>'\n\n # append result\n results.append({'url': url,\n 'title': title,\n 'img_src': img_src,\n 'thumbnail_src': thumbnail_src,\n 'content': content,\n 'template': 'images.html'})\n\n return results\n", "path": "searx/engines/flickr_noapi.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\n Flickr (Images)\n\n @website https://www.flickr.com\n @provide-api yes (https://secure.flickr.com/services/api/flickr.photos.search.html)\n\n @using-api no\n @results HTML\n @stable no\n @parse url, title, thumbnail, img_src\n\"\"\"\n\nfrom urllib import urlencode\nfrom json import loads\nimport re\nfrom searx.engines import logger\n\n\nlogger = logger.getChild('flickr-noapi')\n\ncategories = ['images']\n\nurl = 'https://www.flickr.com/'\nsearch_url = url + 'search?{query}&page={page}'\nphoto_url = 'https://www.flickr.com/photos/{userid}/{photoid}'\nregex = re.compile(r\"\\\"search-photos-lite-models\\\",\\\"photos\\\":(.*}),\\\"totalItems\\\":\", re.DOTALL)\nimage_sizes = ('o', 'k', 'h', 'b', 'c', 'z', 'n', 'm', 't', 'q', 's')\n\npaging = True\n\n\ndef build_flickr_url(user_id, photo_id):\n return photo_url.format(userid=user_id, photoid=photo_id)\n\n\ndef request(query, params):\n params['url'] = search_url.format(query=urlencode({'text': query}),\n page=params['pageno'])\n\n return params\n\n\ndef response(resp):\n results = []\n\n matches = regex.search(resp.text)\n\n if matches is None:\n return results\n\n match = matches.group(1)\n search_results = loads(match)\n\n if '_data' not in search_results:\n return []\n\n photos = search_results['_data']\n\n for photo in photos:\n\n # In paged configuration, the first pages' photos\n # are represented by a None object\n if photo is None:\n continue\n\n img_src = None\n # From the biggest to the lowest format\n for image_size in image_sizes:\n if image_size in photo['sizes']:\n img_src = photo['sizes'][image_size]['url']\n break\n\n if not img_src:\n logger.debug('cannot find valid image size: {0}'.format(repr(photo)))\n continue\n\n if 'ownerNsid' not in photo:\n continue\n\n # For a bigger thumbnail, keep only the url_z, not the url_n\n if 'n' in 
photo['sizes']:\n thumbnail_src = photo['sizes']['n']['url']\n elif 'z' in photo['sizes']:\n thumbnail_src = photo['sizes']['z']['url']\n else:\n thumbnail_src = img_src\n\n url = build_flickr_url(photo['ownerNsid'], photo['id'])\n\n title = photo.get('title', '')\n\n content = '<span class=\"photo-author\">' +\\\n photo['username'] +\\\n '</span><br />'\n\n # append result\n results.append({'url': url,\n 'title': title,\n 'img_src': img_src,\n 'thumbnail_src': thumbnail_src,\n 'content': content,\n 'template': 'images.html'})\n\n return results\n", "path": "searx/engines/flickr_noapi.py"}]}
| 1,483 | 557 |
gh_patches_debug_2152
|
rasdani/github-patches
|
git_diff
|
wright-group__WrightTools-552
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
setter for null
Currently null is not settable on a channel
It can be worked around with `channel.attrs['null']`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `WrightTools/data/_channel.py`
Content:
```
1 """Channel class and associated."""
2
3
4 # --- import --------------------------------------------------------------------------------------
5
6
7 import numpy as np
8
9 import h5py
10
11 from .. import kit as wt_kit
12 from .._dataset import Dataset
13
14
15 # --- class ---------------------------------------------------------------------------------------
16
17
18 class Channel(Dataset):
19 """Channel."""
20
21 class_name = 'Channel'
22
23 def __init__(self, parent, id, *, units=None, null=None, signed=None, label=None,
24 label_seed=None, **kwargs):
25 """Construct a channel object.
26
27 Parameters
28 ----------
29 values : array-like
30 Values.
31 name : string
32 Channel name.
33 units : string (optional)
34 Channel units. Default is None.
35 null : number (optional)
36 Channel null. Default is None (0).
37 signed : booelan (optional)
38 Channel signed flag. Default is None (guess).
39 label : string.
40 Label. Default is None.
41 label_seed : list of strings
42 Label seed. Default is None.
43 **kwargs
44 Additional keyword arguments are added to the attrs dictionary
45 and to the natural namespace of the object (if possible).
46 """
47 self._parent = parent
48 super().__init__(id)
49 self.label = label
50 self.label_seed = label_seed
51 self.units = units
52 self.dimensionality = len(self.shape)
53 # attrs
54 self.attrs.update(kwargs)
55 self.attrs['name'] = h5py.h5i.get_name(self.id).decode().split('/')[-1]
56 self.attrs['class'] = 'Channel'
57 if signed is not None:
58 self.attrs['signed'] = signed
59 if null is not None:
60 self.attrs['null'] = null
61 for key, value in self.attrs.items():
62 identifier = wt_kit.string2identifier(key)
63 if not hasattr(self, identifier):
64 setattr(self, identifier, value)
65
66 @property
67 def minor_extent(self):
68 """Minimum deviation from null."""
69 return min((self.max() - self.null, self.null - self.min()))
70
71 @property
72 def natural_name(self):
73 """Natural name of the dataset. May be different from name."""
74 try:
75 assert self._natural_name is not None
76 except (AssertionError, AttributeError):
77 self._natural_name = self.attrs['name']
78 finally:
79 return self._natural_name
80
81 @natural_name.setter
82 def natural_name(self, value):
83 index = wt_kit.get_index(self.parent.channel_names, self.natural_name)
84 new = list(self.parent.channel_names)
85 new[index] = value
86 self.parent.channel_names = new
87 self.attrs['name'] = value
88 self._natural_name = None
89
90 @property
91 def null(self):
92 if 'null' not in self.attrs.keys():
93 self.attrs['null'] = 0
94 return self.attrs['null']
95
96 @property
97 def major_extent(self):
98 """Maximum deviation from null."""
99 return max((self.max() - self.null, self.null - self.min()))
100
101 @property
102 def signed(self):
103 if 'signed' not in self.attrs.keys():
104 self.attrs['signed'] = False
105 return self.attrs['signed']
106
107 @signed.setter
108 def signed(self, value):
109 self.attrs['signed'] = value
110
111 def mag(self):
112 """Channel magnitude (maximum deviation from null)."""
113 return self.major_extent
114
115 def normalize(self):
116 """Normalize a Channel, set `null` to 0 and the mag to 1."""
117 def f(dataset, s, null, mag):
118 dataset[s] -= null
119 dataset[s] /= mag
120 if self.signed:
121 mag = self.mag()
122 else:
123 mag = self.max()
124 self.chunkwise(f, null=self.null, mag=mag)
125 self._null = 0
126
127 def trim(self, neighborhood, method='ztest', factor=3, replace='nan',
128 verbose=True):
129 """Remove outliers from the dataset.
130
131 Identifies outliers by comparing each point to its
132 neighbors using a statistical test.
133
134 Parameters
135 ----------
136 neighborhood : list of integers
137 Size of the neighborhood in each dimension. Length of the list must
138 be equal to the dimensionality of the channel.
139 method : {'ztest'} (optional)
140 Statistical test used to detect outliers. Default is ztest.
141
142 ztest
143 Compare point deviation from neighborhood mean to neighborhood
144 standard deviation.
145
146 factor : number (optional)
147 Tolerance factor. Default is 3.
148 replace : {'nan', 'mean', 'mask', number} (optional)
149 Behavior of outlier replacement. Default is nan.
150
151 nan
152 Outliers are replaced by numpy nans.
153
154 mean
155 Outliers are replaced by the mean of its neighborhood.
156
157 mask
158 Array is masked at outliers.
159
160 number
161 Array becomes given number.
162
163 Returns
164 -------
165 list of tuples
166 Indicies of trimmed outliers.
167
168 See Also
169 --------
170 clip
171 Remove pixels outside of a certain range.
172 """
173 raise NotImplementedError
174 outliers = []
175 means = []
176 # find outliers
177 for idx in np.ndindex(self.shape):
178 slices = []
179 for i, di, size in zip(idx, neighborhood, self.shape):
180 start = max(0, i - di)
181 stop = min(size, i + di + 1)
182 slices.append(slice(start, stop, 1))
183 neighbors = self[slices]
184 mean = np.nanmean(neighbors)
185 limit = np.nanstd(neighbors) * factor
186 if np.abs(self[idx] - mean) > limit:
187 outliers.append(idx)
188 means.append(mean)
189 # replace outliers
190 i = tuple(zip(*outliers))
191 if replace == 'nan':
192 self[i] = np.nan
193 elif replace == 'mean':
194 self[i] = means
195 elif replace == 'mask':
196 self[:] = np.ma.array(self[:])
197 self[i] = np.ma.masked
198 elif type(replace) in [int, float]:
199 self[i] = replace
200 else:
201 raise KeyError('replace must be one of {nan, mean, mask} or some number')
202 # finish
203 if verbose:
204 print('%i outliers removed' % len(outliers))
205 return outliers
206
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/WrightTools/data/_channel.py b/WrightTools/data/_channel.py
--- a/WrightTools/data/_channel.py
+++ b/WrightTools/data/_channel.py
@@ -93,6 +93,10 @@
self.attrs['null'] = 0
return self.attrs['null']
+ @null.setter
+ def null(self, value):
+ self.attrs['null'] = value
+
@property
def major_extent(self):
"""Maximum deviation from null."""
|
{"golden_diff": "diff --git a/WrightTools/data/_channel.py b/WrightTools/data/_channel.py\n--- a/WrightTools/data/_channel.py\n+++ b/WrightTools/data/_channel.py\n@@ -93,6 +93,10 @@\n self.attrs['null'] = 0\n return self.attrs['null']\n \n+ @null.setter\n+ def null(self, value):\n+ self.attrs['null'] = value\n+\n @property\n def major_extent(self):\n \"\"\"Maximum deviation from null.\"\"\"\n", "issue": "setter for null\nCurrently null is not settable on a channel\r\n\r\nIt can be worked around with `channel.attrs['null']`\n", "before_files": [{"content": "\"\"\"Channel class and associated.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport numpy as np\n\nimport h5py\n\nfrom .. import kit as wt_kit\nfrom .._dataset import Dataset\n\n\n# --- class ---------------------------------------------------------------------------------------\n\n\nclass Channel(Dataset):\n \"\"\"Channel.\"\"\"\n\n class_name = 'Channel'\n\n def __init__(self, parent, id, *, units=None, null=None, signed=None, label=None,\n label_seed=None, **kwargs):\n \"\"\"Construct a channel object.\n\n Parameters\n ----------\n values : array-like\n Values.\n name : string\n Channel name.\n units : string (optional)\n Channel units. Default is None.\n null : number (optional)\n Channel null. Default is None (0).\n signed : booelan (optional)\n Channel signed flag. Default is None (guess).\n label : string.\n Label. Default is None.\n label_seed : list of strings\n Label seed. Default is None.\n **kwargs\n Additional keyword arguments are added to the attrs dictionary\n and to the natural namespace of the object (if possible).\n \"\"\"\n self._parent = parent\n super().__init__(id)\n self.label = label\n self.label_seed = label_seed\n self.units = units\n self.dimensionality = len(self.shape)\n # attrs\n self.attrs.update(kwargs)\n self.attrs['name'] = h5py.h5i.get_name(self.id).decode().split('/')[-1]\n self.attrs['class'] = 'Channel'\n if signed is not None:\n self.attrs['signed'] = signed\n if null is not None:\n self.attrs['null'] = null\n for key, value in self.attrs.items():\n identifier = wt_kit.string2identifier(key)\n if not hasattr(self, identifier):\n setattr(self, identifier, value)\n\n @property\n def minor_extent(self):\n \"\"\"Minimum deviation from null.\"\"\"\n return min((self.max() - self.null, self.null - self.min()))\n\n @property\n def natural_name(self):\n \"\"\"Natural name of the dataset. 
May be different from name.\"\"\"\n try:\n assert self._natural_name is not None\n except (AssertionError, AttributeError):\n self._natural_name = self.attrs['name']\n finally:\n return self._natural_name\n\n @natural_name.setter\n def natural_name(self, value):\n index = wt_kit.get_index(self.parent.channel_names, self.natural_name)\n new = list(self.parent.channel_names)\n new[index] = value\n self.parent.channel_names = new\n self.attrs['name'] = value\n self._natural_name = None\n\n @property\n def null(self):\n if 'null' not in self.attrs.keys():\n self.attrs['null'] = 0\n return self.attrs['null']\n\n @property\n def major_extent(self):\n \"\"\"Maximum deviation from null.\"\"\"\n return max((self.max() - self.null, self.null - self.min()))\n\n @property\n def signed(self):\n if 'signed' not in self.attrs.keys():\n self.attrs['signed'] = False\n return self.attrs['signed']\n\n @signed.setter\n def signed(self, value):\n self.attrs['signed'] = value\n\n def mag(self):\n \"\"\"Channel magnitude (maximum deviation from null).\"\"\"\n return self.major_extent\n\n def normalize(self):\n \"\"\"Normalize a Channel, set `null` to 0 and the mag to 1.\"\"\"\n def f(dataset, s, null, mag):\n dataset[s] -= null\n dataset[s] /= mag\n if self.signed:\n mag = self.mag()\n else:\n mag = self.max()\n self.chunkwise(f, null=self.null, mag=mag)\n self._null = 0\n\n def trim(self, neighborhood, method='ztest', factor=3, replace='nan',\n verbose=True):\n \"\"\"Remove outliers from the dataset.\n\n Identifies outliers by comparing each point to its\n neighbors using a statistical test.\n\n Parameters\n ----------\n neighborhood : list of integers\n Size of the neighborhood in each dimension. Length of the list must\n be equal to the dimensionality of the channel.\n method : {'ztest'} (optional)\n Statistical test used to detect outliers. Default is ztest.\n\n ztest\n Compare point deviation from neighborhood mean to neighborhood\n standard deviation.\n\n factor : number (optional)\n Tolerance factor. Default is 3.\n replace : {'nan', 'mean', 'mask', number} (optional)\n Behavior of outlier replacement. 
Default is nan.\n\n nan\n Outliers are replaced by numpy nans.\n\n mean\n Outliers are replaced by the mean of its neighborhood.\n\n mask\n Array is masked at outliers.\n\n number\n Array becomes given number.\n\n Returns\n -------\n list of tuples\n Indicies of trimmed outliers.\n\n See Also\n --------\n clip\n Remove pixels outside of a certain range.\n \"\"\"\n raise NotImplementedError\n outliers = []\n means = []\n # find outliers\n for idx in np.ndindex(self.shape):\n slices = []\n for i, di, size in zip(idx, neighborhood, self.shape):\n start = max(0, i - di)\n stop = min(size, i + di + 1)\n slices.append(slice(start, stop, 1))\n neighbors = self[slices]\n mean = np.nanmean(neighbors)\n limit = np.nanstd(neighbors) * factor\n if np.abs(self[idx] - mean) > limit:\n outliers.append(idx)\n means.append(mean)\n # replace outliers\n i = tuple(zip(*outliers))\n if replace == 'nan':\n self[i] = np.nan\n elif replace == 'mean':\n self[i] = means\n elif replace == 'mask':\n self[:] = np.ma.array(self[:])\n self[i] = np.ma.masked\n elif type(replace) in [int, float]:\n self[i] = replace\n else:\n raise KeyError('replace must be one of {nan, mean, mask} or some number')\n # finish\n if verbose:\n print('%i outliers removed' % len(outliers))\n return outliers\n", "path": "WrightTools/data/_channel.py"}], "after_files": [{"content": "\"\"\"Channel class and associated.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport numpy as np\n\nimport h5py\n\nfrom .. import kit as wt_kit\nfrom .._dataset import Dataset\n\n\n# --- class ---------------------------------------------------------------------------------------\n\n\nclass Channel(Dataset):\n \"\"\"Channel.\"\"\"\n\n class_name = 'Channel'\n\n def __init__(self, parent, id, *, units=None, null=None, signed=None, label=None,\n label_seed=None, **kwargs):\n \"\"\"Construct a channel object.\n\n Parameters\n ----------\n values : array-like\n Values.\n name : string\n Channel name.\n units : string (optional)\n Channel units. Default is None.\n null : number (optional)\n Channel null. Default is None (0).\n signed : booelan (optional)\n Channel signed flag. Default is None (guess).\n label : string.\n Label. Default is None.\n label_seed : list of strings\n Label seed. Default is None.\n **kwargs\n Additional keyword arguments are added to the attrs dictionary\n and to the natural namespace of the object (if possible).\n \"\"\"\n self._parent = parent\n super().__init__(id)\n self.label = label\n self.label_seed = label_seed\n self.units = units\n self.dimensionality = len(self.shape)\n # attrs\n self.attrs.update(kwargs)\n self.attrs['name'] = h5py.h5i.get_name(self.id).decode().split('/')[-1]\n self.attrs['class'] = 'Channel'\n if signed is not None:\n self.attrs['signed'] = signed\n if null is not None:\n self.attrs['null'] = null\n for key, value in self.attrs.items():\n identifier = wt_kit.string2identifier(key)\n if not hasattr(self, identifier):\n setattr(self, identifier, value)\n\n @property\n def minor_extent(self):\n \"\"\"Minimum deviation from null.\"\"\"\n return min((self.max() - self.null, self.null - self.min()))\n\n @property\n def natural_name(self):\n \"\"\"Natural name of the dataset. 
May be different from name.\"\"\"\n try:\n assert self._natural_name is not None\n except (AssertionError, AttributeError):\n self._natural_name = self.attrs['name']\n finally:\n return self._natural_name\n\n @natural_name.setter\n def natural_name(self, value):\n index = wt_kit.get_index(self.parent.channel_names, self.natural_name)\n new = list(self.parent.channel_names)\n new[index] = value\n self.parent.channel_names = new\n self.attrs['name'] = value\n self._natural_name = None\n\n @property\n def null(self):\n if 'null' not in self.attrs.keys():\n self.attrs['null'] = 0\n return self.attrs['null']\n\n @null.setter\n def null(self, value):\n self.attrs['null'] = value\n\n @property\n def major_extent(self):\n \"\"\"Maximum deviation from null.\"\"\"\n return max((self.max() - self.null, self.null - self.min()))\n\n @property\n def signed(self):\n if 'signed' not in self.attrs.keys():\n self.attrs['signed'] = False\n return self.attrs['signed']\n\n @signed.setter\n def signed(self, value):\n self.attrs['signed'] = value\n\n def mag(self):\n \"\"\"Channel magnitude (maximum deviation from null).\"\"\"\n return self.major_extent\n\n def normalize(self):\n \"\"\"Normalize a Channel, set `null` to 0 and the mag to 1.\"\"\"\n def f(dataset, s, null, mag):\n dataset[s] -= null\n dataset[s] /= mag\n if self.signed:\n mag = self.mag()\n else:\n mag = self.max()\n self.chunkwise(f, null=self.null, mag=mag)\n self._null = 0\n\n def trim(self, neighborhood, method='ztest', factor=3, replace='nan',\n verbose=True):\n \"\"\"Remove outliers from the dataset.\n\n Identifies outliers by comparing each point to its\n neighbors using a statistical test.\n\n Parameters\n ----------\n neighborhood : list of integers\n Size of the neighborhood in each dimension. Length of the list must\n be equal to the dimensionality of the channel.\n method : {'ztest'} (optional)\n Statistical test used to detect outliers. Default is ztest.\n\n ztest\n Compare point deviation from neighborhood mean to neighborhood\n standard deviation.\n\n factor : number (optional)\n Tolerance factor. Default is 3.\n replace : {'nan', 'mean', 'mask', number} (optional)\n Behavior of outlier replacement. Default is nan.\n\n nan\n Outliers are replaced by numpy nans.\n\n mean\n Outliers are replaced by the mean of its neighborhood.\n\n mask\n Array is masked at outliers.\n\n number\n Array becomes given number.\n\n Returns\n -------\n list of tuples\n Indicies of trimmed outliers.\n\n See Also\n --------\n clip\n Remove pixels outside of a certain range.\n \"\"\"\n raise NotImplementedError\n outliers = []\n means = []\n # find outliers\n for idx in np.ndindex(self.shape):\n slices = []\n for i, di, size in zip(idx, neighborhood, self.shape):\n start = max(0, i - di)\n stop = min(size, i + di + 1)\n slices.append(slice(start, stop, 1))\n neighbors = self[slices]\n mean = np.nanmean(neighbors)\n limit = np.nanstd(neighbors) * factor\n if np.abs(self[idx] - mean) > limit:\n outliers.append(idx)\n means.append(mean)\n # replace outliers\n i = tuple(zip(*outliers))\n if replace == 'nan':\n self[i] = np.nan\n elif replace == 'mean':\n self[i] = means\n elif replace == 'mask':\n self[:] = np.ma.array(self[:])\n self[i] = np.ma.masked\n elif type(replace) in [int, float]:\n self[i] = replace\n else:\n raise KeyError('replace must be one of {nan, mean, mask} or some number')\n # finish\n if verbose:\n print('%i outliers removed' % len(outliers))\n return outliers\n", "path": "WrightTools/data/_channel.py"}]}
| 2,159 | 113 |
gh_patches_debug_35825
|
rasdani/github-patches
|
git_diff
|
facebookresearch__hydra-2174
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add more details to Callback doc
(from zulip chat)
> When are the Callbacks executed in the hydra stack exactly? More specifically, are they executed after the launchers in the environments that the launchers provide? (Would be awesome to add a sentence about this in the docs)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hydra/experimental/callback.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 import logging
3 from typing import Any
4
5 from omegaconf import DictConfig
6
7 from hydra.core.utils import JobReturn
8
9 logger = logging.getLogger(__name__)
10
11
12 class Callback:
13 def on_run_start(self, config: DictConfig, **kwargs: Any) -> None:
14 """
15 Called in RUN mode before job starts.
16 """
17 ...
18
19 def on_run_end(self, config: DictConfig, **kwargs: Any) -> None:
20 """
21 Called in RUN mode after job ends.
22 """
23 ...
24
25 def on_multirun_start(self, config: DictConfig, **kwargs: Any) -> None:
26 """
27 Called in MULTIRUN mode before any job starts.
28 """
29 ...
30
31 def on_multirun_end(self, config: DictConfig, **kwargs: Any) -> None:
32 """
33 Called in MULTIRUN mode after all jobs end.
34 """
35 ...
36
37 def on_job_start(self, config: DictConfig, **kwargs: Any) -> None:
38 """
39 Called in both RUN and MULTIRUN modes, once for each Hydra job (before running
40 application code).
41 """
42 ...
43
44 def on_job_end(
45 self, config: DictConfig, job_return: JobReturn, **kwargs: Any
46 ) -> None:
47 """
48 Called in both RUN and MULTIRUN modes, once for each Hydra job (after running
49 application code).
50 """
51 ...
52
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/hydra/experimental/callback.py b/hydra/experimental/callback.py
--- a/hydra/experimental/callback.py
+++ b/hydra/experimental/callback.py
@@ -12,32 +12,37 @@
class Callback:
def on_run_start(self, config: DictConfig, **kwargs: Any) -> None:
"""
- Called in RUN mode before job starts.
+ Called in RUN mode before job/application code starts. `config` is composed with overrides.
+ Some `hydra.runtime` configs are not populated yet.
+ See hydra.core.utils.run_job for more info.
"""
...
def on_run_end(self, config: DictConfig, **kwargs: Any) -> None:
"""
- Called in RUN mode after job ends.
+ Called in RUN mode after job/application code returns.
"""
...
def on_multirun_start(self, config: DictConfig, **kwargs: Any) -> None:
"""
Called in MULTIRUN mode before any job starts.
+ When using a launcher, this will be executed on local machine before any Sweeper/Launcher is initialized.
"""
...
def on_multirun_end(self, config: DictConfig, **kwargs: Any) -> None:
"""
- Called in MULTIRUN mode after all jobs end.
+ Called in MULTIRUN mode after all jobs returns.
+ When using a launcher, this will be executed on local machine.
"""
...
def on_job_start(self, config: DictConfig, **kwargs: Any) -> None:
"""
- Called in both RUN and MULTIRUN modes, once for each Hydra job (before running
- application code).
+ Called in both RUN and MULTIRUN modes, once for each Hydra job (before running application code).
+ This is called from within `hydra.core.utils.run_job`. In the case of remote launching, this will be executed
+ on the remote server along with your application code.
"""
...
@@ -47,5 +52,10 @@
"""
Called in both RUN and MULTIRUN modes, once for each Hydra job (after running
application code).
+ This is called from within `hydra.core.utils.run_job`. In the case of remote launching, this will be executed
+ on the remote server after your application code.
+
+ `job_return` contains info that could be useful for logging or post-processing.
+ See hydra.core.utils.JobReturn for more.
"""
...
|
{"golden_diff": "diff --git a/hydra/experimental/callback.py b/hydra/experimental/callback.py\n--- a/hydra/experimental/callback.py\n+++ b/hydra/experimental/callback.py\n@@ -12,32 +12,37 @@\n class Callback:\n def on_run_start(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n- Called in RUN mode before job starts.\n+ Called in RUN mode before job/application code starts. `config` is composed with overrides.\n+ Some `hydra.runtime` configs are not populated yet.\n+ See hydra.core.utils.run_job for more info.\n \"\"\"\n ...\n \n def on_run_end(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n- Called in RUN mode after job ends.\n+ Called in RUN mode after job/application code returns.\n \"\"\"\n ...\n \n def on_multirun_start(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n Called in MULTIRUN mode before any job starts.\n+ When using a launcher, this will be executed on local machine before any Sweeper/Launcher is initialized.\n \"\"\"\n ...\n \n def on_multirun_end(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n- Called in MULTIRUN mode after all jobs end.\n+ Called in MULTIRUN mode after all jobs returns.\n+ When using a launcher, this will be executed on local machine.\n \"\"\"\n ...\n \n def on_job_start(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n- Called in both RUN and MULTIRUN modes, once for each Hydra job (before running\n- application code).\n+ Called in both RUN and MULTIRUN modes, once for each Hydra job (before running application code).\n+ This is called from within `hydra.core.utils.run_job`. In the case of remote launching, this will be executed\n+ on the remote server along with your application code.\n \"\"\"\n ...\n \n@@ -47,5 +52,10 @@\n \"\"\"\n Called in both RUN and MULTIRUN modes, once for each Hydra job (after running\n application code).\n+ This is called from within `hydra.core.utils.run_job`. In the case of remote launching, this will be executed\n+ on the remote server after your application code.\n+\n+ `job_return` contains info that could be useful for logging or post-processing.\n+ See hydra.core.utils.JobReturn for more.\n \"\"\"\n ...\n", "issue": "Add more details to Callback doc\n(from zulip chat)\r\n> When are the Callbacks executed in the hydra stack exactly? More specifically, are they executed after the launchers in the environments that the launchers provide? (Would be awesome to add a sentence about this in the docs)\r\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nimport logging\nfrom typing import Any\n\nfrom omegaconf import DictConfig\n\nfrom hydra.core.utils import JobReturn\n\nlogger = logging.getLogger(__name__)\n\n\nclass Callback:\n def on_run_start(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n Called in RUN mode before job starts.\n \"\"\"\n ...\n\n def on_run_end(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n Called in RUN mode after job ends.\n \"\"\"\n ...\n\n def on_multirun_start(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n Called in MULTIRUN mode before any job starts.\n \"\"\"\n ...\n\n def on_multirun_end(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n Called in MULTIRUN mode after all jobs end.\n \"\"\"\n ...\n\n def on_job_start(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n Called in both RUN and MULTIRUN modes, once for each Hydra job (before running\n application code).\n \"\"\"\n ...\n\n def on_job_end(\n self, config: DictConfig, job_return: JobReturn, **kwargs: Any\n ) -> None:\n \"\"\"\n Called in both RUN and MULTIRUN modes, once for each Hydra job (after running\n application code).\n \"\"\"\n ...\n", "path": "hydra/experimental/callback.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport logging\nfrom typing import Any\n\nfrom omegaconf import DictConfig\n\nfrom hydra.core.utils import JobReturn\n\nlogger = logging.getLogger(__name__)\n\n\nclass Callback:\n def on_run_start(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n Called in RUN mode before job/application code starts. `config` is composed with overrides.\n Some `hydra.runtime` configs are not populated yet.\n See hydra.core.utils.run_job for more info.\n \"\"\"\n ...\n\n def on_run_end(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n Called in RUN mode after job/application code returns.\n \"\"\"\n ...\n\n def on_multirun_start(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n Called in MULTIRUN mode before any job starts.\n When using a launcher, this will be executed on local machine before any Sweeper/Launcher is initialized.\n \"\"\"\n ...\n\n def on_multirun_end(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n Called in MULTIRUN mode after all jobs returns.\n When using a launcher, this will be executed on local machine.\n \"\"\"\n ...\n\n def on_job_start(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n Called in both RUN and MULTIRUN modes, once for each Hydra job (before running application code).\n This is called from within `hydra.core.utils.run_job`. In the case of remote launching, this will be executed\n on the remote server along with your application code.\n \"\"\"\n ...\n\n def on_job_end(\n self, config: DictConfig, job_return: JobReturn, **kwargs: Any\n ) -> None:\n \"\"\"\n Called in both RUN and MULTIRUN modes, once for each Hydra job (after running\n application code).\n This is called from within `hydra.core.utils.run_job`. In the case of remote launching, this will be executed\n on the remote server after your application code.\n\n `job_return` contains info that could be useful for logging or post-processing.\n See hydra.core.utils.JobReturn for more.\n \"\"\"\n ...\n", "path": "hydra/experimental/callback.py"}]}
| 738 | 552 |
gh_patches_debug_29649
|
rasdani/github-patches
|
git_diff
|
bridgecrewio__checkov-4897
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CKV_AWS_325 check might not be accurate for PostgreSQL RDS instances
**Describe the issue**
The CKV_AWS_325 check expects the 'audit' log type to be enabled for RDS Cluster audit logging. However, this log type may not be applicable for PostgreSQL RDS instances, which use the 'postgresql' log type instead.
**Examples**
For a PostgreSQL RDS instance with the following Terraform configuration:
**main.tf**
```
resource "aws_rds_cluster" "main" {
# ... other configurations ...
enabled_cloudwatch_logs_exports = var.enabled_cloudwatch_logs_exports
# ... other configurations ...
}
```
**variables.tf**
```
variable "enabled_cloudwatch_logs_exports" {
default = [
"postgresql"
]
}
```
The CKV_AWS_325 check fails, even though the 'postgresql' log type is enabled for the RDS instance.
**Expected outcome**
The CKV_AWS_325 check should either not be applicable or should pass for PostgreSQL RDS instances with the 'postgresql' log type enabled.
**Version**
**Checkov Version:** Latest
**Additional context**
This issue was discovered while analyzing a Terraform configuration for an AWS PostgreSQL RDS instance. The CKV_AWS_325 check might need to be updated to accommodate different log types for different RDS engines.
https://github.com/bridgecrewio/checkov/blob/main/checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py
--- END ISSUE ---
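To make the failure concrete: the check implementation shown below only looks for the literal `audit` entry in `enabled_cloudwatch_logs_exports`, so a PostgreSQL cluster exporting only `postgresql` logs can never pass. A hand-written sketch of that evaluation follows (the dict imitates Checkov's parsed-HCL shape, where each attribute value is wrapped in a list):

```python
# Sketch of how the unpatched CKV_AWS_325 logic sees the example resource above.
# The conf dict is hand-written for illustration, not produced by running Checkov.
conf = {"enabled_cloudwatch_logs_exports": [["postgresql"]]}

logs_exports = conf.get("enabled_cloudwatch_logs_exports")
passed = (
    bool(logs_exports)
    and isinstance(logs_exports, list)
    and isinstance(logs_exports[0], list)
    and "audit" in logs_exports[0]  # only the MySQL-style "audit" export satisfies the check
)
print(passed)  # False -> the check reports FAILED for the PostgreSQL cluster
```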
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py`
Content:
```
1 from __future__ import annotations
2
3 from typing import Any
4
5 from checkov.common.models.enums import CheckCategories, CheckResult
6 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
7
8
9 class RDSClusterAuditLogging(BaseResourceCheck):
10 def __init__(self) -> None:
11 """
12 NIST.800-53.r5 AC-2(4), NIST.800-53.r5 AC-4(26), NIST.800-53.r5 AC-6(9), NIST.800-53.r5 AU-10,
13 NIST.800-53.r5 AU-12, NIST.800-53.r5 AU-2, NIST.800-53.r5 AU-3, NIST.800-53.r5 AU-6(3), NIST.800-53.r5 AU-6(4),
14 NIST.800-53.r5 CA-7, NIST.800-53.r5 SC-7(10), NIST.800-53.r5 SC-7(9), NIST.800-53.r5 SI-3(8),
15 NIST.800-53.r5 SI-4(20), NIST.800-53.r5 SI-7(8)
16 Database logging should be enabled
17 """
18 name = "Ensure that RDS Cluster audit logging is enabled"
19 id = "CKV_AWS_325"
20 supported_resources = ("aws_rds_cluster",)
21 categories = (CheckCategories.LOGGING,)
22 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
23
24 def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:
25 logs_exports = conf.get("enabled_cloudwatch_logs_exports")
26 if (
27 logs_exports
28 and isinstance(logs_exports, list)
29 and isinstance(logs_exports[0], list)
30 and "audit" in logs_exports[0]
31 ):
32 return CheckResult.PASSED
33
34 return CheckResult.FAILED
35
36
37 check = RDSClusterAuditLogging()
38
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py b/checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py
--- a/checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py
+++ b/checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py
@@ -5,6 +5,12 @@
from checkov.common.models.enums import CheckCategories, CheckResult
from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
+SUPPORTED_ENGINES = {
+ "aurora",
+ "aurora-mysql",
+ "mysql"
+}
+
class RDSClusterAuditLogging(BaseResourceCheck):
def __init__(self) -> None:
@@ -15,13 +21,18 @@
NIST.800-53.r5 SI-4(20), NIST.800-53.r5 SI-7(8)
Database logging should be enabled
"""
- name = "Ensure that RDS Cluster audit logging is enabled"
+ name = "Ensure that RDS Cluster audit logging is enabled for MySQL engine"
id = "CKV_AWS_325"
supported_resources = ("aws_rds_cluster",)
categories = (CheckCategories.LOGGING,)
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:
+ engine = conf.get("engine")
+ if engine and isinstance(engine, list) and engine[0] not in SUPPORTED_ENGINES:
+ # only MySQL cluster support easy audit logging export
+ return CheckResult.UNKNOWN
+
logs_exports = conf.get("enabled_cloudwatch_logs_exports")
if (
logs_exports
|
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py b/checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py\n--- a/checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py\n+++ b/checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py\n@@ -5,6 +5,12 @@\n from checkov.common.models.enums import CheckCategories, CheckResult\n from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n \n+SUPPORTED_ENGINES = {\n+ \"aurora\",\n+ \"aurora-mysql\",\n+ \"mysql\"\n+}\n+\n \n class RDSClusterAuditLogging(BaseResourceCheck):\n def __init__(self) -> None:\n@@ -15,13 +21,18 @@\n NIST.800-53.r5 SI-4(20), NIST.800-53.r5 SI-7(8)\n Database logging should be enabled\n \"\"\"\n- name = \"Ensure that RDS Cluster audit logging is enabled\"\n+ name = \"Ensure that RDS Cluster audit logging is enabled for MySQL engine\"\n id = \"CKV_AWS_325\"\n supported_resources = (\"aws_rds_cluster\",)\n categories = (CheckCategories.LOGGING,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:\n+ engine = conf.get(\"engine\")\n+ if engine and isinstance(engine, list) and engine[0] not in SUPPORTED_ENGINES:\n+ # only MySQL cluster support easy audit logging export\n+ return CheckResult.UNKNOWN\n+\n logs_exports = conf.get(\"enabled_cloudwatch_logs_exports\")\n if (\n logs_exports\n", "issue": "CKV_AWS_325 check might not be accurate for PostgreSQL RDS instances\n**Describe the issue**\r\nThe CKV_AWS_325 check expects the 'audit' log type to be enabled for RDS Cluster audit logging. However, this log type may not be applicable for PostgreSQL RDS instances, which use the 'postgresql' log type instead.\r\n\r\n**Examples**\r\nFor a PostgreSQL RDS instance with the following Terraform configuration:\r\n\r\n**main.tf**\r\n\r\n```\r\nresource \"aws_rds_cluster\" \"main\" {\r\n # ... other configurations ...\r\n enabled_cloudwatch_logs_exports = var.enabled_cloudwatch_logs_exports\r\n # ... other configurations ...\r\n}\r\n```\r\n\r\n**variables.tf**\r\n\r\n```\r\nvariable \"enabled_cloudwatch_logs_exports\" {\r\n default = [\r\n \"postgresql\"\r\n ]\r\n}\r\n```\r\n\r\nThe CKV_AWS_325 check fails, even though the 'postgresql' log type is enabled for the RDS instance.\r\n\r\n**Expected outcome**\r\nThe CKV_AWS_325 check should either not be applicable or should pass for PostgreSQL RDS instances with the 'postgresql' log type enabled.\r\n\r\n**Version**\r\n\r\n**Checkov Version:** Lastest\r\n\r\n**Additional context**\r\nThis issue was discovered while analyzing a Terraform configuration for an AWS PostgreSQL RDS instance. 
The CKV_AWS_325 check might need to be updated to accommodate different log types for different RDS engines.\r\n\r\nhttps://github.com/bridgecrewio/checkov/blob/main/checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass RDSClusterAuditLogging(BaseResourceCheck):\n def __init__(self) -> None:\n \"\"\"\n NIST.800-53.r5 AC-2(4), NIST.800-53.r5 AC-4(26), NIST.800-53.r5 AC-6(9), NIST.800-53.r5 AU-10,\n NIST.800-53.r5 AU-12, NIST.800-53.r5 AU-2, NIST.800-53.r5 AU-3, NIST.800-53.r5 AU-6(3), NIST.800-53.r5 AU-6(4),\n NIST.800-53.r5 CA-7, NIST.800-53.r5 SC-7(10), NIST.800-53.r5 SC-7(9), NIST.800-53.r5 SI-3(8),\n NIST.800-53.r5 SI-4(20), NIST.800-53.r5 SI-7(8)\n Database logging should be enabled\n \"\"\"\n name = \"Ensure that RDS Cluster audit logging is enabled\"\n id = \"CKV_AWS_325\"\n supported_resources = (\"aws_rds_cluster\",)\n categories = (CheckCategories.LOGGING,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:\n logs_exports = conf.get(\"enabled_cloudwatch_logs_exports\")\n if (\n logs_exports\n and isinstance(logs_exports, list)\n and isinstance(logs_exports[0], list)\n and \"audit\" in logs_exports[0]\n ):\n return CheckResult.PASSED\n\n return CheckResult.FAILED\n\n\ncheck = RDSClusterAuditLogging()\n", "path": "checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\nSUPPORTED_ENGINES = {\n \"aurora\",\n \"aurora-mysql\",\n \"mysql\"\n}\n\n\nclass RDSClusterAuditLogging(BaseResourceCheck):\n def __init__(self) -> None:\n \"\"\"\n NIST.800-53.r5 AC-2(4), NIST.800-53.r5 AC-4(26), NIST.800-53.r5 AC-6(9), NIST.800-53.r5 AU-10,\n NIST.800-53.r5 AU-12, NIST.800-53.r5 AU-2, NIST.800-53.r5 AU-3, NIST.800-53.r5 AU-6(3), NIST.800-53.r5 AU-6(4),\n NIST.800-53.r5 CA-7, NIST.800-53.r5 SC-7(10), NIST.800-53.r5 SC-7(9), NIST.800-53.r5 SI-3(8),\n NIST.800-53.r5 SI-4(20), NIST.800-53.r5 SI-7(8)\n Database logging should be enabled\n \"\"\"\n name = \"Ensure that RDS Cluster audit logging is enabled for MySQL engine\"\n id = \"CKV_AWS_325\"\n supported_resources = (\"aws_rds_cluster\",)\n categories = (CheckCategories.LOGGING,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:\n engine = conf.get(\"engine\")\n if engine and isinstance(engine, list) and engine[0] not in SUPPORTED_ENGINES:\n # only MySQL cluster support easy audit logging export\n return CheckResult.UNKNOWN\n\n logs_exports = conf.get(\"enabled_cloudwatch_logs_exports\")\n if (\n logs_exports\n and isinstance(logs_exports, list)\n and isinstance(logs_exports[0], list)\n and \"audit\" in logs_exports[0]\n ):\n return CheckResult.PASSED\n\n return CheckResult.FAILED\n\n\ncheck = RDSClusterAuditLogging()\n", "path": "checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py"}]}
| 1,158 | 405 |
gh_patches_debug_7747
|
rasdani/github-patches
|
git_diff
|
Cog-Creators__Red-DiscordBot-1981
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[V3 ModLog] Error in command "case"
# Command bugs
#### Command name
[p]case
#### What cog is this command from?
ModLog
#### What were you expecting to happen?
To show me information about a case.
#### What actually happened?
I get this error:

#### How can we reproduce this issue?
1. Have a modlog with cases.
2. Do [p]case
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redbot/cogs/modlog/modlog.py`
Content:
```
1 import discord
2
3 from redbot.core import checks, modlog, commands
4 from redbot.core.bot import Red
5 from redbot.core.i18n import Translator, cog_i18n
6 from redbot.core.utils.chat_formatting import box
7
8 _ = Translator("ModLog", __file__)
9
10
11 @cog_i18n(_)
12 class ModLog:
13 """Log for mod actions"""
14
15 def __init__(self, bot: Red):
16 self.bot = bot
17
18 @commands.group()
19 @checks.guildowner_or_permissions(administrator=True)
20 async def modlogset(self, ctx: commands.Context):
21 """Settings for the mod log"""
22 pass
23
24 @modlogset.command()
25 @commands.guild_only()
26 async def modlog(self, ctx: commands.Context, channel: discord.TextChannel = None):
27 """Sets a channel as mod log
28
29 Leaving the channel parameter empty will deactivate it"""
30 guild = ctx.guild
31 if channel:
32 if channel.permissions_for(guild.me).send_messages:
33 await modlog.set_modlog_channel(guild, channel)
34 await ctx.send(_("Mod events will be sent to {}").format(channel.mention))
35 else:
36 await ctx.send(
37 _("I do not have permissions to send messages in {}!").format(channel.mention)
38 )
39 else:
40 try:
41 await modlog.get_modlog_channel(guild)
42 except RuntimeError:
43 await ctx.send_help()
44 else:
45 await modlog.set_modlog_channel(guild, None)
46 await ctx.send(_("Mod log deactivated."))
47
48 @modlogset.command(name="cases")
49 @commands.guild_only()
50 async def set_cases(self, ctx: commands.Context, action: str = None):
51 """Enables or disables case creation for each type of mod action"""
52 guild = ctx.guild
53
54 if action is None: # No args given
55 casetypes = await modlog.get_all_casetypes(guild)
56 await ctx.send_help()
57 title = _("Current settings:")
58 msg = ""
59 for ct in casetypes:
60 enabled = await ct.is_enabled()
61 value = "enabled" if enabled else "disabled"
62 msg += "%s : %s\n" % (ct.name, value)
63
64 msg = title + "\n" + box(msg)
65 await ctx.send(msg)
66 return
67 casetype = await modlog.get_casetype(action, guild)
68 if not casetype:
69 await ctx.send(_("That action is not registered"))
70 else:
71
72 enabled = await casetype.is_enabled()
73 await casetype.set_enabled(True if not enabled else False)
74
75 msg = _("Case creation for {} actions is now {}.").format(
76 action, "enabled" if not enabled else "disabled"
77 )
78 await ctx.send(msg)
79
80 @modlogset.command()
81 @commands.guild_only()
82 async def resetcases(self, ctx: commands.Context):
83 """Resets modlog's cases"""
84 guild = ctx.guild
85 await modlog.reset_cases(guild)
86 await ctx.send(_("Cases have been reset."))
87
88 @commands.command()
89 @commands.guild_only()
90 async def case(self, ctx: commands.Context, number: int):
91 """Shows the specified case"""
92 try:
93 case = await modlog.get_case(number, ctx.guild, self.bot)
94 except RuntimeError:
95 await ctx.send(_("That case does not exist for that server"))
96 return
97 else:
98 await ctx.send(embed=await case.get_case_msg_content())
99
100 @commands.command(usage="[case] <reason>")
101 @commands.guild_only()
102 async def reason(self, ctx: commands.Context, *, reason: str):
103 """Lets you specify a reason for mod-log's cases
104
105 Please note that you can only edit cases you are
106 the owner of unless you are a mod/admin or the server owner.
107
108 If no number is specified, the latest case will be used."""
109 author = ctx.author
110 guild = ctx.guild
111 potential_case = reason.split()[0]
112 if potential_case.isdigit():
113 case = int(potential_case)
114 reason = reason.replace(potential_case, "")
115 else:
116 case = str(int(await modlog.get_next_case_number(guild)) - 1)
117 # latest case
118 try:
119 case_before = await modlog.get_case(case, guild, self.bot)
120 except RuntimeError:
121 await ctx.send(_("That case does not exist!"))
122 return
123 else:
124 if case_before.moderator is None:
125 # No mod set, so attempt to find out if the author
126 # triggered the case creation with an action
127 bot_perms = guild.me.guild_permissions
128 if bot_perms.view_audit_log:
129 case_type = await modlog.get_casetype(case_before.action_type, guild)
130 if case_type is not None and case_type.audit_type is not None:
131 audit_type = getattr(discord.AuditLogAction, case_type.audit_type)
132 if audit_type:
133 audit_case = None
134 async for entry in guild.audit_logs(action=audit_type):
135 if (
136 entry.target.id == case_before.user.id
137 and entry.action == audit_type
138 ):
139 audit_case = entry
140 break
141 if audit_case:
142 case_before.moderator = audit_case.user
143 is_guild_owner = author == guild.owner
144 is_case_author = author == case_before.moderator
145 author_is_mod = await ctx.bot.is_mod(author)
146 if not (is_guild_owner or is_case_author or author_is_mod):
147 await ctx.send(_("You are not authorized to modify that case!"))
148 return
149 to_modify = {"reason": reason}
150 if case_before.moderator != author:
151 to_modify["amended_by"] = author
152 to_modify["modified_at"] = ctx.message.created_at.timestamp()
153 await case_before.edit(to_modify)
154 await ctx.send(_("Reason has been updated."))
155
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/redbot/cogs/modlog/modlog.py b/redbot/cogs/modlog/modlog.py
--- a/redbot/cogs/modlog/modlog.py
+++ b/redbot/cogs/modlog/modlog.py
@@ -95,7 +95,10 @@
await ctx.send(_("That case does not exist for that server"))
return
else:
- await ctx.send(embed=await case.get_case_msg_content())
+ if await ctx.embed_requested():
+ await ctx.send(embed=await case.message_content(embed=True))
+ else:
+ await ctx.send(await case.message_content(embed=False))
@commands.command(usage="[case] <reason>")
@commands.guild_only()
|
{"golden_diff": "diff --git a/redbot/cogs/modlog/modlog.py b/redbot/cogs/modlog/modlog.py\n--- a/redbot/cogs/modlog/modlog.py\n+++ b/redbot/cogs/modlog/modlog.py\n@@ -95,7 +95,10 @@\n await ctx.send(_(\"That case does not exist for that server\"))\n return\n else:\n- await ctx.send(embed=await case.get_case_msg_content())\n+ if await ctx.embed_requested():\n+ await ctx.send(embed=await case.message_content(embed=True))\n+ else:\n+ await ctx.send(await case.message_content(embed=False))\n \n @commands.command(usage=\"[case] <reason>\")\n @commands.guild_only()\n", "issue": "[V3 ModLog] Error in command \"case\"\n# Command bugs\r\n\r\n\r\n#### Command name\r\n\r\n[p]case\r\n\r\n#### What cog is this command from?\r\n\r\nModLog\r\n\r\n#### What were you expecting to happen?\r\n\r\nTo show me information about a case.\r\n\r\n\r\n#### What actually happened?\r\nI get this error:\r\n\r\n\r\n\r\n\r\n#### How can we reproduce this issue?\r\n\r\n1. Have a modlog with cases.\r\n2. Do [p]case\r\n\n", "before_files": [{"content": "import discord\n\nfrom redbot.core import checks, modlog, commands\nfrom redbot.core.bot import Red\nfrom redbot.core.i18n import Translator, cog_i18n\nfrom redbot.core.utils.chat_formatting import box\n\n_ = Translator(\"ModLog\", __file__)\n\n\n@cog_i18n(_)\nclass ModLog:\n \"\"\"Log for mod actions\"\"\"\n\n def __init__(self, bot: Red):\n self.bot = bot\n\n @commands.group()\n @checks.guildowner_or_permissions(administrator=True)\n async def modlogset(self, ctx: commands.Context):\n \"\"\"Settings for the mod log\"\"\"\n pass\n\n @modlogset.command()\n @commands.guild_only()\n async def modlog(self, ctx: commands.Context, channel: discord.TextChannel = None):\n \"\"\"Sets a channel as mod log\n\n Leaving the channel parameter empty will deactivate it\"\"\"\n guild = ctx.guild\n if channel:\n if channel.permissions_for(guild.me).send_messages:\n await modlog.set_modlog_channel(guild, channel)\n await ctx.send(_(\"Mod events will be sent to {}\").format(channel.mention))\n else:\n await ctx.send(\n _(\"I do not have permissions to send messages in {}!\").format(channel.mention)\n )\n else:\n try:\n await modlog.get_modlog_channel(guild)\n except RuntimeError:\n await ctx.send_help()\n else:\n await modlog.set_modlog_channel(guild, None)\n await ctx.send(_(\"Mod log deactivated.\"))\n\n @modlogset.command(name=\"cases\")\n @commands.guild_only()\n async def set_cases(self, ctx: commands.Context, action: str = None):\n \"\"\"Enables or disables case creation for each type of mod action\"\"\"\n guild = ctx.guild\n\n if action is None: # No args given\n casetypes = await modlog.get_all_casetypes(guild)\n await ctx.send_help()\n title = _(\"Current settings:\")\n msg = \"\"\n for ct in casetypes:\n enabled = await ct.is_enabled()\n value = \"enabled\" if enabled else \"disabled\"\n msg += \"%s : %s\\n\" % (ct.name, value)\n\n msg = title + \"\\n\" + box(msg)\n await ctx.send(msg)\n return\n casetype = await modlog.get_casetype(action, guild)\n if not casetype:\n await ctx.send(_(\"That action is not registered\"))\n else:\n\n enabled = await casetype.is_enabled()\n await casetype.set_enabled(True if not enabled else False)\n\n msg = _(\"Case creation for {} actions is now {}.\").format(\n action, \"enabled\" if not enabled else \"disabled\"\n )\n await ctx.send(msg)\n\n @modlogset.command()\n @commands.guild_only()\n async def resetcases(self, ctx: commands.Context):\n \"\"\"Resets modlog's cases\"\"\"\n guild = ctx.guild\n await modlog.reset_cases(guild)\n await 
ctx.send(_(\"Cases have been reset.\"))\n\n @commands.command()\n @commands.guild_only()\n async def case(self, ctx: commands.Context, number: int):\n \"\"\"Shows the specified case\"\"\"\n try:\n case = await modlog.get_case(number, ctx.guild, self.bot)\n except RuntimeError:\n await ctx.send(_(\"That case does not exist for that server\"))\n return\n else:\n await ctx.send(embed=await case.get_case_msg_content())\n\n @commands.command(usage=\"[case] <reason>\")\n @commands.guild_only()\n async def reason(self, ctx: commands.Context, *, reason: str):\n \"\"\"Lets you specify a reason for mod-log's cases\n \n Please note that you can only edit cases you are\n the owner of unless you are a mod/admin or the server owner.\n \n If no number is specified, the latest case will be used.\"\"\"\n author = ctx.author\n guild = ctx.guild\n potential_case = reason.split()[0]\n if potential_case.isdigit():\n case = int(potential_case)\n reason = reason.replace(potential_case, \"\")\n else:\n case = str(int(await modlog.get_next_case_number(guild)) - 1)\n # latest case\n try:\n case_before = await modlog.get_case(case, guild, self.bot)\n except RuntimeError:\n await ctx.send(_(\"That case does not exist!\"))\n return\n else:\n if case_before.moderator is None:\n # No mod set, so attempt to find out if the author\n # triggered the case creation with an action\n bot_perms = guild.me.guild_permissions\n if bot_perms.view_audit_log:\n case_type = await modlog.get_casetype(case_before.action_type, guild)\n if case_type is not None and case_type.audit_type is not None:\n audit_type = getattr(discord.AuditLogAction, case_type.audit_type)\n if audit_type:\n audit_case = None\n async for entry in guild.audit_logs(action=audit_type):\n if (\n entry.target.id == case_before.user.id\n and entry.action == audit_type\n ):\n audit_case = entry\n break\n if audit_case:\n case_before.moderator = audit_case.user\n is_guild_owner = author == guild.owner\n is_case_author = author == case_before.moderator\n author_is_mod = await ctx.bot.is_mod(author)\n if not (is_guild_owner or is_case_author or author_is_mod):\n await ctx.send(_(\"You are not authorized to modify that case!\"))\n return\n to_modify = {\"reason\": reason}\n if case_before.moderator != author:\n to_modify[\"amended_by\"] = author\n to_modify[\"modified_at\"] = ctx.message.created_at.timestamp()\n await case_before.edit(to_modify)\n await ctx.send(_(\"Reason has been updated.\"))\n", "path": "redbot/cogs/modlog/modlog.py"}], "after_files": [{"content": "import discord\n\nfrom redbot.core import checks, modlog, commands\nfrom redbot.core.bot import Red\nfrom redbot.core.i18n import Translator, cog_i18n\nfrom redbot.core.utils.chat_formatting import box\n\n_ = Translator(\"ModLog\", __file__)\n\n\n@cog_i18n(_)\nclass ModLog:\n \"\"\"Log for mod actions\"\"\"\n\n def __init__(self, bot: Red):\n self.bot = bot\n\n @commands.group()\n @checks.guildowner_or_permissions(administrator=True)\n async def modlogset(self, ctx: commands.Context):\n \"\"\"Settings for the mod log\"\"\"\n pass\n\n @modlogset.command()\n @commands.guild_only()\n async def modlog(self, ctx: commands.Context, channel: discord.TextChannel = None):\n \"\"\"Sets a channel as mod log\n\n Leaving the channel parameter empty will deactivate it\"\"\"\n guild = ctx.guild\n if channel:\n if channel.permissions_for(guild.me).send_messages:\n await modlog.set_modlog_channel(guild, channel)\n await ctx.send(_(\"Mod events will be sent to {}\").format(channel.mention))\n else:\n await ctx.send(\n _(\"I do 
not have permissions to send messages in {}!\").format(channel.mention)\n )\n else:\n try:\n await modlog.get_modlog_channel(guild)\n except RuntimeError:\n await ctx.send_help()\n else:\n await modlog.set_modlog_channel(guild, None)\n await ctx.send(_(\"Mod log deactivated.\"))\n\n @modlogset.command(name=\"cases\")\n @commands.guild_only()\n async def set_cases(self, ctx: commands.Context, action: str = None):\n \"\"\"Enables or disables case creation for each type of mod action\"\"\"\n guild = ctx.guild\n\n if action is None: # No args given\n casetypes = await modlog.get_all_casetypes(guild)\n await ctx.send_help()\n title = _(\"Current settings:\")\n msg = \"\"\n for ct in casetypes:\n enabled = await ct.is_enabled()\n value = \"enabled\" if enabled else \"disabled\"\n msg += \"%s : %s\\n\" % (ct.name, value)\n\n msg = title + \"\\n\" + box(msg)\n await ctx.send(msg)\n return\n casetype = await modlog.get_casetype(action, guild)\n if not casetype:\n await ctx.send(_(\"That action is not registered\"))\n else:\n\n enabled = await casetype.is_enabled()\n await casetype.set_enabled(True if not enabled else False)\n\n msg = _(\"Case creation for {} actions is now {}.\").format(\n action, \"enabled\" if not enabled else \"disabled\"\n )\n await ctx.send(msg)\n\n @modlogset.command()\n @commands.guild_only()\n async def resetcases(self, ctx: commands.Context):\n \"\"\"Resets modlog's cases\"\"\"\n guild = ctx.guild\n await modlog.reset_cases(guild)\n await ctx.send(_(\"Cases have been reset.\"))\n\n @commands.command()\n @commands.guild_only()\n async def case(self, ctx: commands.Context, number: int):\n \"\"\"Shows the specified case\"\"\"\n try:\n case = await modlog.get_case(number, ctx.guild, self.bot)\n except RuntimeError:\n await ctx.send(_(\"That case does not exist for that server\"))\n return\n else:\n if await ctx.embed_requested():\n await ctx.send(embed=await case.message_content(embed=True))\n else:\n await ctx.send(await case.message_content(embed=False))\n\n @commands.command(usage=\"[case] <reason>\")\n @commands.guild_only()\n async def reason(self, ctx: commands.Context, *, reason: str):\n \"\"\"Lets you specify a reason for mod-log's cases\n \n Please note that you can only edit cases you are\n the owner of unless you are a mod/admin or the server owner.\n \n If no number is specified, the latest case will be used.\"\"\"\n author = ctx.author\n guild = ctx.guild\n potential_case = reason.split()[0]\n if potential_case.isdigit():\n case = int(potential_case)\n reason = reason.replace(potential_case, \"\")\n else:\n case = str(int(await modlog.get_next_case_number(guild)) - 1)\n # latest case\n try:\n case_before = await modlog.get_case(case, guild, self.bot)\n except RuntimeError:\n await ctx.send(_(\"That case does not exist!\"))\n return\n else:\n if case_before.moderator is None:\n # No mod set, so attempt to find out if the author\n # triggered the case creation with an action\n bot_perms = guild.me.guild_permissions\n if bot_perms.view_audit_log:\n case_type = await modlog.get_casetype(case_before.action_type, guild)\n if case_type is not None and case_type.audit_type is not None:\n audit_type = getattr(discord.AuditLogAction, case_type.audit_type)\n if audit_type:\n audit_case = None\n async for entry in guild.audit_logs(action=audit_type):\n if (\n entry.target.id == case_before.user.id\n and entry.action == audit_type\n ):\n audit_case = entry\n break\n if audit_case:\n case_before.moderator = audit_case.user\n is_guild_owner = author == guild.owner\n 
is_case_author = author == case_before.moderator\n author_is_mod = await ctx.bot.is_mod(author)\n if not (is_guild_owner or is_case_author or author_is_mod):\n await ctx.send(_(\"You are not authorized to modify that case!\"))\n return\n to_modify = {\"reason\": reason}\n if case_before.moderator != author:\n to_modify[\"amended_by\"] = author\n to_modify[\"modified_at\"] = ctx.message.created_at.timestamp()\n await case_before.edit(to_modify)\n await ctx.send(_(\"Reason has been updated.\"))\n", "path": "redbot/cogs/modlog/modlog.py"}]}
| 2,036 | 152 |
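The core of the fix in this row is to stop assuming an embed may always be sent: Red's `Context.embed_requested()` reports whether embeds should be used, and the case object renders either form via `message_content(embed=...)`. A stripped-down sketch of that pattern (the helper name is invented; `ctx` and `case` are the usual Red context and modlog case objects):

```python
# Hedged sketch of the embed-aware send pattern from the diff above.
async def send_case(ctx, case):
    if await ctx.embed_requested():
        # Embeds are allowed here: send the rich rendering of the case.
        await ctx.send(embed=await case.message_content(embed=True))
    else:
        # Embeds disabled or not permitted: fall back to the plain-text rendering.
        await ctx.send(await case.message_content(embed=False))
```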
gh_patches_debug_21688
|
rasdani/github-patches
|
git_diff
|
scikit-hep__pyhf-1419
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add pyhf.utils.citation to the python API
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pyhf/utils.py`
Content:
```
1 import json
2 import jsonschema
3 import pkg_resources
4 from pathlib import Path
5 import yaml
6 import click
7 import hashlib
8
9 from .exceptions import InvalidSpecification
10
11 SCHEMA_CACHE = {}
12 SCHEMA_BASE = "https://scikit-hep.org/pyhf/schemas/"
13 SCHEMA_VERSION = '1.0.0'
14
15
16 def load_schema(schema_id, version=None):
17 global SCHEMA_CACHE
18 if not version:
19 version = SCHEMA_VERSION
20 try:
21 return SCHEMA_CACHE[f'{SCHEMA_BASE}{Path(version).joinpath(schema_id)}']
22 except KeyError:
23 pass
24
25 path = pkg_resources.resource_filename(
26 __name__, str(Path('schemas').joinpath(version, schema_id))
27 )
28 with open(path) as json_schema:
29 schema = json.load(json_schema)
30 SCHEMA_CACHE[schema['$id']] = schema
31 return SCHEMA_CACHE[schema['$id']]
32
33
34 # load the defs.json as it is included by $ref
35 load_schema('defs.json')
36
37
38 def validate(spec, schema_name, version=None):
39 schema = load_schema(schema_name, version=version)
40 try:
41 resolver = jsonschema.RefResolver(
42 base_uri=f"file://{pkg_resources.resource_filename(__name__, 'schemas/'):s}",
43 referrer=schema_name,
44 store=SCHEMA_CACHE,
45 )
46 validator = jsonschema.Draft6Validator(
47 schema, resolver=resolver, format_checker=None
48 )
49 return validator.validate(spec)
50 except jsonschema.ValidationError as err:
51 raise InvalidSpecification(err, schema_name)
52
53
54 def options_from_eqdelimstring(opts):
55 document = '\n'.join(
56 f"{opt.split('=', 1)[0]}: {opt.split('=', 1)[1]}" for opt in opts
57 )
58 return yaml.safe_load(document)
59
60
61 class EqDelimStringParamType(click.ParamType):
62 name = 'equal-delimited option'
63
64 def convert(self, value, param, ctx):
65 try:
66 return options_from_eqdelimstring([value])
67 except IndexError:
68 self.fail(f'{value:s} is not a valid equal-delimited string', param, ctx)
69
70
71 def digest(obj, algorithm='sha256'):
72 """
73 Get the digest for the provided object. Note: object must be JSON-serializable.
74
75 The hashing algorithms supported are in :mod:`hashlib`, part of Python's Standard Libraries.
76
77 Example:
78
79 >>> import pyhf
80 >>> obj = {'a': 2.0, 'b': 3.0, 'c': 1.0}
81 >>> pyhf.utils.digest(obj)
82 'a38f6093800189b79bc22ef677baf90c75705af2cfc7ff594159eca54eaa7928'
83 >>> pyhf.utils.digest(obj, algorithm='md5')
84 '2c0633f242928eb55c3672fed5ba8612'
85 >>> pyhf.utils.digest(obj, algorithm='sha1')
86 '49a27f499e763766c9545b294880df277be6f545'
87
88 Raises:
89 ValueError: If the object is not JSON-serializable or if the algorithm is not supported.
90
91 Args:
92 obj (:obj:`jsonable`): A JSON-serializable object to compute the digest of. Usually a :class:`~pyhf.workspace.Workspace` object.
93 algorithm (:obj:`str`): The hashing algorithm to use.
94
95 Returns:
96 digest (:obj:`str`): The digest for the JSON-serialized object provided and hash algorithm specified.
97 """
98
99 try:
100 stringified = json.dumps(obj, sort_keys=True, ensure_ascii=False).encode('utf8')
101 except TypeError:
102 raise ValueError(
103 "The supplied object is not JSON-serializable for calculating a hash."
104 )
105 try:
106 hash_alg = getattr(hashlib, algorithm)
107 except AttributeError:
108 raise ValueError(
109 f"{algorithm} is not an algorithm provided by Python's hashlib library."
110 )
111 return hash_alg(stringified).hexdigest()
112
113
114 def remove_prefix(text, prefix):
115 """
116 Remove a prefix from the beginning of the provided text.
117
118 Example:
119
120 >>> import pyhf
121 >>> pyhf.utils.remove_prefix("alpha_syst1", "alpha_")
122 'syst1'
123
124 Args:
125 text (:obj:`str`): A provided input to manipulate.
126 prefix (:obj:`str`): A prefix to remove from provided input, if it exists.
127
128 Returns:
129 stripped_text (:obj:`str`): Text with the prefix removed.
130 """
131 # NB: python3.9 can be `return text.removeprefix(prefix)`
132 if text.startswith(prefix):
133 return text[len(prefix) :]
134 return text
135
136
137 def citation(oneline=False):
138 """
139 Get the bibtex citation for pyhf
140
141 Example:
142
143 >>> import pyhf
144 >>> pyhf.utils.citation(True)
145 '@software{pyhf, author = {Lukas Heinrich and Matthew Feickert and Giordon Stark}, title = "{pyhf: v0.6.1}", version = {0.6.1}, doi = {10.5281/zenodo.1169739}, url = {https://github.com/scikit-hep/pyhf},}@article{pyhf_joss, doi = {10.21105/joss.02823}, url = {https://doi.org/10.21105/joss.02823}, year = {2021}, publisher = {The Open Journal}, volume = {6}, number = {58}, pages = {2823}, author = {Lukas Heinrich and Matthew Feickert and Giordon Stark and Kyle Cranmer}, title = {pyhf: pure-Python implementation of HistFactory statistical models}, journal = {Journal of Open Source Software}}'
146
147 Keyword Args:
148 oneline (:obj:`bool`): Whether to provide citation with new lines (default) or as a one-liner.
149
150 Returns:
151 citation (:obj:`str`): The citation for this software
152 """
153 path = Path(
154 pkg_resources.resource_filename(
155 __name__, str(Path('data').joinpath('citation.bib'))
156 )
157 )
158 with path.open() as fp:
159 # remove end-of-file newline if there is one
160 data = fp.read().strip()
161
162 if oneline:
163 data = ''.join(data.splitlines())
164 return data
165
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/pyhf/utils.py b/src/pyhf/utils.py
--- a/src/pyhf/utils.py
+++ b/src/pyhf/utils.py
@@ -141,7 +141,7 @@
Example:
>>> import pyhf
- >>> pyhf.utils.citation(True)
+ >>> pyhf.utils.citation(oneline=True)
'@software{pyhf, author = {Lukas Heinrich and Matthew Feickert and Giordon Stark}, title = "{pyhf: v0.6.1}", version = {0.6.1}, doi = {10.5281/zenodo.1169739}, url = {https://github.com/scikit-hep/pyhf},}@article{pyhf_joss, doi = {10.21105/joss.02823}, url = {https://doi.org/10.21105/joss.02823}, year = {2021}, publisher = {The Open Journal}, volume = {6}, number = {58}, pages = {2823}, author = {Lukas Heinrich and Matthew Feickert and Giordon Stark and Kyle Cranmer}, title = {pyhf: pure-Python implementation of HistFactory statistical models}, journal = {Journal of Open Source Software}}'
Keyword Args:
|
{"golden_diff": "diff --git a/src/pyhf/utils.py b/src/pyhf/utils.py\n--- a/src/pyhf/utils.py\n+++ b/src/pyhf/utils.py\n@@ -141,7 +141,7 @@\n Example:\n \n >>> import pyhf\n- >>> pyhf.utils.citation(True)\n+ >>> pyhf.utils.citation(oneline=True)\n '@software{pyhf, author = {Lukas Heinrich and Matthew Feickert and Giordon Stark}, title = \"{pyhf: v0.6.1}\", version = {0.6.1}, doi = {10.5281/zenodo.1169739}, url = {https://github.com/scikit-hep/pyhf},}@article{pyhf_joss, doi = {10.21105/joss.02823}, url = {https://doi.org/10.21105/joss.02823}, year = {2021}, publisher = {The Open Journal}, volume = {6}, number = {58}, pages = {2823}, author = {Lukas Heinrich and Matthew Feickert and Giordon Stark and Kyle Cranmer}, title = {pyhf: pure-Python implementation of HistFactory statistical models}, journal = {Journal of Open Source Software}}'\n \n Keyword Args:\n", "issue": "Add pyhf.utils.citation to the python API\n\n", "before_files": [{"content": "import json\nimport jsonschema\nimport pkg_resources\nfrom pathlib import Path\nimport yaml\nimport click\nimport hashlib\n\nfrom .exceptions import InvalidSpecification\n\nSCHEMA_CACHE = {}\nSCHEMA_BASE = \"https://scikit-hep.org/pyhf/schemas/\"\nSCHEMA_VERSION = '1.0.0'\n\n\ndef load_schema(schema_id, version=None):\n global SCHEMA_CACHE\n if not version:\n version = SCHEMA_VERSION\n try:\n return SCHEMA_CACHE[f'{SCHEMA_BASE}{Path(version).joinpath(schema_id)}']\n except KeyError:\n pass\n\n path = pkg_resources.resource_filename(\n __name__, str(Path('schemas').joinpath(version, schema_id))\n )\n with open(path) as json_schema:\n schema = json.load(json_schema)\n SCHEMA_CACHE[schema['$id']] = schema\n return SCHEMA_CACHE[schema['$id']]\n\n\n# load the defs.json as it is included by $ref\nload_schema('defs.json')\n\n\ndef validate(spec, schema_name, version=None):\n schema = load_schema(schema_name, version=version)\n try:\n resolver = jsonschema.RefResolver(\n base_uri=f\"file://{pkg_resources.resource_filename(__name__, 'schemas/'):s}\",\n referrer=schema_name,\n store=SCHEMA_CACHE,\n )\n validator = jsonschema.Draft6Validator(\n schema, resolver=resolver, format_checker=None\n )\n return validator.validate(spec)\n except jsonschema.ValidationError as err:\n raise InvalidSpecification(err, schema_name)\n\n\ndef options_from_eqdelimstring(opts):\n document = '\\n'.join(\n f\"{opt.split('=', 1)[0]}: {opt.split('=', 1)[1]}\" for opt in opts\n )\n return yaml.safe_load(document)\n\n\nclass EqDelimStringParamType(click.ParamType):\n name = 'equal-delimited option'\n\n def convert(self, value, param, ctx):\n try:\n return options_from_eqdelimstring([value])\n except IndexError:\n self.fail(f'{value:s} is not a valid equal-delimited string', param, ctx)\n\n\ndef digest(obj, algorithm='sha256'):\n \"\"\"\n Get the digest for the provided object. Note: object must be JSON-serializable.\n\n The hashing algorithms supported are in :mod:`hashlib`, part of Python's Standard Libraries.\n\n Example:\n\n >>> import pyhf\n >>> obj = {'a': 2.0, 'b': 3.0, 'c': 1.0}\n >>> pyhf.utils.digest(obj)\n 'a38f6093800189b79bc22ef677baf90c75705af2cfc7ff594159eca54eaa7928'\n >>> pyhf.utils.digest(obj, algorithm='md5')\n '2c0633f242928eb55c3672fed5ba8612'\n >>> pyhf.utils.digest(obj, algorithm='sha1')\n '49a27f499e763766c9545b294880df277be6f545'\n\n Raises:\n ValueError: If the object is not JSON-serializable or if the algorithm is not supported.\n\n Args:\n obj (:obj:`jsonable`): A JSON-serializable object to compute the digest of. 
Usually a :class:`~pyhf.workspace.Workspace` object.\n algorithm (:obj:`str`): The hashing algorithm to use.\n\n Returns:\n digest (:obj:`str`): The digest for the JSON-serialized object provided and hash algorithm specified.\n \"\"\"\n\n try:\n stringified = json.dumps(obj, sort_keys=True, ensure_ascii=False).encode('utf8')\n except TypeError:\n raise ValueError(\n \"The supplied object is not JSON-serializable for calculating a hash.\"\n )\n try:\n hash_alg = getattr(hashlib, algorithm)\n except AttributeError:\n raise ValueError(\n f\"{algorithm} is not an algorithm provided by Python's hashlib library.\"\n )\n return hash_alg(stringified).hexdigest()\n\n\ndef remove_prefix(text, prefix):\n \"\"\"\n Remove a prefix from the beginning of the provided text.\n\n Example:\n\n >>> import pyhf\n >>> pyhf.utils.remove_prefix(\"alpha_syst1\", \"alpha_\")\n 'syst1'\n\n Args:\n text (:obj:`str`): A provided input to manipulate.\n prefix (:obj:`str`): A prefix to remove from provided input, if it exists.\n\n Returns:\n stripped_text (:obj:`str`): Text with the prefix removed.\n \"\"\"\n # NB: python3.9 can be `return text.removeprefix(prefix)`\n if text.startswith(prefix):\n return text[len(prefix) :]\n return text\n\n\ndef citation(oneline=False):\n \"\"\"\n Get the bibtex citation for pyhf\n\n Example:\n\n >>> import pyhf\n >>> pyhf.utils.citation(True)\n '@software{pyhf, author = {Lukas Heinrich and Matthew Feickert and Giordon Stark}, title = \"{pyhf: v0.6.1}\", version = {0.6.1}, doi = {10.5281/zenodo.1169739}, url = {https://github.com/scikit-hep/pyhf},}@article{pyhf_joss, doi = {10.21105/joss.02823}, url = {https://doi.org/10.21105/joss.02823}, year = {2021}, publisher = {The Open Journal}, volume = {6}, number = {58}, pages = {2823}, author = {Lukas Heinrich and Matthew Feickert and Giordon Stark and Kyle Cranmer}, title = {pyhf: pure-Python implementation of HistFactory statistical models}, journal = {Journal of Open Source Software}}'\n\n Keyword Args:\n oneline (:obj:`bool`): Whether to provide citation with new lines (default) or as a one-liner.\n\n Returns:\n citation (:obj:`str`): The citation for this software\n \"\"\"\n path = Path(\n pkg_resources.resource_filename(\n __name__, str(Path('data').joinpath('citation.bib'))\n )\n )\n with path.open() as fp:\n # remove end-of-file newline if there is one\n data = fp.read().strip()\n\n if oneline:\n data = ''.join(data.splitlines())\n return data\n", "path": "src/pyhf/utils.py"}], "after_files": [{"content": "import json\nimport jsonschema\nimport pkg_resources\nfrom pathlib import Path\nimport yaml\nimport click\nimport hashlib\n\nfrom .exceptions import InvalidSpecification\n\nSCHEMA_CACHE = {}\nSCHEMA_BASE = \"https://scikit-hep.org/pyhf/schemas/\"\nSCHEMA_VERSION = '1.0.0'\n\n\ndef load_schema(schema_id, version=None):\n global SCHEMA_CACHE\n if not version:\n version = SCHEMA_VERSION\n try:\n return SCHEMA_CACHE[f'{SCHEMA_BASE}{Path(version).joinpath(schema_id)}']\n except KeyError:\n pass\n\n path = pkg_resources.resource_filename(\n __name__, str(Path('schemas').joinpath(version, schema_id))\n )\n with open(path) as json_schema:\n schema = json.load(json_schema)\n SCHEMA_CACHE[schema['$id']] = schema\n return SCHEMA_CACHE[schema['$id']]\n\n\n# load the defs.json as it is included by $ref\nload_schema('defs.json')\n\n\ndef validate(spec, schema_name, version=None):\n schema = load_schema(schema_name, version=version)\n try:\n resolver = jsonschema.RefResolver(\n base_uri=f\"file://{pkg_resources.resource_filename(__name__, 
'schemas/'):s}\",\n referrer=schema_name,\n store=SCHEMA_CACHE,\n )\n validator = jsonschema.Draft6Validator(\n schema, resolver=resolver, format_checker=None\n )\n return validator.validate(spec)\n except jsonschema.ValidationError as err:\n raise InvalidSpecification(err, schema_name)\n\n\ndef options_from_eqdelimstring(opts):\n document = '\\n'.join(\n f\"{opt.split('=', 1)[0]}: {opt.split('=', 1)[1]}\" for opt in opts\n )\n return yaml.safe_load(document)\n\n\nclass EqDelimStringParamType(click.ParamType):\n name = 'equal-delimited option'\n\n def convert(self, value, param, ctx):\n try:\n return options_from_eqdelimstring([value])\n except IndexError:\n self.fail(f'{value:s} is not a valid equal-delimited string', param, ctx)\n\n\ndef digest(obj, algorithm='sha256'):\n \"\"\"\n Get the digest for the provided object. Note: object must be JSON-serializable.\n\n The hashing algorithms supported are in :mod:`hashlib`, part of Python's Standard Libraries.\n\n Example:\n\n >>> import pyhf\n >>> obj = {'a': 2.0, 'b': 3.0, 'c': 1.0}\n >>> pyhf.utils.digest(obj)\n 'a38f6093800189b79bc22ef677baf90c75705af2cfc7ff594159eca54eaa7928'\n >>> pyhf.utils.digest(obj, algorithm='md5')\n '2c0633f242928eb55c3672fed5ba8612'\n >>> pyhf.utils.digest(obj, algorithm='sha1')\n '49a27f499e763766c9545b294880df277be6f545'\n\n Raises:\n ValueError: If the object is not JSON-serializable or if the algorithm is not supported.\n\n Args:\n obj (:obj:`jsonable`): A JSON-serializable object to compute the digest of. Usually a :class:`~pyhf.workspace.Workspace` object.\n algorithm (:obj:`str`): The hashing algorithm to use.\n\n Returns:\n digest (:obj:`str`): The digest for the JSON-serialized object provided and hash algorithm specified.\n \"\"\"\n\n try:\n stringified = json.dumps(obj, sort_keys=True, ensure_ascii=False).encode('utf8')\n except TypeError:\n raise ValueError(\n \"The supplied object is not JSON-serializable for calculating a hash.\"\n )\n try:\n hash_alg = getattr(hashlib, algorithm)\n except AttributeError:\n raise ValueError(\n f\"{algorithm} is not an algorithm provided by Python's hashlib library.\"\n )\n return hash_alg(stringified).hexdigest()\n\n\ndef remove_prefix(text, prefix):\n \"\"\"\n Remove a prefix from the beginning of the provided text.\n\n Example:\n\n >>> import pyhf\n >>> pyhf.utils.remove_prefix(\"alpha_syst1\", \"alpha_\")\n 'syst1'\n\n Args:\n text (:obj:`str`): A provided input to manipulate.\n prefix (:obj:`str`): A prefix to remove from provided input, if it exists.\n\n Returns:\n stripped_text (:obj:`str`): Text with the prefix removed.\n \"\"\"\n # NB: python3.9 can be `return text.removeprefix(prefix)`\n if text.startswith(prefix):\n return text[len(prefix) :]\n return text\n\n\ndef citation(oneline=False):\n \"\"\"\n Get the bibtex citation for pyhf\n\n Example:\n\n >>> import pyhf\n >>> pyhf.utils.citation(oneline=True)\n '@software{pyhf, author = {Lukas Heinrich and Matthew Feickert and Giordon Stark}, title = \"{pyhf: v0.6.1}\", version = {0.6.1}, doi = {10.5281/zenodo.1169739}, url = {https://github.com/scikit-hep/pyhf},}@article{pyhf_joss, doi = {10.21105/joss.02823}, url = {https://doi.org/10.21105/joss.02823}, year = {2021}, publisher = {The Open Journal}, volume = {6}, number = {58}, pages = {2823}, author = {Lukas Heinrich and Matthew Feickert and Giordon Stark and Kyle Cranmer}, title = {pyhf: pure-Python implementation of HistFactory statistical models}, journal = {Journal of Open Source Software}}'\n\n Keyword Args:\n oneline (:obj:`bool`): Whether to provide 
citation with new lines (default) or as a one-liner.\n\n Returns:\n citation (:obj:`str`): The citation for this software\n \"\"\"\n path = Path(\n pkg_resources.resource_filename(\n __name__, str(Path('data').joinpath('citation.bib'))\n )\n )\n with path.open() as fp:\n # remove end-of-file newline if there is one\n data = fp.read().strip()\n\n if oneline:\n data = ''.join(data.splitlines())\n return data\n", "path": "src/pyhf/utils.py"}]}
| 2,132 | 316 |
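Usage-wise nothing changes in this row beyond the doctest: `pyhf.utils.citation` already accepts `oneline` as a keyword argument, and the patch simply calls it that way in the example. A minimal usage sketch (requires pyhf installed; output shortened):

```python
# Minimal usage sketch for pyhf.utils.citation.
import pyhf

bibtex = pyhf.utils.citation()                 # multi-line BibTeX entries
one_liner = pyhf.utils.citation(oneline=True)  # same text with newlines removed

print(one_liner[:16])  # e.g. '@software{pyhf, '
```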
gh_patches_debug_30078
|
rasdani/github-patches
|
git_diff
|
ansible-collections__community.general-7089
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
lookup bitwarden field=notes resulting in KeyError
### Summary
When using the community.general.bitwarden lookup there is an error trying to lookup a secure note.
Playbook:
```yaml
- debug:
msg: >-
"{{ lookup('community.general.bitwarden', 'secure note 2023', field='notes') }}"
```
Error:
```
fatal: [my.hostname.net]: FAILED! => {
"msg": "An unhandled exception occurred while running the lookup plugin 'community.general.bitwarden'. Error was a <class 'KeyError'>, original message: 'fields'. 'fields'"
}
```
With debug:
```console
exception during Jinja2 execution: Traceback (most recent call last):
File "/opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible/template/__init__.py", line 831, in _lookup
ran = instance.run(loop_terms, variables=self._available_variables, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible_collections/community/general/plugins/lookup/bitwarden.py", line 159, in run
return [_bitwarden.get_field(field, term, search_field, collection_id) for term in terms]
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible_collections/community/general/plugins/lookup/bitwarden.py", line 159, in <listcomp>
return [_bitwarden.get_field(field, term, search_field, collection_id) for term in terms]
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible_collections/community/general/plugins/lookup/bitwarden.py", line 141, in get_field
for custom_field in match['fields']:
~~~~~^^^^^^^^^^
KeyError: 'fields'
fatal: [my.hostname.net]: FAILED! => {
"msg": "An unhandled exception occurred while running the lookup plugin 'community.general.bitwarden'. Error was a <class 'KeyError'>, original message: 'fields'. 'fields'"
}
```
Same thing when trying to do the lookup by id:
```yaml
- debug:
msg: >-
"{{ lookup('community.general.bitwarden', 'ba9f1125-e52a-4d4f-9b2b-af1c43a00bd6', search='id', field='notes') }}"
```
### Issue Type
Bug Report
### Component Name
bitwarden
### Ansible Version
```console (paste below)
$ ansible --version
ansible [core 2.15.0]
config file = /Users/sjoerd/.ansible.cfg
configured module search path = ['/Users/name/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible
ansible collection location = /Users/name/.ansible/collections:/usr/share/ansible/collections
executable location = /opt/homebrew/bin/ansible
python version = 3.11.3 (main, Apr 7 2023, 20:13:31) [Clang 14.0.0 (clang-1400.0.29.202)] (/opt/homebrew/Cellar/ansible/8.0.0/libexec/bin/python3.11)
jinja version = 3.1.2
libyaml = True
```
### Community.general Version
```console (paste below)
$ ansible-galaxy collection list community.general
# /opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible_collections
Collection Version
----------------- -------
community.general 7.0.1
```
### Configuration
```console (paste below)
$ ansible-config dump --only-changed
```
### OS / Environment
Source: MacOS
Target: CentOS Stream 9
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
```yaml (paste below)
- debug:
msg: >-
"{{ lookup('community.general.bitwarden', 'secure note 2023', field='notes') }}"
```
### Expected Results
Expected the contents of the secure note to be printed to the terminal
### Actual Results
```console (paste below)
fatal: [my.hostname.net]: FAILED! => {
"msg": "An unhandled exception occurred while running the lookup plugin 'community.general.bitwarden'. Error was a <class 'KeyError'>, original message: 'fields'. 'fields'"
}
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
--- END ISSUE ---
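The traceback above points at `get_field` unconditionally indexing `match['fields']`, which secure notes simply don't have; the note body lives in the top-level `notes` key of the item returned by `bw list items`. The sketch below reproduces the situation and one possible guard, illustrated against the lookup code shown further down; it is not necessarily the patch that was merged.

```python
# Hedged sketch: a field lookup that tolerates items without custom 'fields'.
# The item dict is hand-written to mimic `bw list items` output for a secure note.
def get_field_value(match, field):
    if field in match:
        # Top-level attributes such as 'notes' (secure notes) or 'id'.
        return match[field]
    for custom_field in match.get("fields", []):  # .get() avoids the KeyError in the traceback
        if custom_field.get("name") == field:
            return custom_field.get("value")
    raise KeyError(f"field {field!r} not found on item {match.get('name')!r}")


secure_note = {"name": "secure note 2023", "notes": "the note body"}  # no 'fields' key at all
print(get_field_value(secure_note, "notes"))  # -> 'the note body'
```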
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/lookup/bitwarden.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright (c) 2022, Jonathan Lung <[email protected]>
3 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
4 # SPDX-License-Identifier: GPL-3.0-or-later
5 from __future__ import (absolute_import, division, print_function)
6 __metaclass__ = type
7
8 DOCUMENTATION = """
9 name: bitwarden
10 author:
11 - Jonathan Lung (@lungj) <[email protected]>
12 requirements:
13 - bw (command line utility)
14 - be logged into bitwarden
15 - bitwarden vault unlocked
16 - C(BW_SESSION) environment variable set
17 short_description: Retrieve secrets from Bitwarden
18 version_added: 5.4.0
19 description:
20 - Retrieve secrets from Bitwarden.
21 options:
22 _terms:
23 description: Key(s) to fetch values for from login info.
24 required: true
25 type: list
26 elements: str
27 search:
28 description: Field to retrieve, for example C(name) or C(id).
29 type: str
30 default: name
31 version_added: 5.7.0
32 field:
33 description: Field to fetch. Leave unset to fetch whole response.
34 type: str
35 collection_id:
36 description: Collection ID to filter results by collection. Leave unset to skip filtering.
37 type: str
38 version_added: 6.3.0
39 """
40
41 EXAMPLES = """
42 - name: "Get 'password' from Bitwarden record named 'a_test'"
43 ansible.builtin.debug:
44 msg: >-
45 {{ lookup('community.general.bitwarden', 'a_test', field='password') }}
46
47 - name: "Get 'password' from Bitwarden record with id 'bafba515-af11-47e6-abe3-af1200cd18b2'"
48 ansible.builtin.debug:
49 msg: >-
50 {{ lookup('community.general.bitwarden', 'bafba515-af11-47e6-abe3-af1200cd18b2', search='id', field='password') }}
51
52 - name: "Get 'password' from Bitwarden record named 'a_test' from collection"
53 ansible.builtin.debug:
54 msg: >-
55 {{ lookup('community.general.bitwarden', 'a_test', field='password', collection_id='bafba515-af11-47e6-abe3-af1200cd18b2') }}
56
57 - name: "Get full Bitwarden record named 'a_test'"
58 ansible.builtin.debug:
59 msg: >-
60 {{ lookup('community.general.bitwarden', 'a_test') }}
61
62 - name: "Get custom field 'api_key' from Bitwarden record named 'a_test'"
63 ansible.builtin.debug:
64 msg: >-
65 {{ lookup('community.general.bitwarden', 'a_test', field='api_key') }}
66 """
67
68 RETURN = """
69 _raw:
70 description: List of requested field or JSON object of list of matches.
71 type: list
72 elements: raw
73 """
74
75 from subprocess import Popen, PIPE
76
77 from ansible.errors import AnsibleError
78 from ansible.module_utils.common.text.converters import to_bytes, to_text
79 from ansible.parsing.ajson import AnsibleJSONDecoder
80 from ansible.plugins.lookup import LookupBase
81
82
83 class BitwardenException(AnsibleError):
84 pass
85
86
87 class Bitwarden(object):
88
89 def __init__(self, path='bw'):
90 self._cli_path = path
91
92 @property
93 def cli_path(self):
94 return self._cli_path
95
96 @property
97 def unlocked(self):
98 out, err = self._run(['status'], stdin="")
99 decoded = AnsibleJSONDecoder().raw_decode(out)[0]
100 return decoded['status'] == 'unlocked'
101
102 def _run(self, args, stdin=None, expected_rc=0):
103 p = Popen([self.cli_path] + args, stdout=PIPE, stderr=PIPE, stdin=PIPE)
104 out, err = p.communicate(to_bytes(stdin))
105 rc = p.wait()
106 if rc != expected_rc:
107 raise BitwardenException(err)
108 return to_text(out, errors='surrogate_or_strict'), to_text(err, errors='surrogate_or_strict')
109
110 def _get_matches(self, search_value, search_field, collection_id):
111 """Return matching records whose search_field is equal to key.
112 """
113
114 # Prepare set of params for Bitwarden CLI
115 params = ['list', 'items', '--search', search_value]
116
117 if collection_id:
118 params.extend(['--collectionid', collection_id])
119
120 out, err = self._run(params)
121
122 # This includes things that matched in different fields.
123 initial_matches = AnsibleJSONDecoder().raw_decode(out)[0]
124
125 # Filter to only include results from the right field.
126 return [item for item in initial_matches if item[search_field] == search_value]
127
128 def get_field(self, field, search_value, search_field="name", collection_id=None):
129 """Return a list of the specified field for records whose search_field match search_value
130 and filtered by collection if collection has been provided.
131
132 If field is None, return the whole record for each match.
133 """
134 matches = self._get_matches(search_value, search_field, collection_id)
135
136 if field in ['autofillOnPageLoad', 'password', 'passwordRevisionDate', 'totp', 'uris', 'username']:
137 return [match['login'][field] for match in matches]
138 elif not field:
139 return matches
140 else:
141 custom_field_matches = []
142 for match in matches:
143 for custom_field in match['fields']:
144 if custom_field['name'] == field:
145 custom_field_matches.append(custom_field['value'])
146 if matches and not custom_field_matches:
147 raise AnsibleError("Custom field {field} does not exist in {search_value}".format(field=field, search_value=search_value))
148 return custom_field_matches
149
150
151 class LookupModule(LookupBase):
152
153 def run(self, terms, variables=None, **kwargs):
154 self.set_options(var_options=variables, direct=kwargs)
155 field = self.get_option('field')
156 search_field = self.get_option('search')
157 collection_id = self.get_option('collection_id')
158 if not _bitwarden.unlocked:
159 raise AnsibleError("Bitwarden Vault locked. Run 'bw unlock'.")
160
161 return [_bitwarden.get_field(field, term, search_field, collection_id) for term in terms]
162
163
164 _bitwarden = Bitwarden()
165
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/plugins/lookup/bitwarden.py b/plugins/lookup/bitwarden.py
--- a/plugins/lookup/bitwarden.py
+++ b/plugins/lookup/bitwarden.py
@@ -132,20 +132,29 @@
If field is None, return the whole record for each match.
"""
matches = self._get_matches(search_value, search_field, collection_id)
-
- if field in ['autofillOnPageLoad', 'password', 'passwordRevisionDate', 'totp', 'uris', 'username']:
- return [match['login'][field] for match in matches]
- elif not field:
+ if not field:
return matches
- else:
- custom_field_matches = []
- for match in matches:
+ field_matches = []
+ for match in matches:
+ # if there are no custom fields, then `match` has no key 'fields'
+ if 'fields' in match:
+ custom_field_found = False
for custom_field in match['fields']:
- if custom_field['name'] == field:
- custom_field_matches.append(custom_field['value'])
- if matches and not custom_field_matches:
- raise AnsibleError("Custom field {field} does not exist in {search_value}".format(field=field, search_value=search_value))
- return custom_field_matches
+ if field == custom_field['name']:
+ field_matches.append(custom_field['value'])
+ custom_field_found = True
+ break
+ if custom_field_found:
+ continue
+ if 'login' in match and field in match['login']:
+ field_matches.append(match['login'][field])
+ continue
+ if field in match:
+ field_matches.append(match[field])
+ continue
+ if matches and not field_matches:
+ raise AnsibleError("field {field} does not exist in {search_value}".format(field=field, search_value=search_value))
+ return field_matches
class LookupModule(LookupBase):
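
For a record such as a secure note, which has neither a `fields` list nor a `login` object, the patched lookup falls through to the top-level attribute instead of raising `KeyError: 'fields'`. A minimal standalone sketch of that lookup order (illustrative only, using plain dicts rather than real Bitwarden CLI output):

```python
# Illustrative sketch of the patched lookup order; not part of the patch itself.
def get_field_from_match(match, field):
    # 1. custom fields, if the record has any
    for custom_field in match.get('fields', []):
        if custom_field['name'] == field:
            return custom_field['value']
    # 2. login sub-object (password, username, totp, ...)
    if 'login' in match and field in match['login']:
        return match['login'][field]
    # 3. top-level attributes such as 'notes'
    if field in match:
        return match[field]
    raise KeyError(field)

secure_note = {'name': 'secure note 2023', 'notes': 'hello'}  # no 'fields', no 'login'
print(get_field_from_match(secure_note, 'notes'))  # -> 'hello' instead of KeyError: 'fields'
```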
|
{"golden_diff": "diff --git a/plugins/lookup/bitwarden.py b/plugins/lookup/bitwarden.py\n--- a/plugins/lookup/bitwarden.py\n+++ b/plugins/lookup/bitwarden.py\n@@ -132,20 +132,29 @@\n If field is None, return the whole record for each match.\n \"\"\"\n matches = self._get_matches(search_value, search_field, collection_id)\n-\n- if field in ['autofillOnPageLoad', 'password', 'passwordRevisionDate', 'totp', 'uris', 'username']:\n- return [match['login'][field] for match in matches]\n- elif not field:\n+ if not field:\n return matches\n- else:\n- custom_field_matches = []\n- for match in matches:\n+ field_matches = []\n+ for match in matches:\n+ # if there are no custom fields, then `match` has no key 'fields'\n+ if 'fields' in match:\n+ custom_field_found = False\n for custom_field in match['fields']:\n- if custom_field['name'] == field:\n- custom_field_matches.append(custom_field['value'])\n- if matches and not custom_field_matches:\n- raise AnsibleError(\"Custom field {field} does not exist in {search_value}\".format(field=field, search_value=search_value))\n- return custom_field_matches\n+ if field == custom_field['name']:\n+ field_matches.append(custom_field['value'])\n+ custom_field_found = True\n+ break\n+ if custom_field_found:\n+ continue\n+ if 'login' in match and field in match['login']:\n+ field_matches.append(match['login'][field])\n+ continue\n+ if field in match:\n+ field_matches.append(match[field])\n+ continue\n+ if matches and not field_matches:\n+ raise AnsibleError(\"field {field} does not exist in {search_value}\".format(field=field, search_value=search_value))\n+ return field_matches\n \n \n class LookupModule(LookupBase):\n", "issue": "lookup bitwarden field=notes resulting in KeyError\n### Summary\n\nWhen using the community.general.bitwarden lookup there is an error trying to lookup a secure note.\r\n\r\nPlaybook:\r\n```yaml\r\n- debug:\r\n msg: >-\r\n \"{{ lookup('community.general.bitwarden', 'secure note 2023', field='notes') }}\"\r\n```\r\n\r\nError:\r\n```\r\nfatal: [my.hostname.net]: FAILED! => {\r\n \"msg\": \"An unhandled exception occurred while running the lookup plugin 'community.general.bitwarden'. Error was a <class 'KeyError'>, original message: 'fields'. 
'fields'\"\r\n}\r\n```\r\nWith debug:\r\n```console\r\nexception during Jinja2 execution: Traceback (most recent call last):\r\n File \"/opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible/template/__init__.py\", line 831, in _lookup\r\n ran = instance.run(loop_terms, variables=self._available_variables, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible_collections/community/general/plugins/lookup/bitwarden.py\", line 159, in run\r\n return [_bitwarden.get_field(field, term, search_field, collection_id) for term in terms]\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible_collections/community/general/plugins/lookup/bitwarden.py\", line 159, in <listcomp>\r\n return [_bitwarden.get_field(field, term, search_field, collection_id) for term in terms]\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible_collections/community/general/plugins/lookup/bitwarden.py\", line 141, in get_field\r\n for custom_field in match['fields']:\r\n ~~~~~^^^^^^^^^^\r\nKeyError: 'fields'\r\nfatal: [my.hostname.net]: FAILED! => {\r\n \"msg\": \"An unhandled exception occurred while running the lookup plugin 'community.general.bitwarden'. Error was a <class 'KeyError'>, original message: 'fields'. 'fields'\"\r\n}\r\n```\r\n\r\nSame thing when trying to do the lookup by id:\r\n```yaml\r\n- debug:\r\n msg: >-\r\n \"{{ lookup('community.general.bitwarden', 'ba9f1125-e52a-4d4f-9b2b-af1c43a00bd6', search='id', field='notes') }}\"\r\n```\n\n### Issue Type\n\nBug Report\n\n### Component Name\n\nbitwarden\n\n### Ansible Version\n\n```console (paste below)\r\n$ ansible --version\r\nansible [core 2.15.0]\r\n config file = /Users/sjoerd/.ansible.cfg\r\n configured module search path = ['/Users/name/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible\r\n ansible collection location = /Users/name/.ansible/collections:/usr/share/ansible/collections\r\n executable location = /opt/homebrew/bin/ansible\r\n python version = 3.11.3 (main, Apr 7 2023, 20:13:31) [Clang 14.0.0 (clang-1400.0.29.202)] (/opt/homebrew/Cellar/ansible/8.0.0/libexec/bin/python3.11)\r\n jinja version = 3.1.2\r\n libyaml = True\r\n```\r\n\n\n### Community.general Version\n\n```console (paste below)\r\n$ ansible-galaxy collection list community.general\r\n\r\n# /opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible_collections\r\nCollection Version\r\n----------------- -------\r\ncommunity.general 7.0.1\r\n```\r\n\n\n### Configuration\n\n```console (paste below)\r\n$ ansible-config dump --only-changed\r\n\r\n```\r\n\n\n### OS / Environment\n\nSource: MacOS\r\nTarget: CentOS Stream 9\r\n\n\n### Steps to Reproduce\n\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml (paste below)\r\n- debug:\r\n msg: >-\r\n \"{{ lookup('community.general.bitwarden', 'secure note 2023', field='notes') }}\"\r\n\r\n```\r\n\n\n### Expected Results\n\nExpected the contents of the secure not to be printed to the terminal\n\n### Actual Results\n\n```console (paste below)\r\nfatal: [my.hostname.net]: FAILED! 
=> {\r\n \"msg\": \"An unhandled exception occurred while running the lookup plugin 'community.general.bitwarden'. Error was a <class 'KeyError'>, original message: 'fields'. 'fields'\"\r\n}\r\n```\r\n\n\n### Code of Conduct\n\n- [X] I agree to follow the Ansible Code of Conduct\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2022, Jonathan Lung <[email protected]>\n# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)\n# SPDX-License-Identifier: GPL-3.0-or-later\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nDOCUMENTATION = \"\"\"\n name: bitwarden\n author:\n - Jonathan Lung (@lungj) <[email protected]>\n requirements:\n - bw (command line utility)\n - be logged into bitwarden\n - bitwarden vault unlocked\n - C(BW_SESSION) environment variable set\n short_description: Retrieve secrets from Bitwarden\n version_added: 5.4.0\n description:\n - Retrieve secrets from Bitwarden.\n options:\n _terms:\n description: Key(s) to fetch values for from login info.\n required: true\n type: list\n elements: str\n search:\n description: Field to retrieve, for example C(name) or C(id).\n type: str\n default: name\n version_added: 5.7.0\n field:\n description: Field to fetch. Leave unset to fetch whole response.\n type: str\n collection_id:\n description: Collection ID to filter results by collection. Leave unset to skip filtering.\n type: str\n version_added: 6.3.0\n\"\"\"\n\nEXAMPLES = \"\"\"\n- name: \"Get 'password' from Bitwarden record named 'a_test'\"\n ansible.builtin.debug:\n msg: >-\n {{ lookup('community.general.bitwarden', 'a_test', field='password') }}\n\n- name: \"Get 'password' from Bitwarden record with id 'bafba515-af11-47e6-abe3-af1200cd18b2'\"\n ansible.builtin.debug:\n msg: >-\n {{ lookup('community.general.bitwarden', 'bafba515-af11-47e6-abe3-af1200cd18b2', search='id', field='password') }}\n\n- name: \"Get 'password' from Bitwarden record named 'a_test' from collection\"\n ansible.builtin.debug:\n msg: >-\n {{ lookup('community.general.bitwarden', 'a_test', field='password', collection_id='bafba515-af11-47e6-abe3-af1200cd18b2') }}\n\n- name: \"Get full Bitwarden record named 'a_test'\"\n ansible.builtin.debug:\n msg: >-\n {{ lookup('community.general.bitwarden', 'a_test') }}\n\n- name: \"Get custom field 'api_key' from Bitwarden record named 'a_test'\"\n ansible.builtin.debug:\n msg: >-\n {{ lookup('community.general.bitwarden', 'a_test', field='api_key') }}\n\"\"\"\n\nRETURN = \"\"\"\n _raw:\n description: List of requested field or JSON object of list of matches.\n type: list\n elements: raw\n\"\"\"\n\nfrom subprocess import Popen, PIPE\n\nfrom ansible.errors import AnsibleError\nfrom ansible.module_utils.common.text.converters import to_bytes, to_text\nfrom ansible.parsing.ajson import AnsibleJSONDecoder\nfrom ansible.plugins.lookup import LookupBase\n\n\nclass BitwardenException(AnsibleError):\n pass\n\n\nclass Bitwarden(object):\n\n def __init__(self, path='bw'):\n self._cli_path = path\n\n @property\n def cli_path(self):\n return self._cli_path\n\n @property\n def unlocked(self):\n out, err = self._run(['status'], stdin=\"\")\n decoded = AnsibleJSONDecoder().raw_decode(out)[0]\n return decoded['status'] == 'unlocked'\n\n def _run(self, args, stdin=None, expected_rc=0):\n p = Popen([self.cli_path] + args, stdout=PIPE, stderr=PIPE, stdin=PIPE)\n out, err = p.communicate(to_bytes(stdin))\n rc = p.wait()\n if rc != expected_rc:\n raise 
BitwardenException(err)\n return to_text(out, errors='surrogate_or_strict'), to_text(err, errors='surrogate_or_strict')\n\n def _get_matches(self, search_value, search_field, collection_id):\n \"\"\"Return matching records whose search_field is equal to key.\n \"\"\"\n\n # Prepare set of params for Bitwarden CLI\n params = ['list', 'items', '--search', search_value]\n\n if collection_id:\n params.extend(['--collectionid', collection_id])\n\n out, err = self._run(params)\n\n # This includes things that matched in different fields.\n initial_matches = AnsibleJSONDecoder().raw_decode(out)[0]\n\n # Filter to only include results from the right field.\n return [item for item in initial_matches if item[search_field] == search_value]\n\n def get_field(self, field, search_value, search_field=\"name\", collection_id=None):\n \"\"\"Return a list of the specified field for records whose search_field match search_value\n and filtered by collection if collection has been provided.\n\n If field is None, return the whole record for each match.\n \"\"\"\n matches = self._get_matches(search_value, search_field, collection_id)\n\n if field in ['autofillOnPageLoad', 'password', 'passwordRevisionDate', 'totp', 'uris', 'username']:\n return [match['login'][field] for match in matches]\n elif not field:\n return matches\n else:\n custom_field_matches = []\n for match in matches:\n for custom_field in match['fields']:\n if custom_field['name'] == field:\n custom_field_matches.append(custom_field['value'])\n if matches and not custom_field_matches:\n raise AnsibleError(\"Custom field {field} does not exist in {search_value}\".format(field=field, search_value=search_value))\n return custom_field_matches\n\n\nclass LookupModule(LookupBase):\n\n def run(self, terms, variables=None, **kwargs):\n self.set_options(var_options=variables, direct=kwargs)\n field = self.get_option('field')\n search_field = self.get_option('search')\n collection_id = self.get_option('collection_id')\n if not _bitwarden.unlocked:\n raise AnsibleError(\"Bitwarden Vault locked. Run 'bw unlock'.\")\n\n return [_bitwarden.get_field(field, term, search_field, collection_id) for term in terms]\n\n\n_bitwarden = Bitwarden()\n", "path": "plugins/lookup/bitwarden.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2022, Jonathan Lung <[email protected]>\n# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)\n# SPDX-License-Identifier: GPL-3.0-or-later\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nDOCUMENTATION = \"\"\"\n name: bitwarden\n author:\n - Jonathan Lung (@lungj) <[email protected]>\n requirements:\n - bw (command line utility)\n - be logged into bitwarden\n - bitwarden vault unlocked\n - C(BW_SESSION) environment variable set\n short_description: Retrieve secrets from Bitwarden\n version_added: 5.4.0\n description:\n - Retrieve secrets from Bitwarden.\n options:\n _terms:\n description: Key(s) to fetch values for from login info.\n required: true\n type: list\n elements: str\n search:\n description: Field to retrieve, for example C(name) or C(id).\n type: str\n default: name\n version_added: 5.7.0\n field:\n description: Field to fetch. Leave unset to fetch whole response.\n type: str\n collection_id:\n description: Collection ID to filter results by collection. 
Leave unset to skip filtering.\n type: str\n version_added: 6.3.0\n\"\"\"\n\nEXAMPLES = \"\"\"\n- name: \"Get 'password' from Bitwarden record named 'a_test'\"\n ansible.builtin.debug:\n msg: >-\n {{ lookup('community.general.bitwarden', 'a_test', field='password') }}\n\n- name: \"Get 'password' from Bitwarden record with id 'bafba515-af11-47e6-abe3-af1200cd18b2'\"\n ansible.builtin.debug:\n msg: >-\n {{ lookup('community.general.bitwarden', 'bafba515-af11-47e6-abe3-af1200cd18b2', search='id', field='password') }}\n\n- name: \"Get 'password' from Bitwarden record named 'a_test' from collection\"\n ansible.builtin.debug:\n msg: >-\n {{ lookup('community.general.bitwarden', 'a_test', field='password', collection_id='bafba515-af11-47e6-abe3-af1200cd18b2') }}\n\n- name: \"Get full Bitwarden record named 'a_test'\"\n ansible.builtin.debug:\n msg: >-\n {{ lookup('community.general.bitwarden', 'a_test') }}\n\n- name: \"Get custom field 'api_key' from Bitwarden record named 'a_test'\"\n ansible.builtin.debug:\n msg: >-\n {{ lookup('community.general.bitwarden', 'a_test', field='api_key') }}\n\"\"\"\n\nRETURN = \"\"\"\n _raw:\n description: List of requested field or JSON object of list of matches.\n type: list\n elements: raw\n\"\"\"\n\nfrom subprocess import Popen, PIPE\n\nfrom ansible.errors import AnsibleError\nfrom ansible.module_utils.common.text.converters import to_bytes, to_text\nfrom ansible.parsing.ajson import AnsibleJSONDecoder\nfrom ansible.plugins.lookup import LookupBase\n\n\nclass BitwardenException(AnsibleError):\n pass\n\n\nclass Bitwarden(object):\n\n def __init__(self, path='bw'):\n self._cli_path = path\n\n @property\n def cli_path(self):\n return self._cli_path\n\n @property\n def unlocked(self):\n out, err = self._run(['status'], stdin=\"\")\n decoded = AnsibleJSONDecoder().raw_decode(out)[0]\n return decoded['status'] == 'unlocked'\n\n def _run(self, args, stdin=None, expected_rc=0):\n p = Popen([self.cli_path] + args, stdout=PIPE, stderr=PIPE, stdin=PIPE)\n out, err = p.communicate(to_bytes(stdin))\n rc = p.wait()\n if rc != expected_rc:\n raise BitwardenException(err)\n return to_text(out, errors='surrogate_or_strict'), to_text(err, errors='surrogate_or_strict')\n\n def _get_matches(self, search_value, search_field, collection_id):\n \"\"\"Return matching records whose search_field is equal to key.\n \"\"\"\n\n # Prepare set of params for Bitwarden CLI\n params = ['list', 'items', '--search', search_value]\n\n if collection_id:\n params.extend(['--collectionid', collection_id])\n\n out, err = self._run(params)\n\n # This includes things that matched in different fields.\n initial_matches = AnsibleJSONDecoder().raw_decode(out)[0]\n\n # Filter to only include results from the right field.\n return [item for item in initial_matches if item[search_field] == search_value]\n\n def get_field(self, field, search_value, search_field=\"name\", collection_id=None):\n \"\"\"Return a list of the specified field for records whose search_field match search_value\n and filtered by collection if collection has been provided.\n\n If field is None, return the whole record for each match.\n \"\"\"\n matches = self._get_matches(search_value, search_field, collection_id)\n if not field:\n return matches\n field_matches = []\n for match in matches:\n # if there are no custom fields, then `match` has no key 'fields'\n if 'fields' in match:\n custom_field_found = False\n for custom_field in match['fields']:\n if field == custom_field['name']:\n field_matches.append(custom_field['value'])\n 
custom_field_found = True\n break\n if custom_field_found:\n continue\n if 'login' in match and field in match['login']:\n field_matches.append(match['login'][field])\n continue\n if field in match:\n field_matches.append(match[field])\n continue\n if matches and not field_matches:\n raise AnsibleError(\"field {field} does not exist in {search_value}\".format(field=field, search_value=search_value))\n return field_matches\n\n\nclass LookupModule(LookupBase):\n\n def run(self, terms, variables=None, **kwargs):\n self.set_options(var_options=variables, direct=kwargs)\n field = self.get_option('field')\n search_field = self.get_option('search')\n collection_id = self.get_option('collection_id')\n if not _bitwarden.unlocked:\n raise AnsibleError(\"Bitwarden Vault locked. Run 'bw unlock'.\")\n\n return [_bitwarden.get_field(field, term, search_field, collection_id) for term in terms]\n\n\n_bitwarden = Bitwarden()\n", "path": "plugins/lookup/bitwarden.py"}]}
| 3,294 | 443 |
gh_patches_debug_1795
|
rasdani/github-patches
|
git_diff
|
aws__aws-cli-3683
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Passing AWS_PROFILE on update-kubeconfig cli
Add profile details when calling update-kubeconfig with profile details passed on the command line:
#aws eks update-kubeconfig --name k8s-test --kubeconfig k8s-test.config --profile myprofile
Currently this generates:
```
user:
exec:
apiVersion: client.authentication.k8s.io/v1alpha1
args:
- token
- -i
- k8s-test
command: aws-iam-authenticator
```
It's good to add the profile details on the CLI when not using the default profile.
```
- name: aws
user:
exec:
apiVersion: client.authentication.k8s.io/v1alpha1
command: aws-iam-authenticator
args:
- "token"
- "-i"
- "k8s-test"
env:
- name: AWS_PROFILE
value: "myprofile"
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `awscli/customizations/eks/update_kubeconfig.py`
Content:
```
1 # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13
14 import os
15 import logging
16
17 from botocore.compat import OrderedDict
18
19 from awscli.customizations.commands import BasicCommand
20 from awscli.customizations.utils import uni_print
21 from awscli.compat import is_windows
22 from awscli.compat import which
23 from awscli.customizations.eks.exceptions import EKSClusterError
24 from awscli.customizations.eks.kubeconfig import (Kubeconfig,
25 KubeconfigError,
26 KubeconfigLoader,
27 KubeconfigWriter,
28 KubeconfigValidator,
29 KubeconfigAppender)
30 from awscli.customizations.eks.ordered_yaml import ordered_yaml_dump
31
32 LOG = logging.getLogger(__name__)
33
34 DEFAULT_PATH = os.path.expanduser("~/.kube/config")
35
36 # Use the endpoint for kubernetes 1.10
37 # To get the most recent endpoint we will need to
38 # Do a check on the cluster's version number
39 API_VERSION = "client.authentication.k8s.io/v1alpha1"
40
41 AUTH_BIN = "aws-iam-authenticator"
42
43 AUTH_GOGET_PATH = ("github.com/kubernetes-sigs/"
44 "aws-iam-authenticator/cmd/aws-iam-authenticator")
45
46
47 def check_for_binary(binary):
48 return which(binary) is not None
49
50 def warn_of_missing_dependencies():
51 """
52 Check the dependencies of the kubectl configuration generated by
53 update-kubeconfig to see if any are missing.
54 If they are missing, print a warning with information on how to download
55 the missing dependency
56
57 Currently dependent on:
58 aws-iam-authenticator
59 """
60 if not check_for_binary(AUTH_BIN):
61 uni_print("Warning: {0} is not installed properly or is not in your path.\n"
62 "Refer to the AWS Documentation to download it at "
63 "https://docs.aws.amazon.com/eks/latest/userguide/configure-kubectl.html\n".format(AUTH_BIN))
64
65
66 class UpdateKubeconfigCommand(BasicCommand):
67 NAME = 'update-kubeconfig'
68
69 DESCRIPTION = BasicCommand.FROM_FILE(
70 'eks',
71 'update-kubeconfig',
72 '_description.rst'
73 )
74
75 ARG_TABLE = [
76 {
77 'name': 'name',
78 'help_text': ("The name of the cluster for which "
79 "to create a kubeconfig entry. "
80 "This cluster must exist in your account and in the "
81 "specified or configured default Region "
82 "for your AWS CLI installation."),
83 'required': True
84 },
85 {
86 'name': 'kubeconfig',
87 'help_text': ("Optionally specify a kubeconfig file to append "
88 "with your configuration. "
89 "By default, the configuration is written to the "
90 "first file path in the KUBECONFIG "
91 "environment variable (if it is set) "
92 "or the default kubeconfig path (.kube/config) "
93 "in your home directory."),
94 'required': False
95 },
96 {
97 'name': 'role-arn',
98 'help_text': ("To assume a role for cluster authentication, "
99 "specify an IAM role ARN with this option. "
100 "For example, if you created a cluster "
101 "while assuming an IAM role, "
102 "then you must also assume that role to "
103 "connect to the cluster the first time."),
104 'required': False
105 },
106 {
107 'name': 'dry-run',
108 'action': 'store_true',
109 'default': False,
110 'help_text': ("Print the merged kubeconfig to stdout instead of "
111 "writing it to the specified file."),
112 'required': False
113 },
114 {
115 'name': 'verbose',
116 'action': 'store_true',
117 'default': False,
118 'help_text': ("Print more detailed output "
119 "when writing to the kubeconfig file, "
120 "including the appended entries.")
121 }
122 ]
123
124 def _display_entries(self, entries):
125 """
126 Display entries in yaml format
127
128 :param entries: a list of OrderedDicts to be printed
129 :type entries: list
130 """
131 uni_print("Entries:\n\n")
132 for entry in entries:
133 uni_print(ordered_yaml_dump(entry))
134 uni_print("\n")
135
136 def _run_main(self, parsed_args, parsed_globals):
137 client = EKSClient(self._session,
138 parsed_args.name,
139 parsed_args.role_arn,
140 parsed_globals)
141 new_cluster_dict = client.get_cluster_entry()
142 new_user_dict = client.get_user_entry()
143
144 config_selector = KubeconfigSelector(
145 os.environ.get("KUBECONFIG", ""),
146 parsed_args.kubeconfig
147 )
148 config = config_selector.choose_kubeconfig(
149 new_cluster_dict["name"]
150 )
151 updating_existing = config.has_cluster(new_cluster_dict["name"])
152 appender = KubeconfigAppender()
153 new_context_dict = appender.insert_cluster_user_pair(config,
154 new_cluster_dict,
155 new_user_dict)
156
157 if parsed_args.dry_run:
158 uni_print(config.dump_content())
159 else:
160 writer = KubeconfigWriter()
161 writer.write_kubeconfig(config)
162
163 if updating_existing:
164 uni_print("Updated context {0} in {1}\n".format(
165 new_context_dict["name"], config.path
166 ))
167 else:
168 uni_print("Added new context {0} to {1}\n".format(
169 new_context_dict["name"], config.path
170 ))
171
172 if parsed_args.verbose:
173 self._display_entries([
174 new_context_dict,
175 new_user_dict,
176 new_cluster_dict
177 ])
178
179 warn_of_missing_dependencies()
180
181
182 class KubeconfigSelector(object):
183
184 def __init__(self, env_variable, path_in, validator=None,
185 loader=None):
186 """
187 Parse KUBECONFIG into a list of absolute paths.
188 Also replace the empty list with DEFAULT_PATH
189
190 :param env_variable: KUBECONFIG as a long string
191 :type env_variable: string
192
193 :param path_in: The path passed in through the CLI
194 :type path_in: string or None
195 """
196 if validator is None:
197 validator = KubeconfigValidator()
198 self._validator = validator
199
200 if loader is None:
201 loader = KubeconfigLoader(validator)
202 self._loader = loader
203
204 if path_in is not None:
205 # Override environment variable
206 self._paths = [self._expand_path(path_in)]
207 else:
208 # Get the list of paths from the environment variable
209 if env_variable == "":
210 env_variable = DEFAULT_PATH
211 self._paths = [self._expand_path(element)
212 for element in env_variable.split(os.pathsep)
213 if len(element.strip()) > 0]
214 if len(self._paths) == 0:
215 self._paths = [DEFAULT_PATH]
216
217 def choose_kubeconfig(self, cluster_name):
218 """
219 Choose which kubeconfig file to read from.
220 If name is already an entry in one of the $KUBECONFIG files,
221 choose that one.
222 Otherwise choose the first file.
223
224 :param cluster_name: The name of the cluster which is going to be added
225 :type cluster_name: String
226
227 :return: a chosen Kubeconfig based on above rules
228 :rtype: Kubeconfig
229 """
230 # Search for an existing entry to update
231 for candidate_path in self._paths:
232 try:
233 loaded_config = self._loader.load_kubeconfig(candidate_path)
234
235 if loaded_config.has_cluster(cluster_name):
236 LOG.debug("Found entry to update at {0}".format(
237 candidate_path
238 ))
239 return loaded_config
240 except KubeconfigError as e:
241 LOG.warning("Passing {0}:{1}".format(candidate_path, e))
242
243 # No entry was found, use the first file in KUBECONFIG
244 #
245 # Note: This could raise KubeconfigErrors if paths[0] is corrupted
246 return self._loader.load_kubeconfig(self._paths[0])
247
248 def _expand_path(self, path):
249 """ A helper to expand a path to a full absolute path. """
250 return os.path.abspath(os.path.expanduser(path))
251
252
253 class EKSClient(object):
254 def __init__(self, session, cluster_name, role_arn, parsed_globals=None):
255 self._session = session
256 self._cluster_name = cluster_name
257 self._role_arn = role_arn
258 self._cluster_description = None
259 self._globals = parsed_globals
260
261 def _get_cluster_description(self):
262 """
263 Use an eks describe-cluster call to get the cluster description
264 Cache the response in self._cluster_description.
265 describe-cluster will only be called once.
266 """
267 if self._cluster_description is None:
268 if self._globals is None:
269 client = self._session.create_client("eks")
270 else:
271 client = self._session.create_client(
272 "eks",
273 region_name=self._globals.region,
274 endpoint_url=self._globals.endpoint_url,
275 verify=self._globals.verify_ssl
276 )
277 full_description = client.describe_cluster(name=self._cluster_name)
278 self._cluster_description = full_description["cluster"]
279
280 if "status" not in self._cluster_description:
281 raise EKSClusterError("Cluster not found")
282 if self._cluster_description["status"] != "ACTIVE":
283 raise EKSClusterError("Cluster status not active")
284
285 return self._cluster_description
286
287 def get_cluster_entry(self):
288 """
289 Return a cluster entry generated using
290 the previously obtained description.
291 """
292
293 cert_data = self._get_cluster_description().get("certificateAuthority",
294 {"data": ""})["data"]
295 endpoint = self._get_cluster_description().get("endpoint")
296 arn = self._get_cluster_description().get("arn")
297
298 return OrderedDict([
299 ("cluster", OrderedDict([
300 ("certificate-authority-data", cert_data),
301 ("server", endpoint)
302 ])),
303 ("name", arn)
304 ])
305
306 def get_user_entry(self):
307 """
308 Return a user entry generated using
309 the previously obtained description.
310 """
311 generated_user = OrderedDict([
312 ("name", self._get_cluster_description().get("arn", "")),
313 ("user", OrderedDict([
314 ("exec", OrderedDict([
315 ("apiVersion", API_VERSION),
316 ("args",
317 [
318 "token",
319 "-i",
320 self._cluster_name
321 ]),
322 ("command", AUTH_BIN)
323 ]))
324 ]))
325 ])
326
327 if is_windows:
328 generated_user["user"]["exec"]["command"] += ".exe"
329
330 if self._role_arn is not None:
331 generated_user["user"]["exec"]["args"].extend([
332 "-r",
333 self._role_arn
334 ])
335
336 return generated_user
337
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/awscli/customizations/eks/update_kubeconfig.py b/awscli/customizations/eks/update_kubeconfig.py
--- a/awscli/customizations/eks/update_kubeconfig.py
+++ b/awscli/customizations/eks/update_kubeconfig.py
@@ -333,4 +333,10 @@
self._role_arn
])
+ if self._session.profile:
+ generated_user["user"]["exec"]["env"] = [OrderedDict([
+ ("name", "AWS_PROFILE"),
+ ("value", self._session.profile)
+ ])]
+
return generated_user
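
The added block only attaches an `env` entry when a named profile is in use, so the default-profile case is unchanged. A rough standalone sketch of the shape of the generated mapping (plain `OrderedDict`s outside the CLI; the profile name here is a hypothetical placeholder):

```python
from collections import OrderedDict

profile = "myprofile"  # hypothetical profile name; None/empty when the default profile is used

exec_entry = OrderedDict([
    ("apiVersion", "client.authentication.k8s.io/v1alpha1"),
    ("args", ["token", "-i", "k8s-test"]),
    ("command", "aws-iam-authenticator"),
])
if profile:
    # mirrors the patch: only attach env when a non-default profile is active
    exec_entry["env"] = [OrderedDict([("name", "AWS_PROFILE"), ("value", profile)])]

print(exec_entry["env"])  # [OrderedDict([('name', 'AWS_PROFILE'), ('value', 'myprofile')])]
```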
|
{"golden_diff": "diff --git a/awscli/customizations/eks/update_kubeconfig.py b/awscli/customizations/eks/update_kubeconfig.py\n--- a/awscli/customizations/eks/update_kubeconfig.py\n+++ b/awscli/customizations/eks/update_kubeconfig.py\n@@ -333,4 +333,10 @@\n self._role_arn\n ])\n \n+ if self._session.profile:\n+ generated_user[\"user\"][\"exec\"][\"env\"] = [OrderedDict([\n+ (\"name\", \"AWS_PROFILE\"),\n+ (\"value\", self._session.profile)\n+ ])]\n+\n return generated_user\n", "issue": "Passing AWS_PROFILE on update-kubeconfig cli\nAdd profile details if calling update passing update-kubeconfig with profile details \r\n\r\n#aws eks update-kubeconfig --name k8s-test --kubeconfig k8s-test.config --profile myprofile \r\n\r\nCurrently this generates as \r\n```\r\n user:\r\n exec:\r\n apiVersion: client.authentication.k8s.io/v1alpha1\r\n args:\r\n - token\r\n - -i\r\n - k8s-test\r\n command: aws-iam-authenticator\r\n```\r\n\r\nIts good to add the profile details on the cli when not using default profile. \r\n\r\n```\r\n- name: aws\r\n user:\r\n exec:\r\n apiVersion: client.authentication.k8s.io/v1alpha1\r\n command: aws-iam-authenticator\r\n args:\r\n - \"token\"\r\n - \"-i\"\r\n - \"k8s-test\"\r\n\r\n env:\r\n - name: AWS_PROFILE\r\n value: \"myprofile\"\r\n```\n", "before_files": [{"content": "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\n\nimport os\nimport logging\n\nfrom botocore.compat import OrderedDict\n\nfrom awscli.customizations.commands import BasicCommand\nfrom awscli.customizations.utils import uni_print\nfrom awscli.compat import is_windows\nfrom awscli.compat import which\nfrom awscli.customizations.eks.exceptions import EKSClusterError\nfrom awscli.customizations.eks.kubeconfig import (Kubeconfig,\n KubeconfigError,\n KubeconfigLoader,\n KubeconfigWriter,\n KubeconfigValidator,\n KubeconfigAppender)\nfrom awscli.customizations.eks.ordered_yaml import ordered_yaml_dump\n\nLOG = logging.getLogger(__name__)\n\nDEFAULT_PATH = os.path.expanduser(\"~/.kube/config\")\n\n# Use the endpoint for kubernetes 1.10\n# To get the most recent endpoint we will need to\n# Do a check on the cluster's version number\nAPI_VERSION = \"client.authentication.k8s.io/v1alpha1\"\n\nAUTH_BIN = \"aws-iam-authenticator\"\n\nAUTH_GOGET_PATH = (\"github.com/kubernetes-sigs/\"\n \"aws-iam-authenticator/cmd/aws-iam-authenticator\")\n\n\ndef check_for_binary(binary):\n return which(binary) is not None\n\ndef warn_of_missing_dependencies():\n \"\"\"\n Check the dependencies of the kubectl configuration generated by\n update-kubeconfig to see if any are missing.\n If they are missing, print a warning with information on how to download\n the missing dependency\n\n Currently dependent on:\n aws-iam-authenticator\n \"\"\"\n if not check_for_binary(AUTH_BIN):\n uni_print(\"Warning: {0} is not installed properly or is not in your path.\\n\"\n \"Refer to the AWS Documentation to download it at \"\n \"https://docs.aws.amazon.com/eks/latest/userguide/configure-kubectl.html\\n\".format(AUTH_BIN))\n\n\nclass UpdateKubeconfigCommand(BasicCommand):\n NAME = 'update-kubeconfig'\n\n DESCRIPTION = BasicCommand.FROM_FILE(\n 'eks',\n 'update-kubeconfig',\n '_description.rst'\n )\n\n ARG_TABLE = [\n {\n 'name': 'name',\n 'help_text': (\"The name of the cluster for which \"\n \"to create a kubeconfig entry. \"\n \"This cluster must exist in your account and in the \"\n \"specified or configured default Region \"\n \"for your AWS CLI installation.\"),\n 'required': True\n },\n {\n 'name': 'kubeconfig',\n 'help_text': (\"Optionally specify a kubeconfig file to append \"\n \"with your configuration. \"\n \"By default, the configuration is written to the \"\n \"first file path in the KUBECONFIG \"\n \"environment variable (if it is set) \"\n \"or the default kubeconfig path (.kube/config) \"\n \"in your home directory.\"),\n 'required': False\n },\n {\n 'name': 'role-arn',\n 'help_text': (\"To assume a role for cluster authentication, \"\n \"specify an IAM role ARN with this option. 
\"\n \"For example, if you created a cluster \"\n \"while assuming an IAM role, \"\n \"then you must also assume that role to \"\n \"connect to the cluster the first time.\"),\n 'required': False\n },\n {\n 'name': 'dry-run',\n 'action': 'store_true',\n 'default': False,\n 'help_text': (\"Print the merged kubeconfig to stdout instead of \"\n \"writing it to the specified file.\"),\n 'required': False\n },\n {\n 'name': 'verbose',\n 'action': 'store_true',\n 'default': False,\n 'help_text': (\"Print more detailed output \"\n \"when writing to the kubeconfig file, \"\n \"including the appended entries.\")\n }\n ]\n\n def _display_entries(self, entries):\n \"\"\" \n Display entries in yaml format\n\n :param entries: a list of OrderedDicts to be printed\n :type entries: list\n \"\"\"\n uni_print(\"Entries:\\n\\n\")\n for entry in entries:\n uni_print(ordered_yaml_dump(entry))\n uni_print(\"\\n\")\n\n def _run_main(self, parsed_args, parsed_globals):\n client = EKSClient(self._session,\n parsed_args.name,\n parsed_args.role_arn,\n parsed_globals)\n new_cluster_dict = client.get_cluster_entry()\n new_user_dict = client.get_user_entry()\n\n config_selector = KubeconfigSelector(\n os.environ.get(\"KUBECONFIG\", \"\"),\n parsed_args.kubeconfig\n )\n config = config_selector.choose_kubeconfig(\n new_cluster_dict[\"name\"]\n )\n updating_existing = config.has_cluster(new_cluster_dict[\"name\"])\n appender = KubeconfigAppender()\n new_context_dict = appender.insert_cluster_user_pair(config,\n new_cluster_dict,\n new_user_dict)\n\n if parsed_args.dry_run:\n uni_print(config.dump_content())\n else:\n writer = KubeconfigWriter()\n writer.write_kubeconfig(config)\n\n if updating_existing:\n uni_print(\"Updated context {0} in {1}\\n\".format(\n new_context_dict[\"name\"], config.path\n ))\n else:\n uni_print(\"Added new context {0} to {1}\\n\".format(\n new_context_dict[\"name\"], config.path\n ))\n\n if parsed_args.verbose:\n self._display_entries([\n new_context_dict,\n new_user_dict,\n new_cluster_dict\n ])\n\n warn_of_missing_dependencies()\n\n\nclass KubeconfigSelector(object):\n\n def __init__(self, env_variable, path_in, validator=None,\n loader=None):\n \"\"\"\n Parse KUBECONFIG into a list of absolute paths.\n Also replace the empty list with DEFAULT_PATH\n\n :param env_variable: KUBECONFIG as a long string\n :type env_variable: string\n\n :param path_in: The path passed in through the CLI\n :type path_in: string or None\n \"\"\"\n if validator is None:\n validator = KubeconfigValidator()\n self._validator = validator\n\n if loader is None:\n loader = KubeconfigLoader(validator)\n self._loader = loader\n\n if path_in is not None:\n # Override environment variable\n self._paths = [self._expand_path(path_in)]\n else:\n # Get the list of paths from the environment variable\n if env_variable == \"\":\n env_variable = DEFAULT_PATH\n self._paths = [self._expand_path(element)\n for element in env_variable.split(os.pathsep)\n if len(element.strip()) > 0]\n if len(self._paths) == 0:\n self._paths = [DEFAULT_PATH]\n\n def choose_kubeconfig(self, cluster_name):\n \"\"\"\n Choose which kubeconfig file to read from.\n If name is already an entry in one of the $KUBECONFIG files,\n choose that one.\n Otherwise choose the first file.\n\n :param cluster_name: The name of the cluster which is going to be added\n :type cluster_name: String\n\n :return: a chosen Kubeconfig based on above rules\n :rtype: Kubeconfig\n \"\"\"\n # Search for an existing entry to update\n for candidate_path in self._paths:\n try:\n 
loaded_config = self._loader.load_kubeconfig(candidate_path)\n\n if loaded_config.has_cluster(cluster_name):\n LOG.debug(\"Found entry to update at {0}\".format(\n candidate_path\n ))\n return loaded_config\n except KubeconfigError as e:\n LOG.warning(\"Passing {0}:{1}\".format(candidate_path, e))\n\n # No entry was found, use the first file in KUBECONFIG\n #\n # Note: This could raise KubeconfigErrors if paths[0] is corrupted\n return self._loader.load_kubeconfig(self._paths[0])\n\n def _expand_path(self, path):\n \"\"\" A helper to expand a path to a full absolute path. \"\"\"\n return os.path.abspath(os.path.expanduser(path))\n\n\nclass EKSClient(object):\n def __init__(self, session, cluster_name, role_arn, parsed_globals=None):\n self._session = session\n self._cluster_name = cluster_name\n self._role_arn = role_arn\n self._cluster_description = None\n self._globals = parsed_globals\n\n def _get_cluster_description(self):\n \"\"\"\n Use an eks describe-cluster call to get the cluster description\n Cache the response in self._cluster_description.\n describe-cluster will only be called once.\n \"\"\"\n if self._cluster_description is None:\n if self._globals is None:\n client = self._session.create_client(\"eks\")\n else:\n client = self._session.create_client(\n \"eks\", \n region_name=self._globals.region,\n endpoint_url=self._globals.endpoint_url,\n verify=self._globals.verify_ssl\n )\n full_description = client.describe_cluster(name=self._cluster_name)\n self._cluster_description = full_description[\"cluster\"]\n\n if \"status\" not in self._cluster_description:\n raise EKSClusterError(\"Cluster not found\")\n if self._cluster_description[\"status\"] != \"ACTIVE\":\n raise EKSClusterError(\"Cluster status not active\")\n\n return self._cluster_description\n\n def get_cluster_entry(self):\n \"\"\"\n Return a cluster entry generated using\n the previously obtained description.\n \"\"\"\n\n cert_data = self._get_cluster_description().get(\"certificateAuthority\",\n {\"data\": \"\"})[\"data\"]\n endpoint = self._get_cluster_description().get(\"endpoint\")\n arn = self._get_cluster_description().get(\"arn\")\n\n return OrderedDict([\n (\"cluster\", OrderedDict([\n (\"certificate-authority-data\", cert_data),\n (\"server\", endpoint)\n ])),\n (\"name\", arn)\n ])\n\n def get_user_entry(self):\n \"\"\"\n Return a user entry generated using\n the previously obtained description.\n \"\"\"\n generated_user = OrderedDict([\n (\"name\", self._get_cluster_description().get(\"arn\", \"\")),\n (\"user\", OrderedDict([\n (\"exec\", OrderedDict([\n (\"apiVersion\", API_VERSION),\n (\"args\",\n [\n \"token\",\n \"-i\",\n self._cluster_name\n ]),\n (\"command\", AUTH_BIN)\n ]))\n ]))\n ])\n\n if is_windows:\n generated_user[\"user\"][\"exec\"][\"command\"] += \".exe\"\n\n if self._role_arn is not None:\n generated_user[\"user\"][\"exec\"][\"args\"].extend([\n \"-r\",\n self._role_arn\n ])\n\n return generated_user\n", "path": "awscli/customizations/eks/update_kubeconfig.py"}], "after_files": [{"content": "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\n\nimport os\nimport logging\n\nfrom botocore.compat import OrderedDict\n\nfrom awscli.customizations.commands import BasicCommand\nfrom awscli.customizations.utils import uni_print\nfrom awscli.compat import is_windows\nfrom awscli.compat import which\nfrom awscli.customizations.eks.exceptions import EKSClusterError\nfrom awscli.customizations.eks.kubeconfig import (Kubeconfig,\n KubeconfigError,\n KubeconfigLoader,\n KubeconfigWriter,\n KubeconfigValidator,\n KubeconfigAppender)\nfrom awscli.customizations.eks.ordered_yaml import ordered_yaml_dump\n\nLOG = logging.getLogger(__name__)\n\nDEFAULT_PATH = os.path.expanduser(\"~/.kube/config\")\n\n# Use the endpoint for kubernetes 1.10\n# To get the most recent endpoint we will need to\n# Do a check on the cluster's version number\nAPI_VERSION = \"client.authentication.k8s.io/v1alpha1\"\n\nAUTH_BIN = \"aws-iam-authenticator\"\n\nAUTH_GOGET_PATH = (\"github.com/kubernetes-sigs/\"\n \"aws-iam-authenticator/cmd/aws-iam-authenticator\")\n\n\ndef check_for_binary(binary):\n return which(binary) is not None\n\ndef warn_of_missing_dependencies():\n \"\"\"\n Check the dependencies of the kubectl configuration generated by\n update-kubeconfig to see if any are missing.\n If they are missing, print a warning with information on how to download\n the missing dependency\n\n Currently dependent on:\n aws-iam-authenticator\n \"\"\"\n if not check_for_binary(AUTH_BIN):\n uni_print(\"Warning: {0} is not installed properly or is not in your path.\\n\"\n \"Refer to the AWS Documentation to download it at \"\n \"https://docs.aws.amazon.com/eks/latest/userguide/configure-kubectl.html\\n\".format(AUTH_BIN))\n\n\nclass UpdateKubeconfigCommand(BasicCommand):\n NAME = 'update-kubeconfig'\n\n DESCRIPTION = BasicCommand.FROM_FILE(\n 'eks',\n 'update-kubeconfig',\n '_description.rst'\n )\n\n ARG_TABLE = [\n {\n 'name': 'name',\n 'help_text': (\"The name of the cluster for which \"\n \"to create a kubeconfig entry. \"\n \"This cluster must exist in your account and in the \"\n \"specified or configured default Region \"\n \"for your AWS CLI installation.\"),\n 'required': True\n },\n {\n 'name': 'kubeconfig',\n 'help_text': (\"Optionally specify a kubeconfig file to append \"\n \"with your configuration. \"\n \"By default, the configuration is written to the \"\n \"first file path in the KUBECONFIG \"\n \"environment variable (if it is set) \"\n \"or the default kubeconfig path (.kube/config) \"\n \"in your home directory.\"),\n 'required': False\n },\n {\n 'name': 'role-arn',\n 'help_text': (\"To assume a role for cluster authentication, \"\n \"specify an IAM role ARN with this option. 
\"\n \"For example, if you created a cluster \"\n \"while assuming an IAM role, \"\n \"then you must also assume that role to \"\n \"connect to the cluster the first time.\"),\n 'required': False\n },\n {\n 'name': 'dry-run',\n 'action': 'store_true',\n 'default': False,\n 'help_text': (\"Print the merged kubeconfig to stdout instead of \"\n \"writing it to the specified file.\"),\n 'required': False\n },\n {\n 'name': 'verbose',\n 'action': 'store_true',\n 'default': False,\n 'help_text': (\"Print more detailed output \"\n \"when writing to the kubeconfig file, \"\n \"including the appended entries.\")\n }\n ]\n\n def _display_entries(self, entries):\n \"\"\" \n Display entries in yaml format\n\n :param entries: a list of OrderedDicts to be printed\n :type entries: list\n \"\"\"\n uni_print(\"Entries:\\n\\n\")\n for entry in entries:\n uni_print(ordered_yaml_dump(entry))\n uni_print(\"\\n\")\n\n def _run_main(self, parsed_args, parsed_globals):\n client = EKSClient(self._session,\n parsed_args.name,\n parsed_args.role_arn,\n parsed_globals)\n new_cluster_dict = client.get_cluster_entry()\n new_user_dict = client.get_user_entry()\n\n config_selector = KubeconfigSelector(\n os.environ.get(\"KUBECONFIG\", \"\"),\n parsed_args.kubeconfig\n )\n config = config_selector.choose_kubeconfig(\n new_cluster_dict[\"name\"]\n )\n updating_existing = config.has_cluster(new_cluster_dict[\"name\"])\n appender = KubeconfigAppender()\n new_context_dict = appender.insert_cluster_user_pair(config,\n new_cluster_dict,\n new_user_dict)\n\n if parsed_args.dry_run:\n uni_print(config.dump_content())\n else:\n writer = KubeconfigWriter()\n writer.write_kubeconfig(config)\n\n if updating_existing:\n uni_print(\"Updated context {0} in {1}\\n\".format(\n new_context_dict[\"name\"], config.path\n ))\n else:\n uni_print(\"Added new context {0} to {1}\\n\".format(\n new_context_dict[\"name\"], config.path\n ))\n\n if parsed_args.verbose:\n self._display_entries([\n new_context_dict,\n new_user_dict,\n new_cluster_dict\n ])\n\n warn_of_missing_dependencies()\n\n\nclass KubeconfigSelector(object):\n\n def __init__(self, env_variable, path_in, validator=None,\n loader=None):\n \"\"\"\n Parse KUBECONFIG into a list of absolute paths.\n Also replace the empty list with DEFAULT_PATH\n\n :param env_variable: KUBECONFIG as a long string\n :type env_variable: string\n\n :param path_in: The path passed in through the CLI\n :type path_in: string or None\n \"\"\"\n if validator is None:\n validator = KubeconfigValidator()\n self._validator = validator\n\n if loader is None:\n loader = KubeconfigLoader(validator)\n self._loader = loader\n\n if path_in is not None:\n # Override environment variable\n self._paths = [self._expand_path(path_in)]\n else:\n # Get the list of paths from the environment variable\n if env_variable == \"\":\n env_variable = DEFAULT_PATH\n self._paths = [self._expand_path(element)\n for element in env_variable.split(os.pathsep)\n if len(element.strip()) > 0]\n if len(self._paths) == 0:\n self._paths = [DEFAULT_PATH]\n\n def choose_kubeconfig(self, cluster_name):\n \"\"\"\n Choose which kubeconfig file to read from.\n If name is already an entry in one of the $KUBECONFIG files,\n choose that one.\n Otherwise choose the first file.\n\n :param cluster_name: The name of the cluster which is going to be added\n :type cluster_name: String\n\n :return: a chosen Kubeconfig based on above rules\n :rtype: Kubeconfig\n \"\"\"\n # Search for an existing entry to update\n for candidate_path in self._paths:\n try:\n 
loaded_config = self._loader.load_kubeconfig(candidate_path)\n\n if loaded_config.has_cluster(cluster_name):\n LOG.debug(\"Found entry to update at {0}\".format(\n candidate_path\n ))\n return loaded_config\n except KubeconfigError as e:\n LOG.warning(\"Passing {0}:{1}\".format(candidate_path, e))\n\n # No entry was found, use the first file in KUBECONFIG\n #\n # Note: This could raise KubeconfigErrors if paths[0] is corrupted\n return self._loader.load_kubeconfig(self._paths[0])\n\n def _expand_path(self, path):\n \"\"\" A helper to expand a path to a full absolute path. \"\"\"\n return os.path.abspath(os.path.expanduser(path))\n\n\nclass EKSClient(object):\n def __init__(self, session, cluster_name, role_arn, parsed_globals=None):\n self._session = session\n self._cluster_name = cluster_name\n self._role_arn = role_arn\n self._cluster_description = None\n self._globals = parsed_globals\n\n def _get_cluster_description(self):\n \"\"\"\n Use an eks describe-cluster call to get the cluster description\n Cache the response in self._cluster_description.\n describe-cluster will only be called once.\n \"\"\"\n if self._cluster_description is None:\n if self._globals is None:\n client = self._session.create_client(\"eks\")\n else:\n client = self._session.create_client(\n \"eks\", \n region_name=self._globals.region,\n endpoint_url=self._globals.endpoint_url,\n verify=self._globals.verify_ssl\n )\n full_description = client.describe_cluster(name=self._cluster_name)\n self._cluster_description = full_description[\"cluster\"]\n\n if \"status\" not in self._cluster_description:\n raise EKSClusterError(\"Cluster not found\")\n if self._cluster_description[\"status\"] != \"ACTIVE\":\n raise EKSClusterError(\"Cluster status not active\")\n\n return self._cluster_description\n\n def get_cluster_entry(self):\n \"\"\"\n Return a cluster entry generated using\n the previously obtained description.\n \"\"\"\n\n cert_data = self._get_cluster_description().get(\"certificateAuthority\",\n {\"data\": \"\"})[\"data\"]\n endpoint = self._get_cluster_description().get(\"endpoint\")\n arn = self._get_cluster_description().get(\"arn\")\n\n return OrderedDict([\n (\"cluster\", OrderedDict([\n (\"certificate-authority-data\", cert_data),\n (\"server\", endpoint)\n ])),\n (\"name\", arn)\n ])\n\n def get_user_entry(self):\n \"\"\"\n Return a user entry generated using\n the previously obtained description.\n \"\"\"\n generated_user = OrderedDict([\n (\"name\", self._get_cluster_description().get(\"arn\", \"\")),\n (\"user\", OrderedDict([\n (\"exec\", OrderedDict([\n (\"apiVersion\", API_VERSION),\n (\"args\",\n [\n \"token\",\n \"-i\",\n self._cluster_name\n ]),\n (\"command\", AUTH_BIN)\n ]))\n ]))\n ])\n\n if is_windows:\n generated_user[\"user\"][\"exec\"][\"command\"] += \".exe\"\n\n if self._role_arn is not None:\n generated_user[\"user\"][\"exec\"][\"args\"].extend([\n \"-r\",\n self._role_arn\n ])\n\n if self._session.profile:\n generated_user[\"user\"][\"exec\"][\"env\"] = [OrderedDict([\n (\"name\", \"AWS_PROFILE\"),\n (\"value\", self._session.profile)\n ])]\n\n return generated_user\n", "path": "awscli/customizations/eks/update_kubeconfig.py"}]}
| 3,802 | 134 |
gh_patches_debug_39566
|
rasdani/github-patches
|
git_diff
|
meltano__meltano-7010
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bug: Missing Python executable from plugin venv causes error during `meltano install`
### Meltano Version
2.10.0
### Python Version
3.9
### Bug scope
CLI (options, error messages, logging, etc.)
### Operating System
Linux Mint 20.3
### Description
1. Install Python (e.g. using `pyenv`).
2. Use that version of Python to run `meltano install`.
3. Uninstall that version of Python.
4. Install a new version of Python (e.g. going from version 3.9.X to 3.9.Y)
5. Use that version of Python to run `meltano install` again:
```
Installing X plugins...
[...]
Need help fixing this problem? Visit http://melta.no/ for troubleshooting steps, or to
join our friendly Slack community.
[Errno 2] No such file or directory: '<project directory>/.meltano/<installed plugin>/venv/bin/python'
```
The venvs managed by Meltano assume that the underlying Python executable that `venv/bin/python` links to will not be deleted. In reality it may be, and if it is we should detect this and automatically replace the venv (as if the `--clean` flag has been provided).
### Code
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/meltano/core/venv_service.py`
Content:
```
1 """Manage Python virtual environments."""
2
3 from __future__ import annotations
4
5 import asyncio
6 import hashlib
7 import logging
8 import os
9 import platform
10 import shutil
11 import subprocess
12 import sys
13 from asyncio.subprocess import Process
14 from collections import namedtuple
15 from collections.abc import Iterable
16 from pathlib import Path
17
18 from meltano.core.error import AsyncSubprocessError
19 from meltano.core.project import Project
20
21 logger = logging.getLogger(__name__)
22
23 VenvSpecs = namedtuple("VenvSpecs", ("lib_dir", "bin_dir", "site_packages_dir"))
24
25 POSIX = VenvSpecs(
26 lib_dir="lib",
27 bin_dir="bin",
28 site_packages_dir=os.path.join(
29 "lib",
30 f"python{'.'.join(str(part) for part in sys.version_info[:2])}",
31 "site-packages",
32 ),
33 )
34
35 NT = VenvSpecs(
36 lib_dir="Lib",
37 bin_dir="Scripts",
38 site_packages_dir=os.path.join("Lib", "site-packages"),
39 )
40
41 PLATFORM_SPECS = {"Linux": POSIX, "Darwin": POSIX, "Windows": NT}
42
43
44 def venv_platform_specs():
45 """Get virtual environment sub-path info for the current platform.
46
47 Raises:
48 Exception: This platform is not supported.
49
50 Returns:
51 Virtual environment sub-path info for the current platform.
52 """
53 system = platform.system()
54 try:
55 return PLATFORM_SPECS[system]
56 except KeyError as ex:
57 raise Exception(f"Platform {system!r} not supported.") from ex
58
59
60 PIP_PACKAGES = ("pip", "setuptools", "wheel")
61
62
63 class VirtualEnv:
64 """Info about a single virtual environment."""
65
66 def __init__(self, root: Path):
67 """Initialize the `VirtualEnv` instance.
68
69 Args:
70 root: The root directory of the virtual environment.
71 """
72 self.root = root.resolve()
73 self.specs = venv_platform_specs()
74
75 def __getattr__(self, key: str):
76 """Get a specific attribute from this instance.
77
78 Used to provide `VenvSpecs` attributes for this specific virtual environment.
79
80 Args:
81 key: The attribute name. Must be one of the `VenvSpecs` attributes.
82
83 Returns:
84 The root directory of this virtual environment joined to the requested
85 platform-specific path using this platform's `VenvSpecs` instance.
86 """
87 return self.root / getattr(self.specs, key)
88
89 def __str__(self):
90 """_summary_.
91
92 Returns:
93 _description_.
94 """
95 return str(self.root)
96
97
98 async def exec_async(*args, **kwargs) -> Process:
99 """Run an executable asyncronously in a subprocess.
100
101 Args:
102 args: Positional arguments for `asyncio.create_subprocess_exec`.
103 kwargs: Keyword arguments for `asyncio.create_subprocess_exec`.
104
105 Raises:
106 AsyncSubprocessError: The command failed.
107
108 Returns:
109 The subprocess.
110 """
111 run = await asyncio.create_subprocess_exec(
112 *args,
113 stdout=subprocess.PIPE,
114 stderr=subprocess.PIPE,
115 **kwargs,
116 )
117 await run.wait()
118
119 if run.returncode != 0:
120 raise AsyncSubprocessError("Command failed", run)
121
122 return run
123
124
125 def fingerprint(pip_install_args: Iterable[str]) -> str:
126 """Generate a hash identifying pip install args.
127
128 Arguments are sorted and deduplicated before the hash is generated.
129
130 Args:
131 pip_install_args: Arguments for `pip install`.
132
133 Returns:
134 The SHA256 hash hex digest of the sorted set of pip install args.
135 """
136 return hashlib.sha256(" ".join(sorted(set(pip_install_args))).encode()).hexdigest()
137
138
139 class VenvService: # noqa: WPS214
140 """Manages virtual environments.
141
142 The methods in this class are not threadsafe.
143 """
144
145 def __init__(self, project: Project, namespace: str = "", name: str = ""):
146 """Initialize the `VenvService`.
147
148 Args:
149 project: The Meltano project.
150 namespace: The namespace for the venv, e.g. a Plugin type.
151 name: The name of the venv, e.g. a Plugin name.
152 """
153 self.project = project
154 self.namespace = namespace
155 self.name = name
156 self.venv = VirtualEnv(self.project.venvs_dir(namespace, name))
157 self.python_path = self.venv.bin_dir / "python"
158 self.plugin_fingerprint_path = self.venv.root / ".meltano_plugin_fingerprint"
159
160 async def install(self, pip_install_args: list[str], clean: bool = False) -> None:
161 """Configure a virtual environment, then run pip install with the given args.
162
163 Args:
164 pip_install_args: Arguments passed to `pip install`.
165 clean: Whether to not attempt to use an existing virtual environment.
166 """
167 if not clean and self.requires_clean_install(pip_install_args):
168 logger.debug(
169 f"Packages for '{self.namespace}/{self.name}' have changed so performing a clean install."
170 )
171 clean = True
172
173 self.clean_run_files()
174 await self._pip_install(pip_install_args=pip_install_args, clean=clean)
175 self.write_fingerprint(pip_install_args)
176
177 def requires_clean_install(self, pip_install_args: list[str]) -> bool:
178 """Determine whether a clean install is needed.
179
180 Args:
181 pip_install_args: The arguments being passed to `pip install`, used
182 for fingerprinting the installation.
183
184 Returns:
185 Whether virtual environment doesn't exist or can't be reused.
186 """
187 if self.venv.site_packages_dir.joinpath("meltano_venv.pth").exists():
188 # clean up deprecated feature
189 return True
190 existing_fingerprint = self.read_fingerprint()
191 return (
192 existing_fingerprint != fingerprint(pip_install_args)
193 if existing_fingerprint
194 else True
195 )
196
197 def clean_run_files(self) -> None:
198 """Destroy cached configuration files, if they exist."""
199 try:
200 shutil.rmtree(self.project.run_dir(self.name, make_dirs=False))
201 except FileNotFoundError:
202 logger.debug("No cached configuration files to remove")
203
204 def clean(self) -> None:
205 """Destroy the virtual environment, if it exists."""
206 try:
207 shutil.rmtree(self.venv.root)
208 logger.debug(
209 "Removed old virtual environment for '%s/%s'", # noqa: WPS323
210 self.namespace,
211 self.name,
212 )
213 except FileNotFoundError:
214 # If the VirtualEnv has never been created before do nothing
215 logger.debug("No old virtual environment to remove")
216
217 async def create(self) -> Process:
218 """Create a new virtual environment.
219
220 Raises:
221 AsyncSubprocessError: The virtual environment could not be created.
222
223 Returns:
224 The Python process creating the virtual environment.
225 """
226 logger.debug(f"Creating virtual environment for '{self.namespace}/{self.name}'")
227 try:
228 return await exec_async(sys.executable, "-m", "venv", str(self.venv))
229 except AsyncSubprocessError as err:
230 raise AsyncSubprocessError(
231 f"Could not create the virtualenv for '{self.namespace}/{self.name}'",
232 err.process,
233 ) from err
234
235 async def upgrade_pip(self) -> Process:
236 """Upgrade the `pip` package to the latest version in the virtual environment.
237
238 Raises:
239 AsyncSubprocessError: Failed to upgrade pip to the latest version.
240
241 Returns:
242 The process running `pip install --upgrade ...`.
243 """
244 logger.debug(f"Upgrading pip for '{self.namespace}/{self.name}'")
245 try:
246 return await self._pip_install(["--upgrade", *PIP_PACKAGES])
247 except AsyncSubprocessError as err:
248 raise AsyncSubprocessError(
249 "Failed to upgrade pip to the latest version.", err.process
250 ) from err
251
252 def read_fingerprint(self) -> str | None:
253 """Get the fingerprint of the existing virtual environment.
254
255 Returns:
256 The fingerprint of the existing virtual environment if it exists.
257 `None` otherwise.
258 """
259 if not self.plugin_fingerprint_path.exists():
260 return None
261 with open(self.plugin_fingerprint_path) as fingerprint_file:
262 return fingerprint_file.read()
263
264 def write_fingerprint(self, pip_install_args: list[str]) -> None:
265 """Save the fingerprint for this installation.
266
267 Args:
268 pip_install_args: The arguments being passed to `pip install`.
269 """
270 with open(self.plugin_fingerprint_path, "wt") as fingerprint_file:
271 fingerprint_file.write(fingerprint(pip_install_args))
272
273 def exec_path(self, executable: str) -> Path:
274 """Return the absolute path for the given executable in the virtual environment.
275
276 Args:
277 executable: The path to the executable relative to the venv bin directory.
278
279 Returns:
280 The venv bin directory joined to the provided executable.
281 """
282 return self.venv.bin_dir / executable
283
284 async def _pip_install(
285 self, pip_install_args: list[str], clean: bool = False
286 ) -> Process:
287 """Install a package using `pip` in the proper virtual environment.
288
289 Args:
290 pip_install_args: The arguments to pass to `pip install`.
291 clean: Whether the installation should be done in a clean venv.
292
293 Raises:
294 AsyncSubprocessError: The command failed.
295
296 Returns:
297 The process running `pip install` with the provided args.
298 """
299 if clean:
300 self.clean()
301 await self.create()
302 await self.upgrade_pip()
303
304 pip_install_args_str = " ".join(pip_install_args)
305 log_msg_prefix = (
306 f"Upgrading with args {pip_install_args_str!r} in existing"
307 if "--upgrade" in pip_install_args
308 else f"Installing with args {pip_install_args_str!r} into"
309 )
310 logger.debug(
311 f"{log_msg_prefix} virtual environment for '{self.namespace}/{self.name}'"
312 )
313
314 try:
315 return await exec_async(
316 str(self.python_path), "-m", "pip", "install", *pip_install_args
317 )
318 except AsyncSubprocessError as err:
319 raise AsyncSubprocessError(
320 f"Failed to install plugin '{self.name}'.", err.process
321 ) from err
322
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/meltano/core/venv_service.py b/src/meltano/core/venv_service.py
--- a/src/meltano/core/venv_service.py
+++ b/src/meltano/core/venv_service.py
@@ -154,7 +154,6 @@
self.namespace = namespace
self.name = name
self.venv = VirtualEnv(self.project.venvs_dir(namespace, name))
- self.python_path = self.venv.bin_dir / "python"
self.plugin_fingerprint_path = self.venv.root / ".meltano_plugin_fingerprint"
async def install(self, pip_install_args: list[str], clean: bool = False) -> None:
@@ -184,15 +183,18 @@
Returns:
Whether virtual environment doesn't exist or can't be reused.
"""
- if self.venv.site_packages_dir.joinpath("meltano_venv.pth").exists():
- # clean up deprecated feature
- return True
- existing_fingerprint = self.read_fingerprint()
- return (
- existing_fingerprint != fingerprint(pip_install_args)
- if existing_fingerprint
- else True
- )
+ # A generator function is used to perform the checks lazily
+ def checks():
+ # The Python installation used to create this venv no longer exists
+ yield not self.exec_path("python").exists()
+ # The deprecated `meltano_venv.pth` feature is used by this venv
+ yield self.venv.site_packages_dir.joinpath("meltano_venv.pth").exists()
+ # The fingerprint of the venv does not match the pip install args
+ existing_fingerprint = self.read_fingerprint()
+ yield existing_fingerprint is None
+ yield existing_fingerprint != fingerprint(pip_install_args)
+
+ return any(checks())
def clean_run_files(self) -> None:
"""Destroy cached configuration files, if they exist."""
@@ -279,7 +281,18 @@
Returns:
The venv bin directory joined to the provided executable.
"""
- return self.venv.bin_dir / executable
+ absolute_executable = self.venv.bin_dir / executable
+ if platform.system() != "Windows":
+ return absolute_executable
+
+ # On Windows, try using the '.exe' suffixed version if it exists. Use the
+ # regular executable path as a fallback (and for backwards compatibility).
+ absolute_executable_windows = absolute_executable.with_suffix(".exe")
+ return (
+ absolute_executable_windows
+ if absolute_executable_windows.exists()
+ else absolute_executable
+ )
async def _pip_install(
self, pip_install_args: list[str], clean: bool = False
@@ -313,7 +326,7 @@
try:
return await exec_async(
- str(self.python_path), "-m", "pip", "install", *pip_install_args
+ str(self.exec_path("python")), "-m", "pip", "install", *pip_install_args
)
except AsyncSubprocessError as err:
raise AsyncSubprocessError(
|
{"golden_diff": "diff --git a/src/meltano/core/venv_service.py b/src/meltano/core/venv_service.py\n--- a/src/meltano/core/venv_service.py\n+++ b/src/meltano/core/venv_service.py\n@@ -154,7 +154,6 @@\n self.namespace = namespace\n self.name = name\n self.venv = VirtualEnv(self.project.venvs_dir(namespace, name))\n- self.python_path = self.venv.bin_dir / \"python\"\n self.plugin_fingerprint_path = self.venv.root / \".meltano_plugin_fingerprint\"\n \n async def install(self, pip_install_args: list[str], clean: bool = False) -> None:\n@@ -184,15 +183,18 @@\n Returns:\n Whether virtual environment doesn't exist or can't be reused.\n \"\"\"\n- if self.venv.site_packages_dir.joinpath(\"meltano_venv.pth\").exists():\n- # clean up deprecated feature\n- return True\n- existing_fingerprint = self.read_fingerprint()\n- return (\n- existing_fingerprint != fingerprint(pip_install_args)\n- if existing_fingerprint\n- else True\n- )\n+ # A generator function is used to perform the checks lazily\n+ def checks():\n+ # The Python installation used to create this venv no longer exists\n+ yield not self.exec_path(\"python\").exists()\n+ # The deprecated `meltano_venv.pth` feature is used by this venv\n+ yield self.venv.site_packages_dir.joinpath(\"meltano_venv.pth\").exists()\n+ # The fingerprint of the venv does not match the pip install args\n+ existing_fingerprint = self.read_fingerprint()\n+ yield existing_fingerprint is None\n+ yield existing_fingerprint != fingerprint(pip_install_args)\n+\n+ return any(checks())\n \n def clean_run_files(self) -> None:\n \"\"\"Destroy cached configuration files, if they exist.\"\"\"\n@@ -279,7 +281,18 @@\n Returns:\n The venv bin directory joined to the provided executable.\n \"\"\"\n- return self.venv.bin_dir / executable\n+ absolute_executable = self.venv.bin_dir / executable\n+ if platform.system() != \"Windows\":\n+ return absolute_executable\n+\n+ # On Windows, try using the '.exe' suffixed version if it exists. Use the\n+ # regular executable path as a fallback (and for backwards compatibility).\n+ absolute_executable_windows = absolute_executable.with_suffix(\".exe\")\n+ return (\n+ absolute_executable_windows\n+ if absolute_executable_windows.exists()\n+ else absolute_executable\n+ )\n \n async def _pip_install(\n self, pip_install_args: list[str], clean: bool = False\n@@ -313,7 +326,7 @@\n \n try:\n return await exec_async(\n- str(self.python_path), \"-m\", \"pip\", \"install\", *pip_install_args\n+ str(self.exec_path(\"python\")), \"-m\", \"pip\", \"install\", *pip_install_args\n )\n except AsyncSubprocessError as err:\n raise AsyncSubprocessError(\n", "issue": "bug: Missing Python executable from plugin venv causes error during `meltano install`\n### Meltano Version\n\n2.10.0\n\n### Python Version\n\n3.9\n\n### Bug scope\n\nCLI (options, error messages, logging, etc.)\n\n### Operating System\n\nLinux Mint 20.3\n\n### Description\n\n1. Install Python (e.g. using `pyenv`).\r\n2. Use that version of Python to run `meltano install`.\r\n3. Uninstall that version of Python.\r\n4. Install a new version of Python (e.g. going from version 3.9.X to 3.9.Y)\r\n5. Use that version of Python to run `meltano install` again:\r\n\r\n```\r\nInstalling X plugins...\r\n[...]\r\n\r\nNeed help fixing this problem? 
Visit http://melta.no/ for troubleshooting steps, or to\r\njoin our friendly Slack community.\r\n\r\n[Errno 2] No such file or directory: '<project directory>/.meltano/<installed plugin>/venv/bin/python'\r\n```\r\n\r\nThe venvs managed by Meltano assume that the underlying Python executable that `venv/bin/python` links to will not be deleted. In reality it may be, and if it is we should detect this and automatically replace the venv (as if the `--clean` flag has been provided).\n\n### Code\n\n_No response_\n", "before_files": [{"content": "\"\"\"Manage Python virtual environments.\"\"\"\n\nfrom __future__ import annotations\n\nimport asyncio\nimport hashlib\nimport logging\nimport os\nimport platform\nimport shutil\nimport subprocess\nimport sys\nfrom asyncio.subprocess import Process\nfrom collections import namedtuple\nfrom collections.abc import Iterable\nfrom pathlib import Path\n\nfrom meltano.core.error import AsyncSubprocessError\nfrom meltano.core.project import Project\n\nlogger = logging.getLogger(__name__)\n\nVenvSpecs = namedtuple(\"VenvSpecs\", (\"lib_dir\", \"bin_dir\", \"site_packages_dir\"))\n\nPOSIX = VenvSpecs(\n lib_dir=\"lib\",\n bin_dir=\"bin\",\n site_packages_dir=os.path.join(\n \"lib\",\n f\"python{'.'.join(str(part) for part in sys.version_info[:2])}\",\n \"site-packages\",\n ),\n)\n\nNT = VenvSpecs(\n lib_dir=\"Lib\",\n bin_dir=\"Scripts\",\n site_packages_dir=os.path.join(\"Lib\", \"site-packages\"),\n)\n\nPLATFORM_SPECS = {\"Linux\": POSIX, \"Darwin\": POSIX, \"Windows\": NT}\n\n\ndef venv_platform_specs():\n \"\"\"Get virtual environment sub-path info for the current platform.\n\n Raises:\n Exception: This platform is not supported.\n\n Returns:\n Virtual environment sub-path info for the current platform.\n \"\"\"\n system = platform.system()\n try:\n return PLATFORM_SPECS[system]\n except KeyError as ex:\n raise Exception(f\"Platform {system!r} not supported.\") from ex\n\n\nPIP_PACKAGES = (\"pip\", \"setuptools\", \"wheel\")\n\n\nclass VirtualEnv:\n \"\"\"Info about a single virtual environment.\"\"\"\n\n def __init__(self, root: Path):\n \"\"\"Initialize the `VirtualEnv` instance.\n\n Args:\n root: The root directory of the virtual environment.\n \"\"\"\n self.root = root.resolve()\n self.specs = venv_platform_specs()\n\n def __getattr__(self, key: str):\n \"\"\"Get a specific attribute from this instance.\n\n Used to provide `VenvSpecs` attributes for this specific virtual environment.\n\n Args:\n key: The attribute name. 
Must be one of the `VenvSpecs` attributes.\n\n Returns:\n The root directory of this virtual environment joined to the requested\n platform-specific path using this platform's `VenvSpecs` instance.\n \"\"\"\n return self.root / getattr(self.specs, key)\n\n def __str__(self):\n \"\"\"_summary_.\n\n Returns:\n _description_.\n \"\"\"\n return str(self.root)\n\n\nasync def exec_async(*args, **kwargs) -> Process:\n \"\"\"Run an executable asyncronously in a subprocess.\n\n Args:\n args: Positional arguments for `asyncio.create_subprocess_exec`.\n kwargs: Keyword arguments for `asyncio.create_subprocess_exec`.\n\n Raises:\n AsyncSubprocessError: The command failed.\n\n Returns:\n The subprocess.\n \"\"\"\n run = await asyncio.create_subprocess_exec(\n *args,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n **kwargs,\n )\n await run.wait()\n\n if run.returncode != 0:\n raise AsyncSubprocessError(\"Command failed\", run)\n\n return run\n\n\ndef fingerprint(pip_install_args: Iterable[str]) -> str:\n \"\"\"Generate a hash identifying pip install args.\n\n Arguments are sorted and deduplicated before the hash is generated.\n\n Args:\n pip_install_args: Arguments for `pip install`.\n\n Returns:\n The SHA256 hash hex digest of the sorted set of pip install args.\n \"\"\"\n return hashlib.sha256(\" \".join(sorted(set(pip_install_args))).encode()).hexdigest()\n\n\nclass VenvService: # noqa: WPS214\n \"\"\"Manages virtual environments.\n\n The methods in this class are not threadsafe.\n \"\"\"\n\n def __init__(self, project: Project, namespace: str = \"\", name: str = \"\"):\n \"\"\"Initialize the `VenvService`.\n\n Args:\n project: The Meltano project.\n namespace: The namespace for the venv, e.g. a Plugin type.\n name: The name of the venv, e.g. a Plugin name.\n \"\"\"\n self.project = project\n self.namespace = namespace\n self.name = name\n self.venv = VirtualEnv(self.project.venvs_dir(namespace, name))\n self.python_path = self.venv.bin_dir / \"python\"\n self.plugin_fingerprint_path = self.venv.root / \".meltano_plugin_fingerprint\"\n\n async def install(self, pip_install_args: list[str], clean: bool = False) -> None:\n \"\"\"Configure a virtual environment, then run pip install with the given args.\n\n Args:\n pip_install_args: Arguments passed to `pip install`.\n clean: Whether to not attempt to use an existing virtual environment.\n \"\"\"\n if not clean and self.requires_clean_install(pip_install_args):\n logger.debug(\n f\"Packages for '{self.namespace}/{self.name}' have changed so performing a clean install.\"\n )\n clean = True\n\n self.clean_run_files()\n await self._pip_install(pip_install_args=pip_install_args, clean=clean)\n self.write_fingerprint(pip_install_args)\n\n def requires_clean_install(self, pip_install_args: list[str]) -> bool:\n \"\"\"Determine whether a clean install is needed.\n\n Args:\n pip_install_args: The arguments being passed to `pip install`, used\n for fingerprinting the installation.\n\n Returns:\n Whether virtual environment doesn't exist or can't be reused.\n \"\"\"\n if self.venv.site_packages_dir.joinpath(\"meltano_venv.pth\").exists():\n # clean up deprecated feature\n return True\n existing_fingerprint = self.read_fingerprint()\n return (\n existing_fingerprint != fingerprint(pip_install_args)\n if existing_fingerprint\n else True\n )\n\n def clean_run_files(self) -> None:\n \"\"\"Destroy cached configuration files, if they exist.\"\"\"\n try:\n shutil.rmtree(self.project.run_dir(self.name, make_dirs=False))\n except FileNotFoundError:\n 
logger.debug(\"No cached configuration files to remove\")\n\n def clean(self) -> None:\n \"\"\"Destroy the virtual environment, if it exists.\"\"\"\n try:\n shutil.rmtree(self.venv.root)\n logger.debug(\n \"Removed old virtual environment for '%s/%s'\", # noqa: WPS323\n self.namespace,\n self.name,\n )\n except FileNotFoundError:\n # If the VirtualEnv has never been created before do nothing\n logger.debug(\"No old virtual environment to remove\")\n\n async def create(self) -> Process:\n \"\"\"Create a new virtual environment.\n\n Raises:\n AsyncSubprocessError: The virtual environment could not be created.\n\n Returns:\n The Python process creating the virtual environment.\n \"\"\"\n logger.debug(f\"Creating virtual environment for '{self.namespace}/{self.name}'\")\n try:\n return await exec_async(sys.executable, \"-m\", \"venv\", str(self.venv))\n except AsyncSubprocessError as err:\n raise AsyncSubprocessError(\n f\"Could not create the virtualenv for '{self.namespace}/{self.name}'\",\n err.process,\n ) from err\n\n async def upgrade_pip(self) -> Process:\n \"\"\"Upgrade the `pip` package to the latest version in the virtual environment.\n\n Raises:\n AsyncSubprocessError: Failed to upgrade pip to the latest version.\n\n Returns:\n The process running `pip install --upgrade ...`.\n \"\"\"\n logger.debug(f\"Upgrading pip for '{self.namespace}/{self.name}'\")\n try:\n return await self._pip_install([\"--upgrade\", *PIP_PACKAGES])\n except AsyncSubprocessError as err:\n raise AsyncSubprocessError(\n \"Failed to upgrade pip to the latest version.\", err.process\n ) from err\n\n def read_fingerprint(self) -> str | None:\n \"\"\"Get the fingerprint of the existing virtual environment.\n\n Returns:\n The fingerprint of the existing virtual environment if it exists.\n `None` otherwise.\n \"\"\"\n if not self.plugin_fingerprint_path.exists():\n return None\n with open(self.plugin_fingerprint_path) as fingerprint_file:\n return fingerprint_file.read()\n\n def write_fingerprint(self, pip_install_args: list[str]) -> None:\n \"\"\"Save the fingerprint for this installation.\n\n Args:\n pip_install_args: The arguments being passed to `pip install`.\n \"\"\"\n with open(self.plugin_fingerprint_path, \"wt\") as fingerprint_file:\n fingerprint_file.write(fingerprint(pip_install_args))\n\n def exec_path(self, executable: str) -> Path:\n \"\"\"Return the absolute path for the given executable in the virtual environment.\n\n Args:\n executable: The path to the executable relative to the venv bin directory.\n\n Returns:\n The venv bin directory joined to the provided executable.\n \"\"\"\n return self.venv.bin_dir / executable\n\n async def _pip_install(\n self, pip_install_args: list[str], clean: bool = False\n ) -> Process:\n \"\"\"Install a package using `pip` in the proper virtual environment.\n\n Args:\n pip_install_args: The arguments to pass to `pip install`.\n clean: Whether the installation should be done in a clean venv.\n\n Raises:\n AsyncSubprocessError: The command failed.\n\n Returns:\n The process running `pip install` with the provided args.\n \"\"\"\n if clean:\n self.clean()\n await self.create()\n await self.upgrade_pip()\n\n pip_install_args_str = \" \".join(pip_install_args)\n log_msg_prefix = (\n f\"Upgrading with args {pip_install_args_str!r} in existing\"\n if \"--upgrade\" in pip_install_args\n else f\"Installing with args {pip_install_args_str!r} into\"\n )\n logger.debug(\n f\"{log_msg_prefix} virtual environment for '{self.namespace}/{self.name}'\"\n )\n\n try:\n return await 
exec_async(\n str(self.python_path), \"-m\", \"pip\", \"install\", *pip_install_args\n )\n except AsyncSubprocessError as err:\n raise AsyncSubprocessError(\n f\"Failed to install plugin '{self.name}'.\", err.process\n ) from err\n", "path": "src/meltano/core/venv_service.py"}], "after_files": [{"content": "\"\"\"Manage Python virtual environments.\"\"\"\n\nfrom __future__ import annotations\n\nimport asyncio\nimport hashlib\nimport logging\nimport os\nimport platform\nimport shutil\nimport subprocess\nimport sys\nfrom asyncio.subprocess import Process\nfrom collections import namedtuple\nfrom collections.abc import Iterable\nfrom pathlib import Path\n\nfrom meltano.core.error import AsyncSubprocessError\nfrom meltano.core.project import Project\n\nlogger = logging.getLogger(__name__)\n\nVenvSpecs = namedtuple(\"VenvSpecs\", (\"lib_dir\", \"bin_dir\", \"site_packages_dir\"))\n\nPOSIX = VenvSpecs(\n lib_dir=\"lib\",\n bin_dir=\"bin\",\n site_packages_dir=os.path.join(\n \"lib\",\n f\"python{'.'.join(str(part) for part in sys.version_info[:2])}\",\n \"site-packages\",\n ),\n)\n\nNT = VenvSpecs(\n lib_dir=\"Lib\",\n bin_dir=\"Scripts\",\n site_packages_dir=os.path.join(\"Lib\", \"site-packages\"),\n)\n\nPLATFORM_SPECS = {\"Linux\": POSIX, \"Darwin\": POSIX, \"Windows\": NT}\n\n\ndef venv_platform_specs():\n \"\"\"Get virtual environment sub-path info for the current platform.\n\n Raises:\n Exception: This platform is not supported.\n\n Returns:\n Virtual environment sub-path info for the current platform.\n \"\"\"\n system = platform.system()\n try:\n return PLATFORM_SPECS[system]\n except KeyError as ex:\n raise Exception(f\"Platform {system!r} not supported.\") from ex\n\n\nPIP_PACKAGES = (\"pip\", \"setuptools\", \"wheel\")\n\n\nclass VirtualEnv:\n \"\"\"Info about a single virtual environment.\"\"\"\n\n def __init__(self, root: Path):\n \"\"\"Initialize the `VirtualEnv` instance.\n\n Args:\n root: The root directory of the virtual environment.\n \"\"\"\n self.root = root.resolve()\n self.specs = venv_platform_specs()\n\n def __getattr__(self, key: str):\n \"\"\"Get a specific attribute from this instance.\n\n Used to provide `VenvSpecs` attributes for this specific virtual environment.\n\n Args:\n key: The attribute name. 
Must be one of the `VenvSpecs` attributes.\n\n Returns:\n The root directory of this virtual environment joined to the requested\n platform-specific path using this platform's `VenvSpecs` instance.\n \"\"\"\n return self.root / getattr(self.specs, key)\n\n def __str__(self):\n \"\"\"_summary_.\n\n Returns:\n _description_.\n \"\"\"\n return str(self.root)\n\n\nasync def exec_async(*args, **kwargs) -> Process:\n \"\"\"Run an executable asyncronously in a subprocess.\n\n Args:\n args: Positional arguments for `asyncio.create_subprocess_exec`.\n kwargs: Keyword arguments for `asyncio.create_subprocess_exec`.\n\n Raises:\n AsyncSubprocessError: The command failed.\n\n Returns:\n The subprocess.\n \"\"\"\n run = await asyncio.create_subprocess_exec(\n *args,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n **kwargs,\n )\n await run.wait()\n\n if run.returncode != 0:\n raise AsyncSubprocessError(\"Command failed\", run)\n\n return run\n\n\ndef fingerprint(pip_install_args: Iterable[str]) -> str:\n \"\"\"Generate a hash identifying pip install args.\n\n Arguments are sorted and deduplicated before the hash is generated.\n\n Args:\n pip_install_args: Arguments for `pip install`.\n\n Returns:\n The SHA256 hash hex digest of the sorted set of pip install args.\n \"\"\"\n return hashlib.sha256(\" \".join(sorted(set(pip_install_args))).encode()).hexdigest()\n\n\nclass VenvService: # noqa: WPS214\n \"\"\"Manages virtual environments.\n\n The methods in this class are not threadsafe.\n \"\"\"\n\n def __init__(self, project: Project, namespace: str = \"\", name: str = \"\"):\n \"\"\"Initialize the `VenvService`.\n\n Args:\n project: The Meltano project.\n namespace: The namespace for the venv, e.g. a Plugin type.\n name: The name of the venv, e.g. a Plugin name.\n \"\"\"\n self.project = project\n self.namespace = namespace\n self.name = name\n self.venv = VirtualEnv(self.project.venvs_dir(namespace, name))\n self.plugin_fingerprint_path = self.venv.root / \".meltano_plugin_fingerprint\"\n\n async def install(self, pip_install_args: list[str], clean: bool = False) -> None:\n \"\"\"Configure a virtual environment, then run pip install with the given args.\n\n Args:\n pip_install_args: Arguments passed to `pip install`.\n clean: Whether to not attempt to use an existing virtual environment.\n \"\"\"\n if not clean and self.requires_clean_install(pip_install_args):\n logger.debug(\n f\"Packages for '{self.namespace}/{self.name}' have changed so performing a clean install.\"\n )\n clean = True\n\n self.clean_run_files()\n await self._pip_install(pip_install_args=pip_install_args, clean=clean)\n self.write_fingerprint(pip_install_args)\n\n def requires_clean_install(self, pip_install_args: list[str]) -> bool:\n \"\"\"Determine whether a clean install is needed.\n\n Args:\n pip_install_args: The arguments being passed to `pip install`, used\n for fingerprinting the installation.\n\n Returns:\n Whether virtual environment doesn't exist or can't be reused.\n \"\"\"\n # A generator function is used to perform the checks lazily\n def checks():\n # The Python installation used to create this venv no longer exists\n yield not self.exec_path(\"python\").exists()\n # The deprecated `meltano_venv.pth` feature is used by this venv\n yield self.venv.site_packages_dir.joinpath(\"meltano_venv.pth\").exists()\n # The fingerprint of the venv does not match the pip install args\n existing_fingerprint = self.read_fingerprint()\n yield existing_fingerprint is None\n yield existing_fingerprint != 
fingerprint(pip_install_args)\n\n return any(checks())\n\n def clean_run_files(self) -> None:\n \"\"\"Destroy cached configuration files, if they exist.\"\"\"\n try:\n shutil.rmtree(self.project.run_dir(self.name, make_dirs=False))\n except FileNotFoundError:\n logger.debug(\"No cached configuration files to remove\")\n\n def clean(self) -> None:\n \"\"\"Destroy the virtual environment, if it exists.\"\"\"\n try:\n shutil.rmtree(self.venv.root)\n logger.debug(\n \"Removed old virtual environment for '%s/%s'\", # noqa: WPS323\n self.namespace,\n self.name,\n )\n except FileNotFoundError:\n # If the VirtualEnv has never been created before do nothing\n logger.debug(\"No old virtual environment to remove\")\n\n async def create(self) -> Process:\n \"\"\"Create a new virtual environment.\n\n Raises:\n AsyncSubprocessError: The virtual environment could not be created.\n\n Returns:\n The Python process creating the virtual environment.\n \"\"\"\n logger.debug(f\"Creating virtual environment for '{self.namespace}/{self.name}'\")\n try:\n return await exec_async(sys.executable, \"-m\", \"venv\", str(self.venv))\n except AsyncSubprocessError as err:\n raise AsyncSubprocessError(\n f\"Could not create the virtualenv for '{self.namespace}/{self.name}'\",\n err.process,\n ) from err\n\n async def upgrade_pip(self) -> Process:\n \"\"\"Upgrade the `pip` package to the latest version in the virtual environment.\n\n Raises:\n AsyncSubprocessError: Failed to upgrade pip to the latest version.\n\n Returns:\n The process running `pip install --upgrade ...`.\n \"\"\"\n logger.debug(f\"Upgrading pip for '{self.namespace}/{self.name}'\")\n try:\n return await self._pip_install([\"--upgrade\", *PIP_PACKAGES])\n except AsyncSubprocessError as err:\n raise AsyncSubprocessError(\n \"Failed to upgrade pip to the latest version.\", err.process\n ) from err\n\n def read_fingerprint(self) -> str | None:\n \"\"\"Get the fingerprint of the existing virtual environment.\n\n Returns:\n The fingerprint of the existing virtual environment if it exists.\n `None` otherwise.\n \"\"\"\n if not self.plugin_fingerprint_path.exists():\n return None\n with open(self.plugin_fingerprint_path) as fingerprint_file:\n return fingerprint_file.read()\n\n def write_fingerprint(self, pip_install_args: list[str]) -> None:\n \"\"\"Save the fingerprint for this installation.\n\n Args:\n pip_install_args: The arguments being passed to `pip install`.\n \"\"\"\n with open(self.plugin_fingerprint_path, \"wt\") as fingerprint_file:\n fingerprint_file.write(fingerprint(pip_install_args))\n\n def exec_path(self, executable: str) -> Path:\n \"\"\"Return the absolute path for the given executable in the virtual environment.\n\n Args:\n executable: The path to the executable relative to the venv bin directory.\n\n Returns:\n The venv bin directory joined to the provided executable.\n \"\"\"\n absolute_executable = self.venv.bin_dir / executable\n if platform.system() != \"Windows\":\n return absolute_executable\n\n # On Windows, try using the '.exe' suffixed version if it exists. 
Use the\n # regular executable path as a fallback (and for backwards compatibility).\n absolute_executable_windows = absolute_executable.with_suffix(\".exe\")\n return (\n absolute_executable_windows\n if absolute_executable_windows.exists()\n else absolute_executable\n )\n\n async def _pip_install(\n self, pip_install_args: list[str], clean: bool = False\n ) -> Process:\n \"\"\"Install a package using `pip` in the proper virtual environment.\n\n Args:\n pip_install_args: The arguments to pass to `pip install`.\n clean: Whether the installation should be done in a clean venv.\n\n Raises:\n AsyncSubprocessError: The command failed.\n\n Returns:\n The process running `pip install` with the provided args.\n \"\"\"\n if clean:\n self.clean()\n await self.create()\n await self.upgrade_pip()\n\n pip_install_args_str = \" \".join(pip_install_args)\n log_msg_prefix = (\n f\"Upgrading with args {pip_install_args_str!r} in existing\"\n if \"--upgrade\" in pip_install_args\n else f\"Installing with args {pip_install_args_str!r} into\"\n )\n logger.debug(\n f\"{log_msg_prefix} virtual environment for '{self.namespace}/{self.name}'\"\n )\n\n try:\n return await exec_async(\n str(self.exec_path(\"python\")), \"-m\", \"pip\", \"install\", *pip_install_args\n )\n except AsyncSubprocessError as err:\n raise AsyncSubprocessError(\n f\"Failed to install plugin '{self.name}'.\", err.process\n ) from err\n", "path": "src/meltano/core/venv_service.py"}]}
| 3,641 | 710 |
gh_patches_debug_20571
|
rasdani/github-patches
|
git_diff
|
python__peps-2689
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Link topic page(s) in the rendered topic header
As a followup to #2579 implementing #2572 , it would be very useful to have the `Topic` header in the rendered PEPs actually link to said topic(s), and would be easy to implement (so long as we keep the display names consistent with the URLs, minus caps/whitespace).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pep_sphinx_extensions/pep_processor/transforms/pep_headers.py`
Content:
```
1 from pathlib import Path
2 import re
3
4 from docutils import nodes
5 from docutils import transforms
6 from sphinx import errors
7
8 from pep_sphinx_extensions.pep_processor.transforms import pep_zero
9 from pep_sphinx_extensions.pep_processor.transforms.pep_zero import _mask_email
10
11
12 class PEPParsingError(errors.SphinxError):
13 pass
14
15
16 # PEPHeaders is identical to docutils.transforms.peps.Headers excepting bdfl-delegate, sponsor & superseeded-by
17 class PEPHeaders(transforms.Transform):
18 """Process fields in a PEP's initial RFC-2822 header."""
19
20 # Run before pep_processor.transforms.pep_title.PEPTitle
21 default_priority = 330
22
23 def apply(self) -> None:
24 if not Path(self.document["source"]).match("pep-*"):
25 return # not a PEP file, exit early
26
27 if not len(self.document):
28 raise PEPParsingError("Document tree is empty.")
29
30 header = self.document[0]
31 if not isinstance(header, nodes.field_list) or "rfc2822" not in header["classes"]:
32 raise PEPParsingError("Document does not begin with an RFC-2822 header; it is not a PEP.")
33
34 # PEP number should be the first field
35 pep_field = header[0]
36 if pep_field[0].astext().lower() != "pep":
37 raise PEPParsingError("Document does not contain an RFC-2822 'PEP' header!")
38
39 # Extract PEP number
40 value = pep_field[1].astext()
41 try:
42 pep_num = int(value)
43 except ValueError:
44 raise PEPParsingError(f"'PEP' header must contain an integer. '{value}' is invalid!")
45
46 # Special processing for PEP 0.
47 if pep_num == 0:
48 pending = nodes.pending(pep_zero.PEPZero)
49 self.document.insert(1, pending)
50 self.document.note_pending(pending)
51
52 # If there are less than two headers in the preamble, or if Title is absent
53 if len(header) < 2 or header[1][0].astext().lower() != "title":
54 raise PEPParsingError("No title!")
55
56 fields_to_remove = []
57 for field in header:
58 name = field[0].astext().lower()
59 body = field[1]
60 if len(body) == 0:
61 # body is empty
62 continue
63 elif len(body) > 1:
64 msg = f"PEP header field body contains multiple elements:\n{field.pformat(level=1)}"
65 raise PEPParsingError(msg)
66 elif not isinstance(body[0], nodes.paragraph): # len(body) == 1
67 msg = f"PEP header field body may only contain a single paragraph:\n{field.pformat(level=1)}"
68 raise PEPParsingError(msg)
69
70 para = body[0]
71 if name in {"author", "bdfl-delegate", "pep-delegate", "sponsor"}:
72 # mask emails
73 for node in para:
74 if not isinstance(node, nodes.reference):
75 continue
76 node.replace_self(_mask_email(node))
77 elif name in {"discussions-to", "resolution", "post-history"}:
78 # Prettify mailing list and Discourse links
79 for node in para:
80 if (not isinstance(node, nodes.reference)
81 or not node["refuri"]):
82 continue
83 # Have known mailto links link to their main list pages
84 if node["refuri"].lower().startswith("mailto:"):
85 node["refuri"] = _generate_list_url(node["refuri"])
86 parts = node["refuri"].lower().split("/")
87 if len(parts) <= 2 or parts[2] not in LINK_PRETTIFIERS:
88 continue
89 pretty_title = _make_link_pretty(str(node["refuri"]))
90 if name == "post-history":
91 node["reftitle"] = pretty_title
92 else:
93 node[0] = nodes.Text(pretty_title)
94 elif name in {"replaces", "superseded-by", "requires"}:
95 # replace PEP numbers with normalised list of links to PEPs
96 new_body = []
97 for pep_str in re.split(r",?\s+", body.astext()):
98 target = self.document.settings.pep_url.format(int(pep_str))
99 new_body += [nodes.reference("", pep_str, refuri=target), nodes.Text(", ")]
100 para[:] = new_body[:-1] # drop trailing space
101 elif name in {"last-modified", "content-type", "version"}:
102 # Mark unneeded fields
103 fields_to_remove.append(field)
104
105 # Remove any trailing commas and whitespace in the headers
106 if para and isinstance(para[-1], nodes.Text):
107 last_node = para[-1]
108 if last_node.astext().strip() == ",":
109 last_node.parent.remove(last_node)
110 else:
111 para[-1] = last_node.rstrip().rstrip(",")
112
113 # Remove unneeded fields
114 for field in fields_to_remove:
115 field.parent.remove(field)
116
117
118 def _generate_list_url(mailto: str) -> str:
119 list_name_domain = mailto.lower().removeprefix("mailto:").strip()
120 list_name = list_name_domain.split("@")[0]
121
122 if list_name_domain.endswith("@googlegroups.com"):
123 return f"https://groups.google.com/g/{list_name}"
124
125 if not list_name_domain.endswith("@python.org"):
126 return mailto
127
128 # Active lists not yet on Mailman3; this URL will redirect if/when they are
129 if list_name in {"csv", "db-sig", "doc-sig", "python-list", "web-sig"}:
130 return f"https://mail.python.org/mailman/listinfo/{list_name}"
131 # Retired lists that are closed for posting, so only the archive matters
132 if list_name in {"import-sig", "python-3000"}:
133 return f"https://mail.python.org/pipermail/{list_name}/"
134 # The remaining lists (and any new ones) are all on Mailman3/Hyperkitty
135 return f"https://mail.python.org/archives/list/{list_name}@python.org/"
136
137
138 def _process_list_url(parts: list[str]) -> tuple[str, str]:
139 item_type = "list"
140
141 # HyperKitty (Mailman3) archive structure is
142 # https://mail.python.org/archives/list/<list_name>/thread/<id>
143 if "archives" in parts:
144 list_name = (
145 parts[parts.index("archives") + 2].removesuffix("@python.org"))
146 if len(parts) > 6 and parts[6] in {"message", "thread"}:
147 item_type = parts[6]
148
149 # Mailman3 list info structure is
150 # https://mail.python.org/mailman3/lists/<list_name>.python.org/
151 elif "mailman3" in parts:
152 list_name = (
153 parts[parts.index("mailman3") + 2].removesuffix(".python.org"))
154
155 # Pipermail (Mailman) archive structure is
156 # https://mail.python.org/pipermail/<list_name>/<month>-<year>/<id>
157 elif "pipermail" in parts:
158 list_name = parts[parts.index("pipermail") + 1]
159 item_type = "message" if len(parts) > 6 else "list"
160
161 # Mailman listinfo structure is
162 # https://mail.python.org/mailman/listinfo/<list_name>
163 elif "listinfo" in parts:
164 list_name = parts[parts.index("listinfo") + 1]
165
166 # Not a link to a mailing list, message or thread
167 else:
168 raise ValueError(
169 f"{'/'.join(parts)} not a link to a list, message or thread")
170
171 return list_name, item_type
172
173
174 def _process_discourse_url(parts: list[str]) -> tuple[str, str]:
175 item_name = "discourse"
176
177 if len(parts) < 5 or ("t" not in parts and "c" not in parts):
178 raise ValueError(
179 f"{'/'.join(parts)} not a link to a Discourse thread or category")
180
181 first_subpart = parts[4]
182 has_title = not first_subpart.isnumeric()
183
184 if "t" in parts:
185 item_type = "post" if len(parts) > (5 + has_title) else "thread"
186 elif "c" in parts:
187 item_type = "category"
188 if has_title:
189 item_name = f"{first_subpart.replace('-', ' ')} {item_name}"
190
191 return item_name, item_type
192
193
194 # Domains supported for pretty URL parsing
195 LINK_PRETTIFIERS = {
196 "mail.python.org": _process_list_url,
197 "discuss.python.org": _process_discourse_url,
198 }
199
200
201 def _process_pretty_url(url: str) -> tuple[str, str]:
202 parts = url.lower().strip().strip("/").split("/")
203 try:
204 item_name, item_type = LINK_PRETTIFIERS[parts[2]](parts)
205 except KeyError as error:
206 raise ValueError(
207 f"{url} not a link to a recognized domain to prettify") from error
208 item_name = item_name.title().replace("Sig", "SIG").replace("Pep", "PEP")
209 return item_name, item_type
210
211
212 def _make_link_pretty(url: str) -> str:
213 item_name, item_type = _process_pretty_url(url)
214 return f"{item_name} {item_type}"
215
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pep_sphinx_extensions/pep_processor/transforms/pep_headers.py b/pep_sphinx_extensions/pep_processor/transforms/pep_headers.py
--- a/pep_sphinx_extensions/pep_processor/transforms/pep_headers.py
+++ b/pep_sphinx_extensions/pep_processor/transforms/pep_headers.py
@@ -98,6 +98,17 @@
target = self.document.settings.pep_url.format(int(pep_str))
new_body += [nodes.reference("", pep_str, refuri=target), nodes.Text(", ")]
para[:] = new_body[:-1] # drop trailing space
+ elif name == "topic":
+ new_body = []
+ for topic_name in body.astext().split(","):
+ if topic_name:
+ target = f"/topic/{topic_name.lower().strip()}/"
+ new_body += [
+ nodes.reference("", topic_name, refuri=target),
+ nodes.Text(", "),
+ ]
+ if new_body:
+ para[:] = new_body[:-1] # Drop trailing space/comma
elif name in {"last-modified", "content-type", "version"}:
# Mark unneeded fields
fields_to_remove.append(field)
|
{"golden_diff": "diff --git a/pep_sphinx_extensions/pep_processor/transforms/pep_headers.py b/pep_sphinx_extensions/pep_processor/transforms/pep_headers.py\n--- a/pep_sphinx_extensions/pep_processor/transforms/pep_headers.py\n+++ b/pep_sphinx_extensions/pep_processor/transforms/pep_headers.py\n@@ -98,6 +98,17 @@\n target = self.document.settings.pep_url.format(int(pep_str))\n new_body += [nodes.reference(\"\", pep_str, refuri=target), nodes.Text(\", \")]\n para[:] = new_body[:-1] # drop trailing space\n+ elif name == \"topic\":\n+ new_body = []\n+ for topic_name in body.astext().split(\",\"):\n+ if topic_name:\n+ target = f\"/topic/{topic_name.lower().strip()}/\"\n+ new_body += [\n+ nodes.reference(\"\", topic_name, refuri=target),\n+ nodes.Text(\", \"),\n+ ]\n+ if new_body:\n+ para[:] = new_body[:-1] # Drop trailing space/comma\n elif name in {\"last-modified\", \"content-type\", \"version\"}:\n # Mark unneeded fields\n fields_to_remove.append(field)\n", "issue": "Link topic page(s) in the rendered topic header\nAs a followup to #2579 implementing #2572 , it would be very useful to have the `Topic` header in the rendered PEPs actually link to said topic(s), and would be easy to implement (so long as we keep the display names consistent with the URLs, minus caps/whitespace).\n", "before_files": [{"content": "from pathlib import Path\nimport re\n\nfrom docutils import nodes\nfrom docutils import transforms\nfrom sphinx import errors\n\nfrom pep_sphinx_extensions.pep_processor.transforms import pep_zero\nfrom pep_sphinx_extensions.pep_processor.transforms.pep_zero import _mask_email\n\n\nclass PEPParsingError(errors.SphinxError):\n pass\n\n\n# PEPHeaders is identical to docutils.transforms.peps.Headers excepting bdfl-delegate, sponsor & superseeded-by\nclass PEPHeaders(transforms.Transform):\n \"\"\"Process fields in a PEP's initial RFC-2822 header.\"\"\"\n\n # Run before pep_processor.transforms.pep_title.PEPTitle\n default_priority = 330\n\n def apply(self) -> None:\n if not Path(self.document[\"source\"]).match(\"pep-*\"):\n return # not a PEP file, exit early\n\n if not len(self.document):\n raise PEPParsingError(\"Document tree is empty.\")\n\n header = self.document[0]\n if not isinstance(header, nodes.field_list) or \"rfc2822\" not in header[\"classes\"]:\n raise PEPParsingError(\"Document does not begin with an RFC-2822 header; it is not a PEP.\")\n\n # PEP number should be the first field\n pep_field = header[0]\n if pep_field[0].astext().lower() != \"pep\":\n raise PEPParsingError(\"Document does not contain an RFC-2822 'PEP' header!\")\n\n # Extract PEP number\n value = pep_field[1].astext()\n try:\n pep_num = int(value)\n except ValueError:\n raise PEPParsingError(f\"'PEP' header must contain an integer. 
'{value}' is invalid!\")\n\n # Special processing for PEP 0.\n if pep_num == 0:\n pending = nodes.pending(pep_zero.PEPZero)\n self.document.insert(1, pending)\n self.document.note_pending(pending)\n\n # If there are less than two headers in the preamble, or if Title is absent\n if len(header) < 2 or header[1][0].astext().lower() != \"title\":\n raise PEPParsingError(\"No title!\")\n\n fields_to_remove = []\n for field in header:\n name = field[0].astext().lower()\n body = field[1]\n if len(body) == 0:\n # body is empty\n continue\n elif len(body) > 1:\n msg = f\"PEP header field body contains multiple elements:\\n{field.pformat(level=1)}\"\n raise PEPParsingError(msg)\n elif not isinstance(body[0], nodes.paragraph): # len(body) == 1\n msg = f\"PEP header field body may only contain a single paragraph:\\n{field.pformat(level=1)}\"\n raise PEPParsingError(msg)\n\n para = body[0]\n if name in {\"author\", \"bdfl-delegate\", \"pep-delegate\", \"sponsor\"}:\n # mask emails\n for node in para:\n if not isinstance(node, nodes.reference):\n continue\n node.replace_self(_mask_email(node))\n elif name in {\"discussions-to\", \"resolution\", \"post-history\"}:\n # Prettify mailing list and Discourse links\n for node in para:\n if (not isinstance(node, nodes.reference)\n or not node[\"refuri\"]):\n continue\n # Have known mailto links link to their main list pages\n if node[\"refuri\"].lower().startswith(\"mailto:\"):\n node[\"refuri\"] = _generate_list_url(node[\"refuri\"])\n parts = node[\"refuri\"].lower().split(\"/\")\n if len(parts) <= 2 or parts[2] not in LINK_PRETTIFIERS:\n continue\n pretty_title = _make_link_pretty(str(node[\"refuri\"]))\n if name == \"post-history\":\n node[\"reftitle\"] = pretty_title\n else:\n node[0] = nodes.Text(pretty_title)\n elif name in {\"replaces\", \"superseded-by\", \"requires\"}:\n # replace PEP numbers with normalised list of links to PEPs\n new_body = []\n for pep_str in re.split(r\",?\\s+\", body.astext()):\n target = self.document.settings.pep_url.format(int(pep_str))\n new_body += [nodes.reference(\"\", pep_str, refuri=target), nodes.Text(\", \")]\n para[:] = new_body[:-1] # drop trailing space\n elif name in {\"last-modified\", \"content-type\", \"version\"}:\n # Mark unneeded fields\n fields_to_remove.append(field)\n\n # Remove any trailing commas and whitespace in the headers\n if para and isinstance(para[-1], nodes.Text):\n last_node = para[-1]\n if last_node.astext().strip() == \",\":\n last_node.parent.remove(last_node)\n else:\n para[-1] = last_node.rstrip().rstrip(\",\")\n\n # Remove unneeded fields\n for field in fields_to_remove:\n field.parent.remove(field)\n\n\ndef _generate_list_url(mailto: str) -> str:\n list_name_domain = mailto.lower().removeprefix(\"mailto:\").strip()\n list_name = list_name_domain.split(\"@\")[0]\n\n if list_name_domain.endswith(\"@googlegroups.com\"):\n return f\"https://groups.google.com/g/{list_name}\"\n\n if not list_name_domain.endswith(\"@python.org\"):\n return mailto\n\n # Active lists not yet on Mailman3; this URL will redirect if/when they are\n if list_name in {\"csv\", \"db-sig\", \"doc-sig\", \"python-list\", \"web-sig\"}:\n return f\"https://mail.python.org/mailman/listinfo/{list_name}\"\n # Retired lists that are closed for posting, so only the archive matters\n if list_name in {\"import-sig\", \"python-3000\"}:\n return f\"https://mail.python.org/pipermail/{list_name}/\"\n # The remaining lists (and any new ones) are all on Mailman3/Hyperkitty\n return 
f\"https://mail.python.org/archives/list/{list_name}@python.org/\"\n\n\ndef _process_list_url(parts: list[str]) -> tuple[str, str]:\n item_type = \"list\"\n\n # HyperKitty (Mailman3) archive structure is\n # https://mail.python.org/archives/list/<list_name>/thread/<id>\n if \"archives\" in parts:\n list_name = (\n parts[parts.index(\"archives\") + 2].removesuffix(\"@python.org\"))\n if len(parts) > 6 and parts[6] in {\"message\", \"thread\"}:\n item_type = parts[6]\n\n # Mailman3 list info structure is\n # https://mail.python.org/mailman3/lists/<list_name>.python.org/\n elif \"mailman3\" in parts:\n list_name = (\n parts[parts.index(\"mailman3\") + 2].removesuffix(\".python.org\"))\n\n # Pipermail (Mailman) archive structure is\n # https://mail.python.org/pipermail/<list_name>/<month>-<year>/<id>\n elif \"pipermail\" in parts:\n list_name = parts[parts.index(\"pipermail\") + 1]\n item_type = \"message\" if len(parts) > 6 else \"list\"\n\n # Mailman listinfo structure is\n # https://mail.python.org/mailman/listinfo/<list_name>\n elif \"listinfo\" in parts:\n list_name = parts[parts.index(\"listinfo\") + 1]\n\n # Not a link to a mailing list, message or thread\n else:\n raise ValueError(\n f\"{'/'.join(parts)} not a link to a list, message or thread\")\n\n return list_name, item_type\n\n\ndef _process_discourse_url(parts: list[str]) -> tuple[str, str]:\n item_name = \"discourse\"\n\n if len(parts) < 5 or (\"t\" not in parts and \"c\" not in parts):\n raise ValueError(\n f\"{'/'.join(parts)} not a link to a Discourse thread or category\")\n\n first_subpart = parts[4]\n has_title = not first_subpart.isnumeric()\n\n if \"t\" in parts:\n item_type = \"post\" if len(parts) > (5 + has_title) else \"thread\"\n elif \"c\" in parts:\n item_type = \"category\"\n if has_title:\n item_name = f\"{first_subpart.replace('-', ' ')} {item_name}\"\n\n return item_name, item_type\n\n\n# Domains supported for pretty URL parsing\nLINK_PRETTIFIERS = {\n \"mail.python.org\": _process_list_url,\n \"discuss.python.org\": _process_discourse_url,\n}\n\n\ndef _process_pretty_url(url: str) -> tuple[str, str]:\n parts = url.lower().strip().strip(\"/\").split(\"/\")\n try:\n item_name, item_type = LINK_PRETTIFIERS[parts[2]](parts)\n except KeyError as error:\n raise ValueError(\n f\"{url} not a link to a recognized domain to prettify\") from error\n item_name = item_name.title().replace(\"Sig\", \"SIG\").replace(\"Pep\", \"PEP\")\n return item_name, item_type\n\n\ndef _make_link_pretty(url: str) -> str:\n item_name, item_type = _process_pretty_url(url)\n return f\"{item_name} {item_type}\"\n", "path": "pep_sphinx_extensions/pep_processor/transforms/pep_headers.py"}], "after_files": [{"content": "from pathlib import Path\nimport re\n\nfrom docutils import nodes\nfrom docutils import transforms\nfrom sphinx import errors\n\nfrom pep_sphinx_extensions.pep_processor.transforms import pep_zero\nfrom pep_sphinx_extensions.pep_processor.transforms.pep_zero import _mask_email\n\n\nclass PEPParsingError(errors.SphinxError):\n pass\n\n\n# PEPHeaders is identical to docutils.transforms.peps.Headers excepting bdfl-delegate, sponsor & superseeded-by\nclass PEPHeaders(transforms.Transform):\n \"\"\"Process fields in a PEP's initial RFC-2822 header.\"\"\"\n\n # Run before pep_processor.transforms.pep_title.PEPTitle\n default_priority = 330\n\n def apply(self) -> None:\n if not Path(self.document[\"source\"]).match(\"pep-*\"):\n return # not a PEP file, exit early\n\n if not len(self.document):\n raise PEPParsingError(\"Document tree is 
empty.\")\n\n header = self.document[0]\n if not isinstance(header, nodes.field_list) or \"rfc2822\" not in header[\"classes\"]:\n raise PEPParsingError(\"Document does not begin with an RFC-2822 header; it is not a PEP.\")\n\n # PEP number should be the first field\n pep_field = header[0]\n if pep_field[0].astext().lower() != \"pep\":\n raise PEPParsingError(\"Document does not contain an RFC-2822 'PEP' header!\")\n\n # Extract PEP number\n value = pep_field[1].astext()\n try:\n pep_num = int(value)\n except ValueError:\n raise PEPParsingError(f\"'PEP' header must contain an integer. '{value}' is invalid!\")\n\n # Special processing for PEP 0.\n if pep_num == 0:\n pending = nodes.pending(pep_zero.PEPZero)\n self.document.insert(1, pending)\n self.document.note_pending(pending)\n\n # If there are less than two headers in the preamble, or if Title is absent\n if len(header) < 2 or header[1][0].astext().lower() != \"title\":\n raise PEPParsingError(\"No title!\")\n\n fields_to_remove = []\n for field in header:\n name = field[0].astext().lower()\n body = field[1]\n if len(body) == 0:\n # body is empty\n continue\n elif len(body) > 1:\n msg = f\"PEP header field body contains multiple elements:\\n{field.pformat(level=1)}\"\n raise PEPParsingError(msg)\n elif not isinstance(body[0], nodes.paragraph): # len(body) == 1\n msg = f\"PEP header field body may only contain a single paragraph:\\n{field.pformat(level=1)}\"\n raise PEPParsingError(msg)\n\n para = body[0]\n if name in {\"author\", \"bdfl-delegate\", \"pep-delegate\", \"sponsor\"}:\n # mask emails\n for node in para:\n if not isinstance(node, nodes.reference):\n continue\n node.replace_self(_mask_email(node))\n elif name in {\"discussions-to\", \"resolution\", \"post-history\"}:\n # Prettify mailing list and Discourse links\n for node in para:\n if (not isinstance(node, nodes.reference)\n or not node[\"refuri\"]):\n continue\n # Have known mailto links link to their main list pages\n if node[\"refuri\"].lower().startswith(\"mailto:\"):\n node[\"refuri\"] = _generate_list_url(node[\"refuri\"])\n parts = node[\"refuri\"].lower().split(\"/\")\n if len(parts) <= 2 or parts[2] not in LINK_PRETTIFIERS:\n continue\n pretty_title = _make_link_pretty(str(node[\"refuri\"]))\n if name == \"post-history\":\n node[\"reftitle\"] = pretty_title\n else:\n node[0] = nodes.Text(pretty_title)\n elif name in {\"replaces\", \"superseded-by\", \"requires\"}:\n # replace PEP numbers with normalised list of links to PEPs\n new_body = []\n for pep_str in re.split(r\",?\\s+\", body.astext()):\n target = self.document.settings.pep_url.format(int(pep_str))\n new_body += [nodes.reference(\"\", pep_str, refuri=target), nodes.Text(\", \")]\n para[:] = new_body[:-1] # drop trailing space\n elif name == \"topic\":\n new_body = []\n for topic_name in body.astext().split(\",\"):\n if topic_name:\n target = f\"/topic/{topic_name.lower().strip()}/\"\n new_body += [\n nodes.reference(\"\", topic_name, refuri=target),\n nodes.Text(\", \"),\n ]\n if new_body:\n para[:] = new_body[:-1] # Drop trailing space/comma\n elif name in {\"last-modified\", \"content-type\", \"version\"}:\n # Mark unneeded fields\n fields_to_remove.append(field)\n\n # Remove any trailing commas and whitespace in the headers\n if para and isinstance(para[-1], nodes.Text):\n last_node = para[-1]\n if last_node.astext().strip() == \",\":\n last_node.parent.remove(last_node)\n else:\n para[-1] = last_node.rstrip().rstrip(\",\")\n\n # Remove unneeded fields\n for field in fields_to_remove:\n 
field.parent.remove(field)\n\n\ndef _generate_list_url(mailto: str) -> str:\n list_name_domain = mailto.lower().removeprefix(\"mailto:\").strip()\n list_name = list_name_domain.split(\"@\")[0]\n\n if list_name_domain.endswith(\"@googlegroups.com\"):\n return f\"https://groups.google.com/g/{list_name}\"\n\n if not list_name_domain.endswith(\"@python.org\"):\n return mailto\n\n # Active lists not yet on Mailman3; this URL will redirect if/when they are\n if list_name in {\"csv\", \"db-sig\", \"doc-sig\", \"python-list\", \"web-sig\"}:\n return f\"https://mail.python.org/mailman/listinfo/{list_name}\"\n # Retired lists that are closed for posting, so only the archive matters\n if list_name in {\"import-sig\", \"python-3000\"}:\n return f\"https://mail.python.org/pipermail/{list_name}/\"\n # The remaining lists (and any new ones) are all on Mailman3/Hyperkitty\n return f\"https://mail.python.org/archives/list/{list_name}@python.org/\"\n\n\ndef _process_list_url(parts: list[str]) -> tuple[str, str]:\n item_type = \"list\"\n\n # HyperKitty (Mailman3) archive structure is\n # https://mail.python.org/archives/list/<list_name>/thread/<id>\n if \"archives\" in parts:\n list_name = (\n parts[parts.index(\"archives\") + 2].removesuffix(\"@python.org\"))\n if len(parts) > 6 and parts[6] in {\"message\", \"thread\"}:\n item_type = parts[6]\n\n # Mailman3 list info structure is\n # https://mail.python.org/mailman3/lists/<list_name>.python.org/\n elif \"mailman3\" in parts:\n list_name = (\n parts[parts.index(\"mailman3\") + 2].removesuffix(\".python.org\"))\n\n # Pipermail (Mailman) archive structure is\n # https://mail.python.org/pipermail/<list_name>/<month>-<year>/<id>\n elif \"pipermail\" in parts:\n list_name = parts[parts.index(\"pipermail\") + 1]\n item_type = \"message\" if len(parts) > 6 else \"list\"\n\n # Mailman listinfo structure is\n # https://mail.python.org/mailman/listinfo/<list_name>\n elif \"listinfo\" in parts:\n list_name = parts[parts.index(\"listinfo\") + 1]\n\n # Not a link to a mailing list, message or thread\n else:\n raise ValueError(\n f\"{'/'.join(parts)} not a link to a list, message or thread\")\n\n return list_name, item_type\n\n\ndef _process_discourse_url(parts: list[str]) -> tuple[str, str]:\n item_name = \"discourse\"\n\n if len(parts) < 5 or (\"t\" not in parts and \"c\" not in parts):\n raise ValueError(\n f\"{'/'.join(parts)} not a link to a Discourse thread or category\")\n\n first_subpart = parts[4]\n has_title = not first_subpart.isnumeric()\n\n if \"t\" in parts:\n item_type = \"post\" if len(parts) > (5 + has_title) else \"thread\"\n elif \"c\" in parts:\n item_type = \"category\"\n if has_title:\n item_name = f\"{first_subpart.replace('-', ' ')} {item_name}\"\n\n return item_name, item_type\n\n\n# Domains supported for pretty URL parsing\nLINK_PRETTIFIERS = {\n \"mail.python.org\": _process_list_url,\n \"discuss.python.org\": _process_discourse_url,\n}\n\n\ndef _process_pretty_url(url: str) -> tuple[str, str]:\n parts = url.lower().strip().strip(\"/\").split(\"/\")\n try:\n item_name, item_type = LINK_PRETTIFIERS[parts[2]](parts)\n except KeyError as error:\n raise ValueError(\n f\"{url} not a link to a recognized domain to prettify\") from error\n item_name = item_name.title().replace(\"Sig\", \"SIG\").replace(\"Pep\", \"PEP\")\n return item_name, item_type\n\n\ndef _make_link_pretty(url: str) -> str:\n item_name, item_type = _process_pretty_url(url)\n return f\"{item_name} {item_type}\"\n", "path": 
"pep_sphinx_extensions/pep_processor/transforms/pep_headers.py"}]}
| 2,978 | 274 |
| gh_patches_debug_9538 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-481 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Can't run in Travis CI xenial
I was testing the new Travis CI image:
https://blog.travis-ci.com/2018-11-08-xenial-release
https://docs.travis-ci.com/user/reference/xenial/
My install script ran `pip install --user --upgrade cfn-lint` and then `cfn-lint my.cfn.yaml` which gave this error:
```Traceback (most recent call last):
File "/home/travis/.local/bin/cfn-lint", line 7, in <module>
from cfnlint.__main__ import main
File "/home/travis/.local/lib/python2.7/site-packages/cfnlint/__main__.py", line 19, in <module>
import cfnlint.core
File "/home/travis/.local/lib/python2.7/site-packages/cfnlint/core.py", line 24, in <module>
import cfnlint.maintenance
File "/home/travis/.local/lib/python2.7/site-packages/cfnlint/maintenance.py", line 19, in <module>
import requests
File "/usr/lib/python2.7/dist-packages/requests/__init__.py", line 58, in <module>
from . import utils
File "/usr/lib/python2.7/dist-packages/requests/utils.py", line 25, in <module>
from . import certs
ImportError: cannot import name certs
```
I "fixed" this problem by just changing to Python3 in my build script:
```
pyenv global 3.7
pip3 install --user --upgrade cfn-lint
cfn-lint stack.cfn.yaml
```
But there might be some other lingering issue here
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 """
2 Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 import codecs
18 import re
19 from setuptools import find_packages
20 from setuptools import setup
21
22
23 def get_version(filename):
24 with codecs.open(filename, 'r', 'utf-8') as fp:
25 contents = fp.read()
26 return re.search(r"__version__ = ['\"]([^'\"]+)['\"]", contents).group(1)
27
28
29 version = get_version('src/cfnlint/version.py')
30
31
32 with open('README.md') as f:
33 readme = f.read()
34
35 setup(
36 name='cfn-lint',
37 version=version,
38 description=('checks cloudformation for practices and behaviour \
39 that could potentially be improved'),
40 long_description=readme,
41 long_description_content_type="text/markdown",
42 keywords='aws, lint',
43 author='kddejong',
44 author_email='[email protected]',
45 url='https://github.com/awslabs/cfn-python-lint',
46 package_dir={'': 'src'},
47 package_data={'cfnlint': [
48 'data/CloudSpecs/*.json',
49 'data/AdditionalSpecs/*.json',
50 'data/Serverless/*.json',
51 'data/CfnLintCli/config/schema.json'
52 ]},
53 packages=find_packages('src'),
54 zip_safe=False,
55 install_requires=['pyyaml', 'six', 'requests', 'aws-sam-translator>=1.6.0', 'jsonpatch', 'jsonschema~=2.6.0', 'pathlib2'],
56 python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
57 entry_points={
58 'console_scripts': [
59 'cfn-lint = cfnlint.__main__:main'
60 ]
61 },
62 license='MIT no attribution',
63 test_suite="unittest",
64 classifiers=[
65 'Development Status :: 5 - Production/Stable',
66 'Intended Audience :: Developers',
67 'License :: OSI Approved :: MIT License',
68 'Natural Language :: English',
69 'Operating System :: OS Independent',
70 'Programming Language :: Python :: 2',
71 'Programming Language :: Python :: 2.7',
72 'Programming Language :: Python :: 3',
73 'Programming Language :: Python :: 3.4',
74 'Programming Language :: Python :: 3.5',
75 'Programming Language :: Python :: 3.6',
76 ],
77 )
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -52,7 +52,15 @@
]},
packages=find_packages('src'),
zip_safe=False,
- install_requires=['pyyaml', 'six', 'requests', 'aws-sam-translator>=1.6.0', 'jsonpatch', 'jsonschema~=2.6.0', 'pathlib2'],
+ install_requires=[
+ 'pyyaml',
+ 'six~=1.11',
+ 'requests>=2.15.0',
+ 'aws-sam-translator>=1.8.0',
+ 'jsonpatch',
+ 'jsonschema~=2.6',
+ 'pathlib2>=2.3.0;python_version<"3.4"'
+ ],
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
entry_points={
'console_scripts': [
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -52,7 +52,15 @@\n ]},\n packages=find_packages('src'),\n zip_safe=False,\n- install_requires=['pyyaml', 'six', 'requests', 'aws-sam-translator>=1.6.0', 'jsonpatch', 'jsonschema~=2.6.0', 'pathlib2'],\n+ install_requires=[\n+ 'pyyaml',\n+ 'six~=1.11',\n+ 'requests>=2.15.0',\n+ 'aws-sam-translator>=1.8.0',\n+ 'jsonpatch',\n+ 'jsonschema~=2.6',\n+ 'pathlib2>=2.3.0;python_version<\"3.4\"'\n+ ],\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n entry_points={\n 'console_scripts': [\n", "issue": "Can't run in Travis CI xenial\nI was testing the new Travis CI image:\r\n\r\nhttps://blog.travis-ci.com/2018-11-08-xenial-release\r\nhttps://docs.travis-ci.com/user/reference/xenial/\r\n\r\nMy install script ran `pip install --user --upgrade cfn-lint` and then `cfn-lint my.cfn.yaml` which gave this error:\r\n\r\n```Traceback (most recent call last):\r\n File \"/home/travis/.local/bin/cfn-lint\", line 7, in <module>\r\n from cfnlint.__main__ import main\r\n File \"/home/travis/.local/lib/python2.7/site-packages/cfnlint/__main__.py\", line 19, in <module>\r\n import cfnlint.core\r\n File \"/home/travis/.local/lib/python2.7/site-packages/cfnlint/core.py\", line 24, in <module>\r\n import cfnlint.maintenance\r\n File \"/home/travis/.local/lib/python2.7/site-packages/cfnlint/maintenance.py\", line 19, in <module>\r\n import requests\r\n File \"/usr/lib/python2.7/dist-packages/requests/__init__.py\", line 58, in <module>\r\n from . import utils\r\n File \"/usr/lib/python2.7/dist-packages/requests/utils.py\", line 25, in <module>\r\n from . import certs\r\nImportError: cannot import name certs\r\n```\r\n\r\nI \"fixed\" this problem by just changing to Python3 in my build script:\r\n\r\n```\r\npyenv global 3.7\r\npip3 install --user --upgrade cfn-lint\r\ncfn-lint stack.cfn.yaml\r\n```\r\n\r\nBut there might be some other lingering issue here\n", "before_files": [{"content": "\"\"\"\n Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport codecs\nimport re\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\ndef get_version(filename):\n with codecs.open(filename, 'r', 'utf-8') as fp:\n contents = fp.read()\n return re.search(r\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", contents).group(1)\n\n\nversion = get_version('src/cfnlint/version.py')\n\n\nwith open('README.md') as f:\n readme = f.read()\n\nsetup(\n name='cfn-lint',\n version=version,\n description=('checks cloudformation for practices and behaviour \\\n that could potentially be improved'),\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n keywords='aws, lint',\n author='kddejong',\n author_email='[email protected]',\n url='https://github.com/awslabs/cfn-python-lint',\n package_dir={'': 'src'},\n package_data={'cfnlint': [\n 'data/CloudSpecs/*.json',\n 'data/AdditionalSpecs/*.json',\n 'data/Serverless/*.json',\n 'data/CfnLintCli/config/schema.json'\n ]},\n packages=find_packages('src'),\n zip_safe=False,\n install_requires=['pyyaml', 'six', 'requests', 'aws-sam-translator>=1.6.0', 'jsonpatch', 'jsonschema~=2.6.0', 'pathlib2'],\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n entry_points={\n 'console_scripts': [\n 'cfn-lint = cfnlint.__main__:main'\n ]\n },\n license='MIT no attribution',\n test_suite=\"unittest\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "\"\"\"\n Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport codecs\nimport re\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\ndef get_version(filename):\n with codecs.open(filename, 'r', 'utf-8') as fp:\n contents = fp.read()\n return re.search(r\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", contents).group(1)\n\n\nversion = get_version('src/cfnlint/version.py')\n\n\nwith open('README.md') as f:\n readme = f.read()\n\nsetup(\n name='cfn-lint',\n version=version,\n description=('checks cloudformation for practices and behaviour \\\n that could potentially be improved'),\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n keywords='aws, lint',\n author='kddejong',\n author_email='[email protected]',\n url='https://github.com/awslabs/cfn-python-lint',\n package_dir={'': 'src'},\n package_data={'cfnlint': [\n 'data/CloudSpecs/*.json',\n 'data/AdditionalSpecs/*.json',\n 'data/Serverless/*.json',\n 'data/CfnLintCli/config/schema.json'\n ]},\n packages=find_packages('src'),\n zip_safe=False,\n install_requires=[\n 'pyyaml',\n 'six~=1.11',\n 'requests>=2.15.0',\n 'aws-sam-translator>=1.8.0',\n 'jsonpatch',\n 'jsonschema~=2.6',\n 'pathlib2>=2.3.0;python_version<\"3.4\"'\n ],\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n entry_points={\n 'console_scripts': [\n 'cfn-lint = cfnlint.__main__:main'\n ]\n },\n license='MIT no attribution',\n test_suite=\"unittest\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n)\n", "path": "setup.py"}]}
| 1,488 | 221 |
| gh_patches_debug_30618 | rasdani/github-patches | git_diff | qutebrowser__qutebrowser-4191 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improvements for content->host-block-lists
- [ ] Handle local directories (read every file in the directory)
- [ ] Handle local paths without `file://`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qutebrowser/browser/adblock.py`
Content:
```
1 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
2
3 # Copyright 2014-2018 Florian Bruhin (The Compiler) <[email protected]>
4 #
5 # This file is part of qutebrowser.
6 #
7 # qutebrowser is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # qutebrowser is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
19
20 """Functions related to ad blocking."""
21
22 import io
23 import os.path
24 import functools
25 import posixpath
26 import zipfile
27
28 from qutebrowser.browser import downloads
29 from qutebrowser.config import config
30 from qutebrowser.utils import objreg, standarddir, log, message
31 from qutebrowser.commands import cmdutils
32
33
34 def _guess_zip_filename(zf):
35 """Guess which file to use inside a zip file.
36
37 Args:
38 zf: A ZipFile instance.
39 """
40 files = zf.namelist()
41 if len(files) == 1:
42 return files[0]
43 else:
44 for e in files:
45 if posixpath.splitext(e)[0].lower() == 'hosts':
46 return e
47 raise FileNotFoundError("No hosts file found in zip")
48
49
50 def get_fileobj(byte_io):
51 """Get a usable file object to read the hosts file from."""
52 byte_io.seek(0) # rewind downloaded file
53 if zipfile.is_zipfile(byte_io):
54 byte_io.seek(0) # rewind what zipfile.is_zipfile did
55 zf = zipfile.ZipFile(byte_io)
56 filename = _guess_zip_filename(zf)
57 byte_io = zf.open(filename, mode='r')
58 else:
59 byte_io.seek(0) # rewind what zipfile.is_zipfile did
60 return byte_io
61
62
63 def _is_whitelisted_url(url):
64 """Check if the given URL is on the adblock whitelist.
65
66 Args:
67 url: The URL to check as QUrl.
68 """
69 for pattern in config.val.content.host_blocking.whitelist:
70 if pattern.matches(url):
71 return True
72 return False
73
74
75 class _FakeDownload:
76
77 """A download stub to use on_download_finished with local files."""
78
79 def __init__(self, fileobj):
80 self.basename = os.path.basename(fileobj.name)
81 self.fileobj = fileobj
82 self.successful = True
83
84
85 class HostBlocker:
86
87 """Manage blocked hosts based from /etc/hosts-like files.
88
89 Attributes:
90 _blocked_hosts: A set of blocked hosts.
91 _config_blocked_hosts: A set of blocked hosts from ~/.config.
92 _in_progress: The DownloadItems which are currently downloading.
93 _done_count: How many files have been read successfully.
94 _local_hosts_file: The path to the blocked-hosts file.
95 _config_hosts_file: The path to a blocked-hosts in ~/.config
96 """
97
98 def __init__(self):
99 self._blocked_hosts = set()
100 self._config_blocked_hosts = set()
101 self._in_progress = []
102 self._done_count = 0
103
104 data_dir = standarddir.data()
105 self._local_hosts_file = os.path.join(data_dir, 'blocked-hosts')
106 self._update_files()
107
108 config_dir = standarddir.config()
109 self._config_hosts_file = os.path.join(config_dir, 'blocked-hosts')
110
111 config.instance.changed.connect(self._update_files)
112
113 def is_blocked(self, url):
114 """Check if the given URL (as QUrl) is blocked."""
115 if not config.val.content.host_blocking.enabled:
116 return False
117 host = url.host()
118 return ((host in self._blocked_hosts or
119 host in self._config_blocked_hosts) and
120 not _is_whitelisted_url(url))
121
122 def _read_hosts_file(self, filename, target):
123 """Read hosts from the given filename.
124
125 Args:
126 filename: The file to read.
127 target: The set to store the hosts in.
128
129 Return:
130 True if a read was attempted, False otherwise
131 """
132 if not os.path.exists(filename):
133 return False
134
135 try:
136 with open(filename, 'r', encoding='utf-8') as f:
137 for line in f:
138 target.add(line.strip())
139 except (OSError, UnicodeDecodeError):
140 log.misc.exception("Failed to read host blocklist!")
141
142 return True
143
144 def read_hosts(self):
145 """Read hosts from the existing blocked-hosts file."""
146 self._blocked_hosts = set()
147
148 self._read_hosts_file(self._config_hosts_file,
149 self._config_blocked_hosts)
150
151 found = self._read_hosts_file(self._local_hosts_file,
152 self._blocked_hosts)
153
154 if not found:
155 args = objreg.get('args')
156 if (config.val.content.host_blocking.lists and
157 args.basedir is None and
158 config.val.content.host_blocking.enabled):
159 message.info("Run :adblock-update to get adblock lists.")
160
161 @cmdutils.register(instance='host-blocker')
162 def adblock_update(self):
163 """Update the adblock block lists.
164
165 This updates `~/.local/share/qutebrowser/blocked-hosts` with downloaded
166 host lists and re-reads `~/.config/qutebrowser/blocked-hosts`.
167 """
168 self._read_hosts_file(self._config_hosts_file,
169 self._config_blocked_hosts)
170 self._blocked_hosts = set()
171 self._done_count = 0
172 download_manager = objreg.get('qtnetwork-download-manager')
173 for url in config.val.content.host_blocking.lists:
174 if url.scheme() == 'file':
175 filename = url.toLocalFile()
176 try:
177 fileobj = open(filename, 'rb')
178 except OSError as e:
179 message.error("adblock: Error while reading {}: {}".format(
180 filename, e.strerror))
181 continue
182 download = _FakeDownload(fileobj)
183 self._in_progress.append(download)
184 self._on_download_finished(download)
185 else:
186 fobj = io.BytesIO()
187 fobj.name = 'adblock: ' + url.host()
188 target = downloads.FileObjDownloadTarget(fobj)
189 download = download_manager.get(url, target=target,
190 auto_remove=True)
191 self._in_progress.append(download)
192 download.finished.connect(
193 functools.partial(self._on_download_finished, download))
194
195 def _parse_line(self, line):
196 """Parse a line from a host file.
197
198 Args:
199 line: The bytes object to parse.
200
201 Returns:
202 True if parsing succeeded, False otherwise.
203 """
204 if line.startswith(b'#'):
205 # Ignoring comments early so we don't have to care about
206 # encoding errors in them.
207 return True
208
209 try:
210 line = line.decode('utf-8')
211 except UnicodeDecodeError:
212 log.misc.error("Failed to decode: {!r}".format(line))
213 return False
214
215 # Remove comments
216 try:
217 hash_idx = line.index('#')
218 line = line[:hash_idx]
219 except ValueError:
220 pass
221
222 line = line.strip()
223 # Skip empty lines
224 if not line:
225 return True
226
227 parts = line.split()
228 if len(parts) == 1:
229 # "one host per line" format
230 hosts = [parts[0]]
231 else:
232 # /etc/hosts format
233 hosts = parts[1:]
234
235 for host in hosts:
236 if ('.' in host and
237 not host.endswith('.localdomain') and
238 host != '0.0.0.0'):
239 self._blocked_hosts.add(host)
240
241 return True
242
243 def _merge_file(self, byte_io):
244 """Read and merge host files.
245
246 Args:
247 byte_io: The BytesIO object of the completed download.
248
249 Return:
250 A set of the merged hosts.
251 """
252 error_count = 0
253 line_count = 0
254 try:
255 f = get_fileobj(byte_io)
256 except (OSError, zipfile.BadZipFile, zipfile.LargeZipFile,
257 LookupError) as e:
258 message.error("adblock: Error while reading {}: {} - {}".format(
259 byte_io.name, e.__class__.__name__, e))
260 return
261
262 for line in f:
263 line_count += 1
264 ok = self._parse_line(line)
265 if not ok:
266 error_count += 1
267
268 log.misc.debug("{}: read {} lines".format(byte_io.name, line_count))
269 if error_count > 0:
270 message.error("adblock: {} read errors for {}".format(
271 error_count, byte_io.name))
272
273 def _on_lists_downloaded(self):
274 """Install block lists after files have been downloaded."""
275 with open(self._local_hosts_file, 'w', encoding='utf-8') as f:
276 for host in sorted(self._blocked_hosts):
277 f.write(host + '\n')
278 message.info("adblock: Read {} hosts from {} sources.".format(
279 len(self._blocked_hosts), self._done_count))
280
281 @config.change_filter('content.host_blocking.lists')
282 def _update_files(self):
283 """Update files when the config changed."""
284 if not config.val.content.host_blocking.lists:
285 try:
286 os.remove(self._local_hosts_file)
287 except FileNotFoundError:
288 pass
289 except OSError as e:
290 log.misc.exception("Failed to delete hosts file: {}".format(e))
291
292 def _on_download_finished(self, download):
293 """Check if all downloads are finished and if so, trigger reading.
294
295 Arguments:
296 download: The finished DownloadItem.
297 """
298 self._in_progress.remove(download)
299 if download.successful:
300 self._done_count += 1
301 try:
302 self._merge_file(download.fileobj)
303 finally:
304 download.fileobj.close()
305 if not self._in_progress:
306 try:
307 self._on_lists_downloaded()
308 except OSError:
309 log.misc.exception("Failed to write host block list!")
310
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/qutebrowser/browser/adblock.py b/qutebrowser/browser/adblock.py
--- a/qutebrowser/browser/adblock.py
+++ b/qutebrowser/browser/adblock.py
@@ -173,15 +173,12 @@
for url in config.val.content.host_blocking.lists:
if url.scheme() == 'file':
filename = url.toLocalFile()
- try:
- fileobj = open(filename, 'rb')
- except OSError as e:
- message.error("adblock: Error while reading {}: {}".format(
- filename, e.strerror))
- continue
- download = _FakeDownload(fileobj)
- self._in_progress.append(download)
- self._on_download_finished(download)
+ if os.path.isdir(filename):
+ for filenames in os.scandir(filename):
+ if filenames.is_file():
+ self._import_local(filenames.path)
+ else:
+ self._import_local(filename)
else:
fobj = io.BytesIO()
fobj.name = 'adblock: ' + url.host()
@@ -192,6 +189,22 @@
download.finished.connect(
functools.partial(self._on_download_finished, download))
+ def _import_local(self, filename):
+ """Adds the contents of a file to the blocklist.
+
+ Args:
+ filename: path to a local file to import.
+ """
+ try:
+ fileobj = open(filename, 'rb')
+ except OSError as e:
+ message.error("adblock: Error while reading {}: {}".format(
+ filename, e.strerror))
+ return
+ download = _FakeDownload(fileobj)
+ self._in_progress.append(download)
+ self._on_download_finished(download)
+
def _parse_line(self, line):
"""Parse a line from a host file.
|
{"golden_diff": "diff --git a/qutebrowser/browser/adblock.py b/qutebrowser/browser/adblock.py\n--- a/qutebrowser/browser/adblock.py\n+++ b/qutebrowser/browser/adblock.py\n@@ -173,15 +173,12 @@\n for url in config.val.content.host_blocking.lists:\n if url.scheme() == 'file':\n filename = url.toLocalFile()\n- try:\n- fileobj = open(filename, 'rb')\n- except OSError as e:\n- message.error(\"adblock: Error while reading {}: {}\".format(\n- filename, e.strerror))\n- continue\n- download = _FakeDownload(fileobj)\n- self._in_progress.append(download)\n- self._on_download_finished(download)\n+ if os.path.isdir(filename):\n+ for filenames in os.scandir(filename):\n+ if filenames.is_file():\n+ self._import_local(filenames.path)\n+ else:\n+ self._import_local(filename)\n else:\n fobj = io.BytesIO()\n fobj.name = 'adblock: ' + url.host()\n@@ -192,6 +189,22 @@\n download.finished.connect(\n functools.partial(self._on_download_finished, download))\n \n+ def _import_local(self, filename):\n+ \"\"\"Adds the contents of a file to the blocklist.\n+\n+ Args:\n+ filename: path to a local file to import.\n+ \"\"\"\n+ try:\n+ fileobj = open(filename, 'rb')\n+ except OSError as e:\n+ message.error(\"adblock: Error while reading {}: {}\".format(\n+ filename, e.strerror))\n+ return\n+ download = _FakeDownload(fileobj)\n+ self._in_progress.append(download)\n+ self._on_download_finished(download)\n+\n def _parse_line(self, line):\n \"\"\"Parse a line from a host file.\n", "issue": "Improvements for content->host-block-lists\n- [ ] Handle local directories (read every file in the directory)\n- [ ] Handle local paths without `file://`\n\n", "before_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2018 Florian Bruhin (The Compiler) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Functions related to ad blocking.\"\"\"\n\nimport io\nimport os.path\nimport functools\nimport posixpath\nimport zipfile\n\nfrom qutebrowser.browser import downloads\nfrom qutebrowser.config import config\nfrom qutebrowser.utils import objreg, standarddir, log, message\nfrom qutebrowser.commands import cmdutils\n\n\ndef _guess_zip_filename(zf):\n \"\"\"Guess which file to use inside a zip file.\n\n Args:\n zf: A ZipFile instance.\n \"\"\"\n files = zf.namelist()\n if len(files) == 1:\n return files[0]\n else:\n for e in files:\n if posixpath.splitext(e)[0].lower() == 'hosts':\n return e\n raise FileNotFoundError(\"No hosts file found in zip\")\n\n\ndef get_fileobj(byte_io):\n \"\"\"Get a usable file object to read the hosts file from.\"\"\"\n byte_io.seek(0) # rewind downloaded file\n if zipfile.is_zipfile(byte_io):\n byte_io.seek(0) # rewind what zipfile.is_zipfile did\n zf = zipfile.ZipFile(byte_io)\n filename = _guess_zip_filename(zf)\n byte_io = zf.open(filename, mode='r')\n else:\n byte_io.seek(0) # rewind what zipfile.is_zipfile did\n return byte_io\n\n\ndef _is_whitelisted_url(url):\n \"\"\"Check if the given URL is on the adblock whitelist.\n\n Args:\n url: The URL to check as QUrl.\n \"\"\"\n for pattern in config.val.content.host_blocking.whitelist:\n if pattern.matches(url):\n return True\n return False\n\n\nclass _FakeDownload:\n\n \"\"\"A download stub to use on_download_finished with local files.\"\"\"\n\n def __init__(self, fileobj):\n self.basename = os.path.basename(fileobj.name)\n self.fileobj = fileobj\n self.successful = True\n\n\nclass HostBlocker:\n\n \"\"\"Manage blocked hosts based from /etc/hosts-like files.\n\n Attributes:\n _blocked_hosts: A set of blocked hosts.\n _config_blocked_hosts: A set of blocked hosts from ~/.config.\n _in_progress: The DownloadItems which are currently downloading.\n _done_count: How many files have been read successfully.\n _local_hosts_file: The path to the blocked-hosts file.\n _config_hosts_file: The path to a blocked-hosts in ~/.config\n \"\"\"\n\n def __init__(self):\n self._blocked_hosts = set()\n self._config_blocked_hosts = set()\n self._in_progress = []\n self._done_count = 0\n\n data_dir = standarddir.data()\n self._local_hosts_file = os.path.join(data_dir, 'blocked-hosts')\n self._update_files()\n\n config_dir = standarddir.config()\n self._config_hosts_file = os.path.join(config_dir, 'blocked-hosts')\n\n config.instance.changed.connect(self._update_files)\n\n def is_blocked(self, url):\n \"\"\"Check if the given URL (as QUrl) is blocked.\"\"\"\n if not config.val.content.host_blocking.enabled:\n return False\n host = url.host()\n return ((host in self._blocked_hosts or\n host in self._config_blocked_hosts) and\n not _is_whitelisted_url(url))\n\n def _read_hosts_file(self, filename, target):\n \"\"\"Read hosts from the given filename.\n\n Args:\n filename: The file to read.\n target: The set to store the hosts in.\n\n Return:\n True if a read was attempted, False otherwise\n \"\"\"\n if not os.path.exists(filename):\n return False\n\n try:\n with open(filename, 'r', encoding='utf-8') as f:\n for line in f:\n target.add(line.strip())\n except (OSError, UnicodeDecodeError):\n log.misc.exception(\"Failed to read host blocklist!\")\n\n return True\n\n def read_hosts(self):\n \"\"\"Read hosts from the existing blocked-hosts file.\"\"\"\n self._blocked_hosts = set()\n\n self._read_hosts_file(self._config_hosts_file,\n self._config_blocked_hosts)\n\n found = 
self._read_hosts_file(self._local_hosts_file,\n self._blocked_hosts)\n\n if not found:\n args = objreg.get('args')\n if (config.val.content.host_blocking.lists and\n args.basedir is None and\n config.val.content.host_blocking.enabled):\n message.info(\"Run :adblock-update to get adblock lists.\")\n\n @cmdutils.register(instance='host-blocker')\n def adblock_update(self):\n \"\"\"Update the adblock block lists.\n\n This updates `~/.local/share/qutebrowser/blocked-hosts` with downloaded\n host lists and re-reads `~/.config/qutebrowser/blocked-hosts`.\n \"\"\"\n self._read_hosts_file(self._config_hosts_file,\n self._config_blocked_hosts)\n self._blocked_hosts = set()\n self._done_count = 0\n download_manager = objreg.get('qtnetwork-download-manager')\n for url in config.val.content.host_blocking.lists:\n if url.scheme() == 'file':\n filename = url.toLocalFile()\n try:\n fileobj = open(filename, 'rb')\n except OSError as e:\n message.error(\"adblock: Error while reading {}: {}\".format(\n filename, e.strerror))\n continue\n download = _FakeDownload(fileobj)\n self._in_progress.append(download)\n self._on_download_finished(download)\n else:\n fobj = io.BytesIO()\n fobj.name = 'adblock: ' + url.host()\n target = downloads.FileObjDownloadTarget(fobj)\n download = download_manager.get(url, target=target,\n auto_remove=True)\n self._in_progress.append(download)\n download.finished.connect(\n functools.partial(self._on_download_finished, download))\n\n def _parse_line(self, line):\n \"\"\"Parse a line from a host file.\n\n Args:\n line: The bytes object to parse.\n\n Returns:\n True if parsing succeeded, False otherwise.\n \"\"\"\n if line.startswith(b'#'):\n # Ignoring comments early so we don't have to care about\n # encoding errors in them.\n return True\n\n try:\n line = line.decode('utf-8')\n except UnicodeDecodeError:\n log.misc.error(\"Failed to decode: {!r}\".format(line))\n return False\n\n # Remove comments\n try:\n hash_idx = line.index('#')\n line = line[:hash_idx]\n except ValueError:\n pass\n\n line = line.strip()\n # Skip empty lines\n if not line:\n return True\n\n parts = line.split()\n if len(parts) == 1:\n # \"one host per line\" format\n hosts = [parts[0]]\n else:\n # /etc/hosts format\n hosts = parts[1:]\n\n for host in hosts:\n if ('.' 
in host and\n not host.endswith('.localdomain') and\n host != '0.0.0.0'):\n self._blocked_hosts.add(host)\n\n return True\n\n def _merge_file(self, byte_io):\n \"\"\"Read and merge host files.\n\n Args:\n byte_io: The BytesIO object of the completed download.\n\n Return:\n A set of the merged hosts.\n \"\"\"\n error_count = 0\n line_count = 0\n try:\n f = get_fileobj(byte_io)\n except (OSError, zipfile.BadZipFile, zipfile.LargeZipFile,\n LookupError) as e:\n message.error(\"adblock: Error while reading {}: {} - {}\".format(\n byte_io.name, e.__class__.__name__, e))\n return\n\n for line in f:\n line_count += 1\n ok = self._parse_line(line)\n if not ok:\n error_count += 1\n\n log.misc.debug(\"{}: read {} lines\".format(byte_io.name, line_count))\n if error_count > 0:\n message.error(\"adblock: {} read errors for {}\".format(\n error_count, byte_io.name))\n\n def _on_lists_downloaded(self):\n \"\"\"Install block lists after files have been downloaded.\"\"\"\n with open(self._local_hosts_file, 'w', encoding='utf-8') as f:\n for host in sorted(self._blocked_hosts):\n f.write(host + '\\n')\n message.info(\"adblock: Read {} hosts from {} sources.\".format(\n len(self._blocked_hosts), self._done_count))\n\n @config.change_filter('content.host_blocking.lists')\n def _update_files(self):\n \"\"\"Update files when the config changed.\"\"\"\n if not config.val.content.host_blocking.lists:\n try:\n os.remove(self._local_hosts_file)\n except FileNotFoundError:\n pass\n except OSError as e:\n log.misc.exception(\"Failed to delete hosts file: {}\".format(e))\n\n def _on_download_finished(self, download):\n \"\"\"Check if all downloads are finished and if so, trigger reading.\n\n Arguments:\n download: The finished DownloadItem.\n \"\"\"\n self._in_progress.remove(download)\n if download.successful:\n self._done_count += 1\n try:\n self._merge_file(download.fileobj)\n finally:\n download.fileobj.close()\n if not self._in_progress:\n try:\n self._on_lists_downloaded()\n except OSError:\n log.misc.exception(\"Failed to write host block list!\")\n", "path": "qutebrowser/browser/adblock.py"}], "after_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2018 Florian Bruhin (The Compiler) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Functions related to ad blocking.\"\"\"\n\nimport io\nimport os.path\nimport functools\nimport posixpath\nimport zipfile\n\nfrom qutebrowser.browser import downloads\nfrom qutebrowser.config import config\nfrom qutebrowser.utils import objreg, standarddir, log, message\nfrom qutebrowser.commands import cmdutils\n\n\ndef _guess_zip_filename(zf):\n \"\"\"Guess which file to use inside a zip file.\n\n Args:\n zf: A ZipFile instance.\n \"\"\"\n files = zf.namelist()\n if len(files) == 1:\n return files[0]\n else:\n for e in files:\n if posixpath.splitext(e)[0].lower() == 'hosts':\n return e\n raise FileNotFoundError(\"No hosts file found in zip\")\n\n\ndef get_fileobj(byte_io):\n \"\"\"Get a usable file object to read the hosts file from.\"\"\"\n byte_io.seek(0) # rewind downloaded file\n if zipfile.is_zipfile(byte_io):\n byte_io.seek(0) # rewind what zipfile.is_zipfile did\n zf = zipfile.ZipFile(byte_io)\n filename = _guess_zip_filename(zf)\n byte_io = zf.open(filename, mode='r')\n else:\n byte_io.seek(0) # rewind what zipfile.is_zipfile did\n return byte_io\n\n\ndef _is_whitelisted_url(url):\n \"\"\"Check if the given URL is on the adblock whitelist.\n\n Args:\n url: The URL to check as QUrl.\n \"\"\"\n for pattern in config.val.content.host_blocking.whitelist:\n if pattern.matches(url):\n return True\n return False\n\n\nclass _FakeDownload:\n\n \"\"\"A download stub to use on_download_finished with local files.\"\"\"\n\n def __init__(self, fileobj):\n self.basename = os.path.basename(fileobj.name)\n self.fileobj = fileobj\n self.successful = True\n\n\nclass HostBlocker:\n\n \"\"\"Manage blocked hosts based from /etc/hosts-like files.\n\n Attributes:\n _blocked_hosts: A set of blocked hosts.\n _config_blocked_hosts: A set of blocked hosts from ~/.config.\n _in_progress: The DownloadItems which are currently downloading.\n _done_count: How many files have been read successfully.\n _local_hosts_file: The path to the blocked-hosts file.\n _config_hosts_file: The path to a blocked-hosts in ~/.config\n \"\"\"\n\n def __init__(self):\n self._blocked_hosts = set()\n self._config_blocked_hosts = set()\n self._in_progress = []\n self._done_count = 0\n\n data_dir = standarddir.data()\n self._local_hosts_file = os.path.join(data_dir, 'blocked-hosts')\n self._update_files()\n\n config_dir = standarddir.config()\n self._config_hosts_file = os.path.join(config_dir, 'blocked-hosts')\n\n config.instance.changed.connect(self._update_files)\n\n def is_blocked(self, url):\n \"\"\"Check if the given URL (as QUrl) is blocked.\"\"\"\n if not config.val.content.host_blocking.enabled:\n return False\n host = url.host()\n return ((host in self._blocked_hosts or\n host in self._config_blocked_hosts) and\n not _is_whitelisted_url(url))\n\n def _read_hosts_file(self, filename, target):\n \"\"\"Read hosts from the given filename.\n\n Args:\n filename: The file to read.\n target: The set to store the hosts in.\n\n Return:\n True if a read was attempted, False otherwise\n \"\"\"\n if not os.path.exists(filename):\n return False\n\n try:\n with open(filename, 'r', encoding='utf-8') as f:\n for line in f:\n target.add(line.strip())\n except (OSError, UnicodeDecodeError):\n log.misc.exception(\"Failed to read host blocklist!\")\n\n return True\n\n def read_hosts(self):\n \"\"\"Read hosts from the existing blocked-hosts file.\"\"\"\n self._blocked_hosts = set()\n\n self._read_hosts_file(self._config_hosts_file,\n self._config_blocked_hosts)\n\n found = 
self._read_hosts_file(self._local_hosts_file,\n self._blocked_hosts)\n\n if not found:\n args = objreg.get('args')\n if (config.val.content.host_blocking.lists and\n args.basedir is None and\n config.val.content.host_blocking.enabled):\n message.info(\"Run :adblock-update to get adblock lists.\")\n\n @cmdutils.register(instance='host-blocker')\n def adblock_update(self):\n \"\"\"Update the adblock block lists.\n\n This updates `~/.local/share/qutebrowser/blocked-hosts` with downloaded\n host lists and re-reads `~/.config/qutebrowser/blocked-hosts`.\n \"\"\"\n self._read_hosts_file(self._config_hosts_file,\n self._config_blocked_hosts)\n self._blocked_hosts = set()\n self._done_count = 0\n download_manager = objreg.get('qtnetwork-download-manager')\n for url in config.val.content.host_blocking.lists:\n if url.scheme() == 'file':\n filename = url.toLocalFile()\n if os.path.isdir(filename):\n for filenames in os.scandir(filename):\n if filenames.is_file():\n self._import_local(filenames.path)\n else:\n self._import_local(filename)\n else:\n fobj = io.BytesIO()\n fobj.name = 'adblock: ' + url.host()\n target = downloads.FileObjDownloadTarget(fobj)\n download = download_manager.get(url, target=target,\n auto_remove=True)\n self._in_progress.append(download)\n download.finished.connect(\n functools.partial(self._on_download_finished, download))\n\n def _import_local(self, filename):\n \"\"\"Adds the contents of a file to the blocklist.\n\n Args:\n filename: path to a local file to import.\n \"\"\"\n try:\n fileobj = open(filename, 'rb')\n except OSError as e:\n message.error(\"adblock: Error while reading {}: {}\".format(\n filename, e.strerror))\n return\n download = _FakeDownload(fileobj)\n self._in_progress.append(download)\n self._on_download_finished(download)\n\n def _parse_line(self, line):\n \"\"\"Parse a line from a host file.\n\n Args:\n line: The bytes object to parse.\n\n Returns:\n True if parsing succeeded, False otherwise.\n \"\"\"\n if line.startswith(b'#'):\n # Ignoring comments early so we don't have to care about\n # encoding errors in them.\n return True\n\n try:\n line = line.decode('utf-8')\n except UnicodeDecodeError:\n log.misc.error(\"Failed to decode: {!r}\".format(line))\n return False\n\n # Remove comments\n try:\n hash_idx = line.index('#')\n line = line[:hash_idx]\n except ValueError:\n pass\n\n line = line.strip()\n # Skip empty lines\n if not line:\n return True\n\n parts = line.split()\n if len(parts) == 1:\n # \"one host per line\" format\n hosts = [parts[0]]\n else:\n # /etc/hosts format\n hosts = parts[1:]\n\n for host in hosts:\n if ('.' 
in host and\n not host.endswith('.localdomain') and\n host != '0.0.0.0'):\n self._blocked_hosts.add(host)\n\n return True\n\n def _merge_file(self, byte_io):\n \"\"\"Read and merge host files.\n\n Args:\n byte_io: The BytesIO object of the completed download.\n\n Return:\n A set of the merged hosts.\n \"\"\"\n error_count = 0\n line_count = 0\n try:\n f = get_fileobj(byte_io)\n except (OSError, zipfile.BadZipFile, zipfile.LargeZipFile,\n LookupError) as e:\n message.error(\"adblock: Error while reading {}: {} - {}\".format(\n byte_io.name, e.__class__.__name__, e))\n return\n\n for line in f:\n line_count += 1\n ok = self._parse_line(line)\n if not ok:\n error_count += 1\n\n log.misc.debug(\"{}: read {} lines\".format(byte_io.name, line_count))\n if error_count > 0:\n message.error(\"adblock: {} read errors for {}\".format(\n error_count, byte_io.name))\n\n def _on_lists_downloaded(self):\n \"\"\"Install block lists after files have been downloaded.\"\"\"\n with open(self._local_hosts_file, 'w', encoding='utf-8') as f:\n for host in sorted(self._blocked_hosts):\n f.write(host + '\\n')\n message.info(\"adblock: Read {} hosts from {} sources.\".format(\n len(self._blocked_hosts), self._done_count))\n\n @config.change_filter('content.host_blocking.lists')\n def _update_files(self):\n \"\"\"Update files when the config changed.\"\"\"\n if not config.val.content.host_blocking.lists:\n try:\n os.remove(self._local_hosts_file)\n except FileNotFoundError:\n pass\n except OSError as e:\n log.misc.exception(\"Failed to delete hosts file: {}\".format(e))\n\n def _on_download_finished(self, download):\n \"\"\"Check if all downloads are finished and if so, trigger reading.\n\n Arguments:\n download: The finished DownloadItem.\n \"\"\"\n self._in_progress.remove(download)\n if download.successful:\n self._done_count += 1\n try:\n self._merge_file(download.fileobj)\n finally:\n download.fileobj.close()\n if not self._in_progress:\n try:\n self._on_lists_downloaded()\n except OSError:\n log.misc.exception(\"Failed to write host block list!\")\n", "path": "qutebrowser/browser/adblock.py"}]}
| 3,359 | 407 |
| gh_patches_debug_1400 | rasdani/github-patches | git_diff | conda__conda-7241 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
conda's configuration context is not initialized in conda.exports
root cause of https://github.com/conda-forge/conda-smithy/issues/762
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda/exports.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 from collections import Hashable
5 from logging import getLogger
6 import threading
7 from warnings import warn
8
9 log = getLogger(__name__)
10
11 from . import CondaError # NOQA
12 CondaError = CondaError
13
14 from . import compat, plan # NOQA
15 compat, plan = compat, plan
16
17 from .core.solve import Solver # NOQA
18 Solver = Solver
19
20 from .plan import display_actions # NOQA
21 display_actions = display_actions
22
23 from .cli.common import specs_from_args, spec_from_line, specs_from_url # NOQA
24 from .cli.conda_argparse import add_parser_prefix, add_parser_channels # NOQA
25 add_parser_channels, add_parser_prefix = add_parser_channels, add_parser_prefix
26 specs_from_args, spec_from_line = specs_from_args, spec_from_line
27 specs_from_url = specs_from_url
28
29 from .cli.conda_argparse import ArgumentParser # NOQA
30 ArgumentParser = ArgumentParser
31
32 from .common.compat import PY3, StringIO, input, iteritems, string_types, text_type # NOQA
33 PY3, StringIO, input, iteritems, string_types, text_type = PY3, StringIO, input, iteritems, string_types, text_type # NOQA
34 from .gateways.connection.session import CondaSession # NOQA
35 CondaSession = CondaSession
36
37 from .common.toposort import _toposort # NOQA
38 _toposort = _toposort
39
40 from .gateways.disk.link import lchmod # NOQA
41 lchmod = lchmod
42
43 from .gateways.connection.download import TmpDownload # NOQA
44
45 TmpDownload = TmpDownload
46 handle_proxy_407 = lambda x, y: warn("handle_proxy_407 is deprecated. "
47 "Now handled by CondaSession.")
48 from .core.index import dist_str_in_index, fetch_index, get_index # NOQA
49 dist_str_in_index, fetch_index, get_index = dist_str_in_index, fetch_index, get_index # NOQA
50 from .core.package_cache_data import download, rm_fetched # NOQA
51 download, rm_fetched = download, rm_fetched
52
53 from .install import package_cache, prefix_placeholder, symlink_conda # NOQA
54 package_cache, prefix_placeholder, symlink_conda = package_cache, prefix_placeholder, symlink_conda
55
56 from .gateways.disk.delete import delete_trash, move_to_trash # NOQA
57 delete_trash, move_to_trash = delete_trash, move_to_trash
58
59 from .core.prefix_data import is_linked, linked, linked_data # NOQA
60 is_linked, linked, linked_data = is_linked, linked, linked_data
61
62 from .misc import untracked, walk_prefix # NOQA
63 untracked, walk_prefix = untracked, walk_prefix
64
65 from .resolve import MatchSpec, ResolvePackageNotFound, Resolve, Unsatisfiable # NOQA
66 MatchSpec, Resolve = MatchSpec, Resolve
67 Unsatisfiable = Unsatisfiable
68 NoPackagesFound = NoPackagesFoundError = ResolvePackageNotFound
69
70 from .utils import hashsum_file, human_bytes, unix_path_to_win, url_path # NOQA
71 from .common.path import win_path_to_unix # NOQA
72 hashsum_file, human_bytes = hashsum_file, human_bytes
73 unix_path_to_win = unix_path_to_win
74 win_path_to_unix, url_path = win_path_to_unix, url_path
75
76 from .gateways.disk.read import compute_md5sum # NOQA
77 md5_file = compute_md5sum
78
79 from .models.version import VersionOrder, normalized_version # NOQA
80 VersionOrder, normalized_version = VersionOrder, normalized_version # NOQA
81
82 import conda.base.context # NOQA
83 from .base.context import get_prefix, non_x86_linux_machines, sys_rc_path # NOQA
84 non_x86_linux_machines, sys_rc_path = non_x86_linux_machines, sys_rc_path
85 get_prefix = get_prefix
86
87 from ._vendor.auxlib.entity import EntityEncoder # NOQA
88 EntityEncoder = EntityEncoder
89 from .base.constants import DEFAULT_CHANNELS, DEFAULT_CHANNELS_WIN, DEFAULT_CHANNELS_UNIX # NOQA
90 DEFAULT_CHANNELS, DEFAULT_CHANNELS_WIN, DEFAULT_CHANNELS_UNIX = DEFAULT_CHANNELS, DEFAULT_CHANNELS_WIN, DEFAULT_CHANNELS_UNIX # NOQA
91 get_default_urls = lambda: DEFAULT_CHANNELS
92
93 arch_name = conda.base.context.context.arch_name
94 binstar_upload = conda.base.context.context.anaconda_upload
95 bits = conda.base.context.context.bits
96 default_prefix = conda.base.context.context.default_prefix
97 default_python = conda.base.context.context.default_python
98 envs_dirs = conda.base.context.context.envs_dirs
99 pkgs_dirs = conda.base.context.context.pkgs_dirs
100 platform = conda.base.context.context.platform
101 root_dir = conda.base.context.context.root_prefix
102 root_writable = conda.base.context.context.root_writable
103 subdir = conda.base.context.context.subdir
104 conda_private = conda.base.context.context.conda_private
105 from .models.channel import get_conda_build_local_url # NOQA
106 get_rc_urls = lambda: list(conda.base.context.context.channels)
107 get_local_urls = lambda: list(get_conda_build_local_url()) or []
108 load_condarc = lambda fn: conda.base.context.reset_context([fn])
109 from .exceptions import PaddingError, LinkError, CondaOSError, PathNotFoundError # NOQA
110 PaddingError = PaddingError
111 LinkError = LinkError
112 CondaOSError = CondaOSError
113 # PathNotFoundError is the conda 4.4.x name for it - let's plan ahead.
114 PathNotFoundError = CondaFileNotFoundError = PathNotFoundError
115 from .gateways.disk.link import CrossPlatformStLink # NOQA
116 CrossPlatformStLink = CrossPlatformStLink
117
118 from .models.enums import FileMode # NOQA
119 FileMode = FileMode
120 from .models.enums import PathType # NOQA
121 PathType = PathType
122
123 from .models.records import PackageRecord # NOQA
124 PackageRecord = IndexRecord = PackageRecord
125
126 from .compat import TemporaryDirectory # NOQA
127 TemporaryDirectory = TemporaryDirectory
128
129 from .gateways.subprocess import ACTIVE_SUBPROCESSES, subprocess_call # NOQA
130 ACTIVE_SUBPROCESSES, subprocess_call = ACTIVE_SUBPROCESSES, subprocess_call
131
132 from .core.subdir_data import cache_fn_url # NOQA
133 cache_fn_url = cache_fn_url
134
135
136 class Completer(object): # pragma: no cover
137 def get_items(self):
138 return self._get_items()
139
140 def __contains__(self, item):
141 return True
142
143 def __iter__(self):
144 return iter(self.get_items())
145
146
147 class InstalledPackages(object):
148 pass
149
150
151 class memoized(object): # pragma: no cover
152 """Decorator. Caches a function's return value each time it is called.
153 If called later with the same arguments, the cached value is returned
154 (not reevaluated).
155 """
156 def __init__(self, func):
157 self.func = func
158 self.cache = {}
159 self.lock = threading.Lock()
160
161 def __call__(self, *args, **kw):
162 newargs = []
163 for arg in args:
164 if isinstance(arg, list):
165 newargs.append(tuple(arg))
166 elif not isinstance(arg, Hashable):
167 # uncacheable. a list, for instance.
168 # better to not cache than blow up.
169 return self.func(*args, **kw)
170 else:
171 newargs.append(arg)
172 newargs = tuple(newargs)
173 key = (newargs, frozenset(sorted(kw.items())))
174 with self.lock:
175 if key in self.cache:
176 return self.cache[key]
177 else:
178 value = self.func(*args, **kw)
179 self.cache[key] = value
180 return value
181
182
183 from .gateways.disk.delete import rm_rf as _rm_rf # NOQA
184 from .core.prefix_data import delete_prefix_from_linked_data # NOQA
185
186
187 def rm_rf(path, max_retries=5, trash=True):
188 _rm_rf(path, max_retries, trash)
189 delete_prefix_from_linked_data(path)
190
191
192 # ######################
193 # signature.py
194 # ######################
195 KEYS = None
196 KEYS_DIR = None
197
198
199 def hash_file(_):
200 return None # pragma: no cover
201
202
203 def verify(_):
204 return False # pragma: no cover
205
206
207 from .plan import execute_actions, execute_instructions, execute_plan, install_actions # NOQA
208 execute_actions, execute_instructions = execute_actions, execute_instructions
209 execute_plan, install_actions = execute_plan, install_actions
210
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/conda/exports.py b/conda/exports.py
--- a/conda/exports.py
+++ b/conda/exports.py
@@ -11,6 +11,9 @@
from . import CondaError # NOQA
CondaError = CondaError
+from .base.context import reset_context # NOQA
+reset_context() # initialize context when conda.exports is imported
+
from . import compat, plan # NOQA
compat, plan = compat, plan
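The intent of the patch above is that simply importing the compatibility module leaves a usable configuration context behind. A rough usage sketch (module and attribute names as in the diff; the printed attribute is only an example):

```python
# Importing conda.exports now calls reset_context() as a side effect,
# so the global configuration context is initialized up front.
import conda.exports  # noqa: F401

from conda.base.context import context

# Any context attribute can be read straight away; subdir is used here
# purely as an illustration.
print(context.subdir)
```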
|
{"golden_diff": "diff --git a/conda/exports.py b/conda/exports.py\n--- a/conda/exports.py\n+++ b/conda/exports.py\n@@ -11,6 +11,9 @@\n from . import CondaError # NOQA\n CondaError = CondaError\n \n+from .base.context import reset_context # NOQA\n+reset_context() # initialize context when conda.exports is imported\n+\n from . import compat, plan # NOQA\n compat, plan = compat, plan\n", "issue": "conda's configuration context is not initialized in conda.exports\nroot cause of https://github.com/conda-forge/conda-smithy/issues/762\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom collections import Hashable\nfrom logging import getLogger\nimport threading\nfrom warnings import warn\n\nlog = getLogger(__name__)\n\nfrom . import CondaError # NOQA\nCondaError = CondaError\n\nfrom . import compat, plan # NOQA\ncompat, plan = compat, plan\n\nfrom .core.solve import Solver # NOQA\nSolver = Solver\n\nfrom .plan import display_actions # NOQA\ndisplay_actions = display_actions\n\nfrom .cli.common import specs_from_args, spec_from_line, specs_from_url # NOQA\nfrom .cli.conda_argparse import add_parser_prefix, add_parser_channels # NOQA\nadd_parser_channels, add_parser_prefix = add_parser_channels, add_parser_prefix\nspecs_from_args, spec_from_line = specs_from_args, spec_from_line\nspecs_from_url = specs_from_url\n\nfrom .cli.conda_argparse import ArgumentParser # NOQA\nArgumentParser = ArgumentParser\n\nfrom .common.compat import PY3, StringIO, input, iteritems, string_types, text_type # NOQA\nPY3, StringIO, input, iteritems, string_types, text_type = PY3, StringIO, input, iteritems, string_types, text_type # NOQA\nfrom .gateways.connection.session import CondaSession # NOQA\nCondaSession = CondaSession\n\nfrom .common.toposort import _toposort # NOQA\n_toposort = _toposort\n\nfrom .gateways.disk.link import lchmod # NOQA\nlchmod = lchmod\n\nfrom .gateways.connection.download import TmpDownload # NOQA\n\nTmpDownload = TmpDownload\nhandle_proxy_407 = lambda x, y: warn(\"handle_proxy_407 is deprecated. 
\"\n \"Now handled by CondaSession.\")\nfrom .core.index import dist_str_in_index, fetch_index, get_index # NOQA\ndist_str_in_index, fetch_index, get_index = dist_str_in_index, fetch_index, get_index # NOQA\nfrom .core.package_cache_data import download, rm_fetched # NOQA\ndownload, rm_fetched = download, rm_fetched\n\nfrom .install import package_cache, prefix_placeholder, symlink_conda # NOQA\npackage_cache, prefix_placeholder, symlink_conda = package_cache, prefix_placeholder, symlink_conda\n\nfrom .gateways.disk.delete import delete_trash, move_to_trash # NOQA\ndelete_trash, move_to_trash = delete_trash, move_to_trash\n\nfrom .core.prefix_data import is_linked, linked, linked_data # NOQA\nis_linked, linked, linked_data = is_linked, linked, linked_data\n\nfrom .misc import untracked, walk_prefix # NOQA\nuntracked, walk_prefix = untracked, walk_prefix\n\nfrom .resolve import MatchSpec, ResolvePackageNotFound, Resolve, Unsatisfiable # NOQA\nMatchSpec, Resolve = MatchSpec, Resolve\nUnsatisfiable = Unsatisfiable\nNoPackagesFound = NoPackagesFoundError = ResolvePackageNotFound\n\nfrom .utils import hashsum_file, human_bytes, unix_path_to_win, url_path # NOQA\nfrom .common.path import win_path_to_unix # NOQA\nhashsum_file, human_bytes = hashsum_file, human_bytes\nunix_path_to_win = unix_path_to_win\nwin_path_to_unix, url_path = win_path_to_unix, url_path\n\nfrom .gateways.disk.read import compute_md5sum # NOQA\nmd5_file = compute_md5sum\n\nfrom .models.version import VersionOrder, normalized_version # NOQA\nVersionOrder, normalized_version = VersionOrder, normalized_version # NOQA\n\nimport conda.base.context # NOQA\nfrom .base.context import get_prefix, non_x86_linux_machines, sys_rc_path # NOQA\nnon_x86_linux_machines, sys_rc_path = non_x86_linux_machines, sys_rc_path\nget_prefix = get_prefix\n\nfrom ._vendor.auxlib.entity import EntityEncoder # NOQA\nEntityEncoder = EntityEncoder\nfrom .base.constants import DEFAULT_CHANNELS, DEFAULT_CHANNELS_WIN, DEFAULT_CHANNELS_UNIX # NOQA\nDEFAULT_CHANNELS, DEFAULT_CHANNELS_WIN, DEFAULT_CHANNELS_UNIX = DEFAULT_CHANNELS, DEFAULT_CHANNELS_WIN, DEFAULT_CHANNELS_UNIX # NOQA\nget_default_urls = lambda: DEFAULT_CHANNELS\n\narch_name = conda.base.context.context.arch_name\nbinstar_upload = conda.base.context.context.anaconda_upload\nbits = conda.base.context.context.bits\ndefault_prefix = conda.base.context.context.default_prefix\ndefault_python = conda.base.context.context.default_python\nenvs_dirs = conda.base.context.context.envs_dirs\npkgs_dirs = conda.base.context.context.pkgs_dirs\nplatform = conda.base.context.context.platform\nroot_dir = conda.base.context.context.root_prefix\nroot_writable = conda.base.context.context.root_writable\nsubdir = conda.base.context.context.subdir\nconda_private = conda.base.context.context.conda_private\nfrom .models.channel import get_conda_build_local_url # NOQA\nget_rc_urls = lambda: list(conda.base.context.context.channels)\nget_local_urls = lambda: list(get_conda_build_local_url()) or []\nload_condarc = lambda fn: conda.base.context.reset_context([fn])\nfrom .exceptions import PaddingError, LinkError, CondaOSError, PathNotFoundError # NOQA\nPaddingError = PaddingError\nLinkError = LinkError\nCondaOSError = CondaOSError\n# PathNotFoundError is the conda 4.4.x name for it - let's plan ahead.\nPathNotFoundError = CondaFileNotFoundError = PathNotFoundError\nfrom .gateways.disk.link import CrossPlatformStLink # NOQA\nCrossPlatformStLink = CrossPlatformStLink\n\nfrom .models.enums import FileMode # NOQA\nFileMode = 
FileMode\nfrom .models.enums import PathType # NOQA\nPathType = PathType\n\nfrom .models.records import PackageRecord # NOQA\nPackageRecord = IndexRecord = PackageRecord\n\nfrom .compat import TemporaryDirectory # NOQA\nTemporaryDirectory = TemporaryDirectory\n\nfrom .gateways.subprocess import ACTIVE_SUBPROCESSES, subprocess_call # NOQA\nACTIVE_SUBPROCESSES, subprocess_call = ACTIVE_SUBPROCESSES, subprocess_call\n\nfrom .core.subdir_data import cache_fn_url # NOQA\ncache_fn_url = cache_fn_url\n\n\nclass Completer(object): # pragma: no cover\n def get_items(self):\n return self._get_items()\n\n def __contains__(self, item):\n return True\n\n def __iter__(self):\n return iter(self.get_items())\n\n\nclass InstalledPackages(object):\n pass\n\n\nclass memoized(object): # pragma: no cover\n \"\"\"Decorator. Caches a function's return value each time it is called.\n If called later with the same arguments, the cached value is returned\n (not reevaluated).\n \"\"\"\n def __init__(self, func):\n self.func = func\n self.cache = {}\n self.lock = threading.Lock()\n\n def __call__(self, *args, **kw):\n newargs = []\n for arg in args:\n if isinstance(arg, list):\n newargs.append(tuple(arg))\n elif not isinstance(arg, Hashable):\n # uncacheable. a list, for instance.\n # better to not cache than blow up.\n return self.func(*args, **kw)\n else:\n newargs.append(arg)\n newargs = tuple(newargs)\n key = (newargs, frozenset(sorted(kw.items())))\n with self.lock:\n if key in self.cache:\n return self.cache[key]\n else:\n value = self.func(*args, **kw)\n self.cache[key] = value\n return value\n\n\nfrom .gateways.disk.delete import rm_rf as _rm_rf # NOQA\nfrom .core.prefix_data import delete_prefix_from_linked_data # NOQA\n\n\ndef rm_rf(path, max_retries=5, trash=True):\n _rm_rf(path, max_retries, trash)\n delete_prefix_from_linked_data(path)\n\n\n# ######################\n# signature.py\n# ######################\nKEYS = None\nKEYS_DIR = None\n\n\ndef hash_file(_):\n return None # pragma: no cover\n\n\ndef verify(_):\n return False # pragma: no cover\n\n\nfrom .plan import execute_actions, execute_instructions, execute_plan, install_actions # NOQA\nexecute_actions, execute_instructions = execute_actions, execute_instructions\nexecute_plan, install_actions = execute_plan, install_actions\n", "path": "conda/exports.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom collections import Hashable\nfrom logging import getLogger\nimport threading\nfrom warnings import warn\n\nlog = getLogger(__name__)\n\nfrom . import CondaError # NOQA\nCondaError = CondaError\n\nfrom .base.context import reset_context # NOQA\nreset_context() # initialize context when conda.exports is imported\n\nfrom . 
import compat, plan # NOQA\ncompat, plan = compat, plan\n\nfrom .core.solve import Solver # NOQA\nSolver = Solver\n\nfrom .plan import display_actions # NOQA\ndisplay_actions = display_actions\n\nfrom .cli.common import specs_from_args, spec_from_line, specs_from_url # NOQA\nfrom .cli.conda_argparse import add_parser_prefix, add_parser_channels # NOQA\nadd_parser_channels, add_parser_prefix = add_parser_channels, add_parser_prefix\nspecs_from_args, spec_from_line = specs_from_args, spec_from_line\nspecs_from_url = specs_from_url\n\nfrom .cli.conda_argparse import ArgumentParser # NOQA\nArgumentParser = ArgumentParser\n\nfrom .common.compat import PY3, StringIO, input, iteritems, string_types, text_type # NOQA\nPY3, StringIO, input, iteritems, string_types, text_type = PY3, StringIO, input, iteritems, string_types, text_type # NOQA\nfrom .gateways.connection.session import CondaSession # NOQA\nCondaSession = CondaSession\n\nfrom .common.toposort import _toposort # NOQA\n_toposort = _toposort\n\nfrom .gateways.disk.link import lchmod # NOQA\nlchmod = lchmod\n\nfrom .gateways.connection.download import TmpDownload # NOQA\n\nTmpDownload = TmpDownload\nhandle_proxy_407 = lambda x, y: warn(\"handle_proxy_407 is deprecated. \"\n \"Now handled by CondaSession.\")\nfrom .core.index import dist_str_in_index, fetch_index, get_index # NOQA\ndist_str_in_index, fetch_index, get_index = dist_str_in_index, fetch_index, get_index # NOQA\nfrom .core.package_cache_data import download, rm_fetched # NOQA\ndownload, rm_fetched = download, rm_fetched\n\nfrom .install import package_cache, prefix_placeholder, symlink_conda # NOQA\npackage_cache, prefix_placeholder, symlink_conda = package_cache, prefix_placeholder, symlink_conda\n\nfrom .gateways.disk.delete import delete_trash, move_to_trash # NOQA\ndelete_trash, move_to_trash = delete_trash, move_to_trash\n\nfrom .core.prefix_data import is_linked, linked, linked_data # NOQA\nis_linked, linked, linked_data = is_linked, linked, linked_data\n\nfrom .misc import untracked, walk_prefix # NOQA\nuntracked, walk_prefix = untracked, walk_prefix\n\nfrom .resolve import MatchSpec, ResolvePackageNotFound, Resolve, Unsatisfiable # NOQA\nMatchSpec, Resolve = MatchSpec, Resolve\nUnsatisfiable = Unsatisfiable\nNoPackagesFound = NoPackagesFoundError = ResolvePackageNotFound\n\nfrom .utils import hashsum_file, human_bytes, unix_path_to_win, url_path # NOQA\nfrom .common.path import win_path_to_unix # NOQA\nhashsum_file, human_bytes = hashsum_file, human_bytes\nunix_path_to_win = unix_path_to_win\nwin_path_to_unix, url_path = win_path_to_unix, url_path\n\nfrom .gateways.disk.read import compute_md5sum # NOQA\nmd5_file = compute_md5sum\n\nfrom .models.version import VersionOrder, normalized_version # NOQA\nVersionOrder, normalized_version = VersionOrder, normalized_version # NOQA\n\nimport conda.base.context # NOQA\nfrom .base.context import get_prefix, non_x86_linux_machines, sys_rc_path # NOQA\nnon_x86_linux_machines, sys_rc_path = non_x86_linux_machines, sys_rc_path\nget_prefix = get_prefix\n\nfrom ._vendor.auxlib.entity import EntityEncoder # NOQA\nEntityEncoder = EntityEncoder\nfrom .base.constants import DEFAULT_CHANNELS, DEFAULT_CHANNELS_WIN, DEFAULT_CHANNELS_UNIX # NOQA\nDEFAULT_CHANNELS, DEFAULT_CHANNELS_WIN, DEFAULT_CHANNELS_UNIX = DEFAULT_CHANNELS, DEFAULT_CHANNELS_WIN, DEFAULT_CHANNELS_UNIX # NOQA\nget_default_urls = lambda: DEFAULT_CHANNELS\n\narch_name = conda.base.context.context.arch_name\nbinstar_upload = conda.base.context.context.anaconda_upload\nbits = 
conda.base.context.context.bits\ndefault_prefix = conda.base.context.context.default_prefix\ndefault_python = conda.base.context.context.default_python\nenvs_dirs = conda.base.context.context.envs_dirs\npkgs_dirs = conda.base.context.context.pkgs_dirs\nplatform = conda.base.context.context.platform\nroot_dir = conda.base.context.context.root_prefix\nroot_writable = conda.base.context.context.root_writable\nsubdir = conda.base.context.context.subdir\nconda_private = conda.base.context.context.conda_private\nfrom .models.channel import get_conda_build_local_url # NOQA\nget_rc_urls = lambda: list(conda.base.context.context.channels)\nget_local_urls = lambda: list(get_conda_build_local_url()) or []\nload_condarc = lambda fn: conda.base.context.reset_context([fn])\nfrom .exceptions import PaddingError, LinkError, CondaOSError, PathNotFoundError # NOQA\nPaddingError = PaddingError\nLinkError = LinkError\nCondaOSError = CondaOSError\n# PathNotFoundError is the conda 4.4.x name for it - let's plan ahead.\nPathNotFoundError = CondaFileNotFoundError = PathNotFoundError\nfrom .gateways.disk.link import CrossPlatformStLink # NOQA\nCrossPlatformStLink = CrossPlatformStLink\n\nfrom .models.enums import FileMode # NOQA\nFileMode = FileMode\nfrom .models.enums import PathType # NOQA\nPathType = PathType\n\nfrom .models.records import PackageRecord # NOQA\nPackageRecord = IndexRecord = PackageRecord\n\nfrom .compat import TemporaryDirectory # NOQA\nTemporaryDirectory = TemporaryDirectory\n\nfrom .gateways.subprocess import ACTIVE_SUBPROCESSES, subprocess_call # NOQA\nACTIVE_SUBPROCESSES, subprocess_call = ACTIVE_SUBPROCESSES, subprocess_call\n\nfrom .core.subdir_data import cache_fn_url # NOQA\ncache_fn_url = cache_fn_url\n\n\nclass Completer(object): # pragma: no cover\n def get_items(self):\n return self._get_items()\n\n def __contains__(self, item):\n return True\n\n def __iter__(self):\n return iter(self.get_items())\n\n\nclass InstalledPackages(object):\n pass\n\n\nclass memoized(object): # pragma: no cover\n \"\"\"Decorator. Caches a function's return value each time it is called.\n If called later with the same arguments, the cached value is returned\n (not reevaluated).\n \"\"\"\n def __init__(self, func):\n self.func = func\n self.cache = {}\n self.lock = threading.Lock()\n\n def __call__(self, *args, **kw):\n newargs = []\n for arg in args:\n if isinstance(arg, list):\n newargs.append(tuple(arg))\n elif not isinstance(arg, Hashable):\n # uncacheable. 
a list, for instance.\n # better to not cache than blow up.\n return self.func(*args, **kw)\n else:\n newargs.append(arg)\n newargs = tuple(newargs)\n key = (newargs, frozenset(sorted(kw.items())))\n with self.lock:\n if key in self.cache:\n return self.cache[key]\n else:\n value = self.func(*args, **kw)\n self.cache[key] = value\n return value\n\n\nfrom .gateways.disk.delete import rm_rf as _rm_rf # NOQA\nfrom .core.prefix_data import delete_prefix_from_linked_data # NOQA\n\n\ndef rm_rf(path, max_retries=5, trash=True):\n _rm_rf(path, max_retries, trash)\n delete_prefix_from_linked_data(path)\n\n\n# ######################\n# signature.py\n# ######################\nKEYS = None\nKEYS_DIR = None\n\n\ndef hash_file(_):\n return None # pragma: no cover\n\n\ndef verify(_):\n return False # pragma: no cover\n\n\nfrom .plan import execute_actions, execute_instructions, execute_plan, install_actions # NOQA\nexecute_actions, execute_instructions = execute_actions, execute_instructions\nexecute_plan, install_actions = execute_plan, install_actions\n", "path": "conda/exports.py"}]}
| 2,701 | 111 |
gh_patches_debug_62284
|
rasdani/github-patches
|
git_diff
|
SciTools__cartopy-228
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
coastlines drawn under filled contours
When using matplotlib v1.3.x and cartopy v0.7.x (development master from github) coastlines and other features are drawn underneath filled contours unless:
```
ax.coastlines(zorder=1)
```
is used to manually raise them above. I suspect this may be a matplotlib issue, but thought it would be best to raise it here for the cartopy experts to take a look at first.
--- END ISSUE ---
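For context, a minimal script of roughly this shape reproduces the behaviour described in the issue; the grid, data, and projection are synthetic and purely illustrative:

```python
import matplotlib.pyplot as plt
import numpy as np
import cartopy.crs as ccrs

# Synthetic global field, only for demonstration purposes.
lons = np.linspace(-180, 180, 73)
lats = np.linspace(-90, 90, 37)
data = np.cos(np.deg2rad(lats))[:, None] * np.sin(np.deg2rad(lons))[None, :]

ax = plt.axes(projection=ccrs.PlateCarree())
ax.contourf(lons, lats, data, transform=ccrs.PlateCarree())
ax.coastlines()            # ends up underneath the filled contours
# ax.coastlines(zorder=1)  # the manual workaround mentioned in the report
plt.show()
```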
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/cartopy/mpl/feature_artist.py`
Content:
```
1 # (C) British Crown Copyright 2011 - 2012, Met Office
2 #
3 # This file is part of cartopy.
4 #
5 # cartopy is free software: you can redistribute it and/or modify it under
6 # the terms of the GNU Lesser General Public License as published by the
7 # Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # cartopy is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU Lesser General Public License for more details.
14 #
15 # You should have received a copy of the GNU Lesser General Public License
16 # along with cartopy. If not, see <http://www.gnu.org/licenses/>.
17 """
18 This module defines the :class:`FeatureArtist` class, for drawing
19 :class:`Feature` instances with matplotlib.
20
21 """
22 import warnings
23 import weakref
24
25 import matplotlib.artist
26 import matplotlib.cbook
27 import matplotlib.collections
28 import matplotlib.lines
29
30 import cartopy.mpl.patch
31
32
33 class FeatureArtist(matplotlib.artist.Artist):
34 """
35 A subclass of :class:`~matplotlib.artist.Artist` capable of
36 drawing a :class:`cartopy.feature.Feature`.
37
38 """
39 _geometry_to_path_cache = weakref.WeakKeyDictionary()
40 """
41 A nested mapping from geometry and target projection to the
42 resulting transformed matplotlib paths::
43
44 {geom: {target_projection: list_of_paths}}
45
46 This provides a significant boost when producing multiple maps of the
47 same projection.
48
49 """
50 def __init__(self, feature, **kwargs):
51 """
52 Args:
53
54 * feature:
55 an instance of :class:`cartopy.feature.Feature` to draw.
56 * kwargs:
57 keyword arguments to be used when drawing the feature. These
58 will override those shared with the feature.
59
60 """
61 super(FeatureArtist, self).__init__()
62
63 if kwargs is None:
64 kwargs = {}
65 self._kwargs = dict(kwargs)
66
67 # Set default zorder so that features are drawn before
68 # lines e.g. contours but after images.
69 # Note that the zorder of Patch, PatchCollection and PathCollection
70 # are all 1 by default. Assuming equal zorder drawing takes place in
71 # the following order: collections, patches, lines (default zorder=2),
72 # text (default zorder=3), then other artists e.g. FeatureArtist.
73 if self._kwargs.get('zorder') is not None:
74 self.set_zorder(self._kwargs['zorder'])
75 elif feature.kwargs.get('zorder') is not None:
76 self.set_zorder(feature.kwargs['zorder'])
77 else:
78 self.set_zorder(matplotlib.collections.PathCollection.zorder)
79
80 self._feature = feature
81
82 @matplotlib.artist.allow_rasterization
83 def draw(self, renderer, *args, **kwargs):
84 """
85 Draws the geometries of the feature that intersect with the extent of
86 the :class:`cartopy.mpl.GeoAxes` instance to which this
87 object has been added.
88
89 """
90 if not self.get_visible():
91 return
92
93 ax = self.get_axes()
94 feature_crs = self._feature.crs
95
96 # Get geometries that we need to draw.
97 extent = None
98 try:
99 extent = ax.get_extent(feature_crs)
100 except ValueError:
101 warnings.warn('Unable to determine extent. Defaulting to global.')
102 geoms = self._feature.intersecting_geometries(extent)
103
104 # Project (if necessary) and convert geometries to matplotlib paths.
105 paths = []
106 key = ax.projection
107 for geom in geoms:
108 mapping = FeatureArtist._geometry_to_path_cache.setdefault(geom,
109 {})
110 geom_paths = mapping.get(key)
111 if geom_paths is None:
112 if ax.projection != feature_crs:
113 projected_geom = ax.projection.project_geometry(
114 geom, feature_crs)
115 else:
116 projected_geom = geom
117 geom_paths = cartopy.mpl.patch.geos_to_path(
118 projected_geom)
119 mapping[key] = geom_paths
120 paths.extend(geom_paths)
121
122 # Build path collection and draw it.
123 transform = ax.projection._as_mpl_transform(ax)
124 # Combine all the keyword args in priority order
125 final_kwargs = dict(self._feature.kwargs)
126 final_kwargs.update(self._kwargs)
127 final_kwargs.update(kwargs)
128 c = matplotlib.collections.PathCollection(paths,
129 transform=transform,
130 **final_kwargs)
131 c.set_clip_path(ax.patch)
132 return c.draw(renderer)
133
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lib/cartopy/mpl/feature_artist.py b/lib/cartopy/mpl/feature_artist.py
--- a/lib/cartopy/mpl/feature_artist.py
+++ b/lib/cartopy/mpl/feature_artist.py
@@ -75,7 +75,10 @@
elif feature.kwargs.get('zorder') is not None:
self.set_zorder(feature.kwargs['zorder'])
else:
- self.set_zorder(matplotlib.collections.PathCollection.zorder)
+ # The class attribute matplotlib.collections.PathCollection.zorder
+ # was removed after mpl v1.2.0, so the hard-coded value of 1 is
+ # used instead.
+ self.set_zorder(1)
self._feature = feature
|
{"golden_diff": "diff --git a/lib/cartopy/mpl/feature_artist.py b/lib/cartopy/mpl/feature_artist.py\n--- a/lib/cartopy/mpl/feature_artist.py\n+++ b/lib/cartopy/mpl/feature_artist.py\n@@ -75,7 +75,10 @@\n elif feature.kwargs.get('zorder') is not None:\n self.set_zorder(feature.kwargs['zorder'])\n else:\n- self.set_zorder(matplotlib.collections.PathCollection.zorder)\n+ # The class attribute matplotlib.collections.PathCollection.zorder\n+ # was removed after mpl v1.2.0, so the hard-coded value of 1 is\n+ # used instead.\n+ self.set_zorder(1)\n \n self._feature = feature\n", "issue": "coastlines drawn under filled contours\nWhen using matplotlib v1.3.x and cartopy v0.7.x (development master from github) coastlines and other features are drawn underneath filled contours unless:\n\n```\nax.coastlines(zorder=1)\n```\n\nis used to manually raise them above. I suspect this may be a matplotlib issue, but thought it would be best to raise it here for the cartopy experts to take a look at first.\n\n", "before_files": [{"content": "# (C) British Crown Copyright 2011 - 2012, Met Office\n#\n# This file is part of cartopy.\n#\n# cartopy is free software: you can redistribute it and/or modify it under\n# the terms of the GNU Lesser General Public License as published by the\n# Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# cartopy is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with cartopy. If not, see <http://www.gnu.org/licenses/>.\n\"\"\"\nThis module defines the :class:`FeatureArtist` class, for drawing\n:class:`Feature` instances with matplotlib.\n\n\"\"\"\nimport warnings\nimport weakref\n\nimport matplotlib.artist\nimport matplotlib.cbook\nimport matplotlib.collections\nimport matplotlib.lines\n\nimport cartopy.mpl.patch\n\n\nclass FeatureArtist(matplotlib.artist.Artist):\n \"\"\"\n A subclass of :class:`~matplotlib.artist.Artist` capable of\n drawing a :class:`cartopy.feature.Feature`.\n\n \"\"\"\n _geometry_to_path_cache = weakref.WeakKeyDictionary()\n \"\"\"\n A nested mapping from geometry and target projection to the\n resulting transformed matplotlib paths::\n\n {geom: {target_projection: list_of_paths}}\n\n This provides a significant boost when producing multiple maps of the\n same projection.\n\n \"\"\"\n def __init__(self, feature, **kwargs):\n \"\"\"\n Args:\n\n * feature:\n an instance of :class:`cartopy.feature.Feature` to draw.\n * kwargs:\n keyword arguments to be used when drawing the feature. These\n will override those shared with the feature.\n\n \"\"\"\n super(FeatureArtist, self).__init__()\n\n if kwargs is None:\n kwargs = {}\n self._kwargs = dict(kwargs)\n\n # Set default zorder so that features are drawn before\n # lines e.g. contours but after images.\n # Note that the zorder of Patch, PatchCollection and PathCollection\n # are all 1 by default. Assuming equal zorder drawing takes place in\n # the following order: collections, patches, lines (default zorder=2),\n # text (default zorder=3), then other artists e.g. 
FeatureArtist.\n if self._kwargs.get('zorder') is not None:\n self.set_zorder(self._kwargs['zorder'])\n elif feature.kwargs.get('zorder') is not None:\n self.set_zorder(feature.kwargs['zorder'])\n else:\n self.set_zorder(matplotlib.collections.PathCollection.zorder)\n\n self._feature = feature\n\n @matplotlib.artist.allow_rasterization\n def draw(self, renderer, *args, **kwargs):\n \"\"\"\n Draws the geometries of the feature that intersect with the extent of\n the :class:`cartopy.mpl.GeoAxes` instance to which this\n object has been added.\n\n \"\"\"\n if not self.get_visible():\n return\n\n ax = self.get_axes()\n feature_crs = self._feature.crs\n\n # Get geometries that we need to draw.\n extent = None\n try:\n extent = ax.get_extent(feature_crs)\n except ValueError:\n warnings.warn('Unable to determine extent. Defaulting to global.')\n geoms = self._feature.intersecting_geometries(extent)\n\n # Project (if necessary) and convert geometries to matplotlib paths.\n paths = []\n key = ax.projection\n for geom in geoms:\n mapping = FeatureArtist._geometry_to_path_cache.setdefault(geom,\n {})\n geom_paths = mapping.get(key)\n if geom_paths is None:\n if ax.projection != feature_crs:\n projected_geom = ax.projection.project_geometry(\n geom, feature_crs)\n else:\n projected_geom = geom\n geom_paths = cartopy.mpl.patch.geos_to_path(\n projected_geom)\n mapping[key] = geom_paths\n paths.extend(geom_paths)\n\n # Build path collection and draw it.\n transform = ax.projection._as_mpl_transform(ax)\n # Combine all the keyword args in priority order\n final_kwargs = dict(self._feature.kwargs)\n final_kwargs.update(self._kwargs)\n final_kwargs.update(kwargs)\n c = matplotlib.collections.PathCollection(paths,\n transform=transform,\n **final_kwargs)\n c.set_clip_path(ax.patch)\n return c.draw(renderer)\n", "path": "lib/cartopy/mpl/feature_artist.py"}], "after_files": [{"content": "# (C) British Crown Copyright 2011 - 2012, Met Office\n#\n# This file is part of cartopy.\n#\n# cartopy is free software: you can redistribute it and/or modify it under\n# the terms of the GNU Lesser General Public License as published by the\n# Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# cartopy is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with cartopy. 
If not, see <http://www.gnu.org/licenses/>.\n\"\"\"\nThis module defines the :class:`FeatureArtist` class, for drawing\n:class:`Feature` instances with matplotlib.\n\n\"\"\"\nimport warnings\nimport weakref\n\nimport matplotlib.artist\nimport matplotlib.cbook\nimport matplotlib.collections\nimport matplotlib.lines\n\nimport cartopy.mpl.patch\n\n\nclass FeatureArtist(matplotlib.artist.Artist):\n \"\"\"\n A subclass of :class:`~matplotlib.artist.Artist` capable of\n drawing a :class:`cartopy.feature.Feature`.\n\n \"\"\"\n _geometry_to_path_cache = weakref.WeakKeyDictionary()\n \"\"\"\n A nested mapping from geometry and target projection to the\n resulting transformed matplotlib paths::\n\n {geom: {target_projection: list_of_paths}}\n\n This provides a significant boost when producing multiple maps of the\n same projection.\n\n \"\"\"\n def __init__(self, feature, **kwargs):\n \"\"\"\n Args:\n\n * feature:\n an instance of :class:`cartopy.feature.Feature` to draw.\n * kwargs:\n keyword arguments to be used when drawing the feature. These\n will override those shared with the feature.\n\n \"\"\"\n super(FeatureArtist, self).__init__()\n\n if kwargs is None:\n kwargs = {}\n self._kwargs = dict(kwargs)\n\n # Set default zorder so that features are drawn before\n # lines e.g. contours but after images.\n # Note that the zorder of Patch, PatchCollection and PathCollection\n # are all 1 by default. Assuming equal zorder drawing takes place in\n # the following order: collections, patches, lines (default zorder=2),\n # text (default zorder=3), then other artists e.g. FeatureArtist.\n if self._kwargs.get('zorder') is not None:\n self.set_zorder(self._kwargs['zorder'])\n elif feature.kwargs.get('zorder') is not None:\n self.set_zorder(feature.kwargs['zorder'])\n else:\n # The class attribute matplotlib.collections.PathCollection.zorder\n # was removed after mpl v1.2.0, so the hard-coded value of 1 is\n # used instead.\n self.set_zorder(1)\n\n self._feature = feature\n\n @matplotlib.artist.allow_rasterization\n def draw(self, renderer, *args, **kwargs):\n \"\"\"\n Draws the geometries of the feature that intersect with the extent of\n the :class:`cartopy.mpl.GeoAxes` instance to which this\n object has been added.\n\n \"\"\"\n if not self.get_visible():\n return\n\n ax = self.get_axes()\n feature_crs = self._feature.crs\n\n # Get geometries that we need to draw.\n extent = None\n try:\n extent = ax.get_extent(feature_crs)\n except ValueError:\n warnings.warn('Unable to determine extent. 
Defaulting to global.')\n geoms = self._feature.intersecting_geometries(extent)\n\n # Project (if necessary) and convert geometries to matplotlib paths.\n paths = []\n key = ax.projection\n for geom in geoms:\n mapping = FeatureArtist._geometry_to_path_cache.setdefault(geom,\n {})\n geom_paths = mapping.get(key)\n if geom_paths is None:\n if ax.projection != feature_crs:\n projected_geom = ax.projection.project_geometry(\n geom, feature_crs)\n else:\n projected_geom = geom\n geom_paths = cartopy.mpl.patch.geos_to_path(\n projected_geom)\n mapping[key] = geom_paths\n paths.extend(geom_paths)\n\n # Build path collection and draw it.\n transform = ax.projection._as_mpl_transform(ax)\n # Combine all the keyword args in priority order\n final_kwargs = dict(self._feature.kwargs)\n final_kwargs.update(self._kwargs)\n final_kwargs.update(kwargs)\n c = matplotlib.collections.PathCollection(paths,\n transform=transform,\n **final_kwargs)\n c.set_clip_path(ax.patch)\n return c.draw(renderer)\n", "path": "lib/cartopy/mpl/feature_artist.py"}]}
| 1,644 | 162 |
gh_patches_debug_12602
|
rasdani/github-patches
|
git_diff
|
translate__translate-3880
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
csv2po new line
Hi there, I have a CSV with:
```csv
"yellow pencil","żółty\nołówek"
"photo description","opis zdjęcia"
```
And converted with csv2po:
```po
msgid "yellow pencil"
msgstr "żółtynołówek"
msgid "photo description"
msgstr "opis zdjęcia"
```
How can I convert while preserving the "\n" escape sequence? The script deletes the backslash from the string.
--- END ISSUE ---
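For reference, the backslash-stripping can be reproduced with Python's csv module alone; the strings below are taken from the report, everything else is assumed for illustration:

```python
import csv
from io import StringIO

sample = '"yellow pencil","żółty\\nołówek"\r\n'

# With an escape character set, the csv module consumes the backslash and
# keeps the following character literally, so "\n" collapses to just "n".
row = next(csv.reader(StringIO(sample), escapechar='\\'))
print(row[1])   # żółtynołówek

# Without an escape character the backslash survives as a literal character.
row = next(csv.reader(StringIO(sample)))
print(row[1])   # żółty\nołówek
```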
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `translate/storage/csvl10n.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright 2002-2006 Zuza Software Foundation
4 #
5 # This file is part of translate.
6 #
7 # translate is free software; you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation; either version 2 of the License, or
10 # (at your option) any later version.
11 #
12 # translate is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with this program; if not, see <http://www.gnu.org/licenses/>.
19
20 """classes that hold units of comma-separated values (.csv) files (csvunit)
21 or entire files (csvfile) for use with localisation
22 """
23
24 from __future__ import unicode_literals
25
26 import six
27 if six.PY2:
28 from backports import csv
29 else:
30 import csv
31
32 from translate.storage import base
33
34
35 class DefaultDialect(csv.excel):
36 skipinitialspace = True
37 quoting = csv.QUOTE_ALL
38 escapechar = '\\'
39
40
41 csv.register_dialect('default', DefaultDialect)
42
43
44 @six.python_2_unicode_compatible
45 class csvunit(base.TranslationUnit):
46 spreadsheetescapes = [("+", "\\+"), ("-", "\\-"), ("=", "\\="), ("'", "\\'")]
47
48 def __init__(self, source=None):
49 super(csvunit, self).__init__(source)
50 self.location = ""
51 self.source = source or ""
52 self.target = ""
53 self.id = ""
54 self.fuzzy = 'False'
55 self.developer_comments = ""
56 self.translator_comments = ""
57 self.context = ""
58
59 def getid(self):
60 if self.id:
61 return self.id
62
63 result = self.source
64 context = self.context
65 if context:
66 result = u"%s\04%s" % (context, result)
67
68 return result
69
70 def setid(self, value):
71 self.id = value
72
73 def getlocations(self):
74 #FIXME: do we need to support more than one location
75 return [self.location]
76
77 def addlocation(self, location):
78 self.location = location
79
80 def getcontext(self):
81 return self.context
82
83 def setcontext(self, value):
84 self.context = value
85
86 def getnotes(self, origin=None):
87 if origin is None:
88 result = self.translator_comments
89 if self.developer_comments:
90 if result:
91 result += '\n' + self.developer_comments
92 else:
93 result = self.developer_comments
94 return result
95 elif origin == "translator":
96 return self.translator_comments
97 elif origin in ('programmer', 'developer', 'source code'):
98 return self.developer_comments
99 else:
100 raise ValueError("Comment type not valid")
101
102 def addnote(self, text, origin=None, position="append"):
103 if origin in ('programmer', 'developer', 'source code'):
104 if position == 'append' and self.developer_comments:
105 self.developer_comments += '\n' + text
106 elif position == 'prepend' and self.developer_comments:
107 self.developer_comments = text + '\n' + self.developer_comments
108 else:
109 self.developer_comments = text
110 else:
111 if position == 'append' and self.translator_comments:
112 self.translator_comments += '\n' + text
113 elif position == 'prepend' and self.translator_comments:
114 self.translator_comments = self.translator_comments + '\n' + text
115 else:
116 self.translator_comments = text
117
118 def removenotes(self):
119 self.translator_comments = u''
120
121 def isfuzzy(self):
122 if self.fuzzy.lower() in ('1', 'x', 'true', 'yes', 'fuzzy'):
123 return True
124 return False
125
126 def markfuzzy(self, value=True):
127 if value:
128 self.fuzzy = 'True'
129 else:
130 self.fuzzy = 'False'
131
132 def match_header(self):
133 """see if unit might be a header"""
134 some_value = False
135 for key, value in six.iteritems(self.todict()):
136 if value:
137 some_value = True
138 if key.lower() != 'fuzzy' and value and key.lower() != value.lower():
139 return False
140 return some_value
141
142 def add_spreadsheet_escapes(self, source, target):
143 """add common spreadsheet escapes to two strings"""
144 for unescaped, escaped in self.spreadsheetescapes:
145 if source.startswith(unescaped):
146 source = source.replace(unescaped, escaped, 1)
147 if target.startswith(unescaped):
148 target = target.replace(unescaped, escaped, 1)
149 return source, target
150
151 def remove_spreadsheet_escapes(self, source, target):
152 """remove common spreadsheet escapes from two strings"""
153 for unescaped, escaped in self.spreadsheetescapes:
154 if source.startswith(escaped):
155 source = source.replace(escaped, unescaped, 1)
156 if target.startswith(escaped):
157 target = target.replace(escaped, unescaped, 1)
158 return source, target
159
160 def fromdict(self, cedict, encoding='utf-8'):
161 for key, value in six.iteritems(cedict):
162 rkey = fieldname_map.get(key, key)
163 if value is None or key is None or key == EXTRA_KEY:
164 continue
165 if rkey == "id":
166 self.id = value
167 elif rkey == "source":
168 self.source = value
169 elif rkey == "target":
170 self.target = value
171 elif rkey == "location":
172 self.location = value
173 elif rkey == "fuzzy":
174 self.fuzzy = value
175 elif rkey == "context":
176 self.context = value
177 elif rkey == "translator_comments":
178 self.translator_comments = value
179 elif rkey == "developer_comments":
180 self.developer_comments = value
181
182 #self.source, self.target = self.remove_spreadsheet_escapes(self.source, self.target)
183
184 def todict(self, **kwargs):
185 #FIXME: use apis?
186 #source, target = self.add_spreadsheet_escapes(self.source, self.target)
187 source = self.source
188 target = self.target
189 output = {
190 'location': self.location,
191 'source': source,
192 'target': target,
193 'id': self.id,
194 'fuzzy': str(self.fuzzy),
195 'context': self.context,
196 'translator_comments': self.translator_comments,
197 'developer_comments': self.developer_comments,
198 }
199
200 return output
201
202 def __str__(self):
203 return str(self.todict())
204
205
206 fieldname_map = {
207 'original': 'source',
208 'untranslated': 'source',
209 'translated': 'target',
210 'translation': 'target',
211 'identified': 'id',
212 'key': 'id',
213 'label': 'id',
214 'translator comments': 'translator_comments',
215 'notes': 'translator_comments',
216 'developer comments': 'developer_comments',
217 'state': 'fuzzy',
218 }
219
220
221 EXTRA_KEY = '__CSVL10N__EXTRA__'
222
223
224 def try_dialects(inputfile, fieldnames, dialect):
225 #FIXME: does it verify at all if we don't actually step through the file?
226 try:
227 inputfile.seek(0)
228 reader = csv.DictReader(inputfile, fieldnames=fieldnames, dialect=dialect, restkey=EXTRA_KEY)
229 except csv.Error:
230 try:
231 inputfile.seek(0)
232 reader = csv.DictReader(inputfile, fieldnames=fieldnames, dialect='default', restkey=EXTRA_KEY)
233 except csv.Error:
234 inputfile.seek(0)
235 reader = csv.DictReader(inputfile, fieldnames=fieldnames, dialect='excel', restkey=EXTRA_KEY)
236 return reader
237
238
239 def valid_fieldnames(fieldnames):
240 """Check if fieldnames are valid, that is at least one field is identified
241 as the source.
242 """
243 for fieldname in fieldnames:
244 if fieldname == 'source':
245 return True
246 elif fieldname in fieldname_map and fieldname_map[fieldname] == 'source':
247 return True
248 return False
249
250
251 def detect_header(sample, dialect, fieldnames):
252 """Test if file has a header or not, also returns number of columns in first row"""
253 inputfile = csv.StringIO(sample)
254 try:
255 reader = csv.reader(inputfile, dialect)
256 except csv.Error:
257 try:
258 inputfile.seek(0)
259 reader = csv.reader(inputfile, 'default')
260 except csv.Error:
261 inputfile.seek(0)
262 reader = csv.reader(inputfile, 'excel')
263
264 header = next(reader)
265 columncount = max(len(header), 3)
266 if valid_fieldnames(header):
267 return header
268 return fieldnames[:columncount]
269
270
271 class csvfile(base.TranslationStore):
272 """This class represents a .csv file with various lines. The default
273 format contains three columns: location, source, target
274 """
275
276 UnitClass = csvunit
277 Name = "Comma Separated Value"
278 Mimetypes = ['text/comma-separated-values', 'text/csv']
279 Extensions = ["csv"]
280
281 def __init__(self, inputfile=None, fieldnames=None, encoding='auto'):
282 super(csvfile, self).__init__(encoding=encoding)
283 if not fieldnames:
284 self.fieldnames = ['location', 'source', 'target', 'id', 'fuzzy', 'context', 'translator_comments', 'developer_comments']
285 else:
286 self.fieldnames = fieldnames
287 self.filename = getattr(inputfile, 'name', '')
288 self.dialect = 'default'
289 if inputfile is not None:
290 csvsrc = inputfile.read()
291 inputfile.close()
292 self.parse(csvsrc)
293
294 def parse(self, csvsrc):
295 text, encoding = self.detect_encoding(csvsrc, default_encodings=['utf-8', 'utf-16'])
296 #FIXME: raise parse error if encoding detection fails?
297 self.encoding = encoding or 'utf-8'
298
299 sniffer = csv.Sniffer()
300 sample = text[:1024]
301
302 try:
303 self.dialect = sniffer.sniff(sample)
304 if not self.dialect.escapechar:
305 self.dialect.escapechar = '\\'
306 if self.dialect.quoting == csv.QUOTE_MINIMAL:
307 #HACKISH: most probably a default, not real detection
308 self.dialect.quoting = csv.QUOTE_ALL
309 self.dialect.doublequote = True
310 except csv.Error:
311 self.dialect = 'default'
312
313 try:
314 fieldnames = detect_header(sample, self.dialect, self.fieldnames)
315 self.fieldnames = fieldnames
316 except csv.Error:
317 pass
318
319 inputfile = csv.StringIO(text)
320 reader = try_dialects(inputfile, self.fieldnames, self.dialect)
321
322 first_row = True
323 for row in reader:
324 newce = self.UnitClass()
325 newce.fromdict(row)
326 if not first_row or not newce.match_header():
327 self.addunit(newce)
328 first_row = False
329
330 def serialize(self, out):
331 """Write to file"""
332 source = self.getoutput()
333 if isinstance(source, six.text_type):
334 # Python 3
335 out.write(source.encode(self.encoding))
336 else:
337 out.write(source)
338
339 def getoutput(self):
340 output = csv.StringIO()
341 writer = csv.DictWriter(output, self.fieldnames,
342 extrasaction='ignore',
343 dialect=self.dialect)
344 writer.writeheader()
345 for ce in self.units:
346 writer.writerow(ce.todict())
347 return output.getvalue()
348
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/translate/storage/csvl10n.py b/translate/storage/csvl10n.py
--- a/translate/storage/csvl10n.py
+++ b/translate/storage/csvl10n.py
@@ -301,12 +301,10 @@
try:
self.dialect = sniffer.sniff(sample)
- if not self.dialect.escapechar:
- self.dialect.escapechar = '\\'
- if self.dialect.quoting == csv.QUOTE_MINIMAL:
- #HACKISH: most probably a default, not real detection
- self.dialect.quoting = csv.QUOTE_ALL
- self.dialect.doublequote = True
+ if self.dialect.quoting == csv.QUOTE_MINIMAL:
+ #HACKISH: most probably a default, not real detection
+ self.dialect.quoting = csv.QUOTE_ALL
+ self.dialect.doublequote = True
except csv.Error:
self.dialect = 'default'
|
{"golden_diff": "diff --git a/translate/storage/csvl10n.py b/translate/storage/csvl10n.py\n--- a/translate/storage/csvl10n.py\n+++ b/translate/storage/csvl10n.py\n@@ -301,12 +301,10 @@\n \n try:\n self.dialect = sniffer.sniff(sample)\n- if not self.dialect.escapechar:\n- self.dialect.escapechar = '\\\\'\n- if self.dialect.quoting == csv.QUOTE_MINIMAL:\n- #HACKISH: most probably a default, not real detection\n- self.dialect.quoting = csv.QUOTE_ALL\n- self.dialect.doublequote = True\n+ if self.dialect.quoting == csv.QUOTE_MINIMAL:\n+ #HACKISH: most probably a default, not real detection\n+ self.dialect.quoting = csv.QUOTE_ALL\n+ self.dialect.doublequote = True\n except csv.Error:\n self.dialect = 'default'\n", "issue": "csv2po new line\nHi there, i have CSV with:\r\n\r\n```csv\r\n\"yellow pencil\",\"\u017c\u00f3\u0142ty\\no\u0142\u00f3wek\"\r\n\"photo description\",\"opis zdj\u0119cia\"\r\n```\r\n\r\nAnd converted with csv2po:\r\n\r\n```po\r\nmsgid \"yellow pencil\"\r\nmsgstr \"\u017c\u00f3\u0142tyno\u0142\u00f3wek\"\r\n\r\nmsgid \"photo description\"\r\nmsgstr \"opis zdj\u0119cia\"\r\n```\r\n\r\nHow to convert with save format \"\\n\", script delete \\ from string.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2002-2006 Zuza Software Foundation\n#\n# This file is part of translate.\n#\n# translate is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# translate is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"classes that hold units of comma-separated values (.csv) files (csvunit)\nor entire files (csvfile) for use with localisation\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport six\nif six.PY2:\n from backports import csv\nelse:\n import csv\n\nfrom translate.storage import base\n\n\nclass DefaultDialect(csv.excel):\n skipinitialspace = True\n quoting = csv.QUOTE_ALL\n escapechar = '\\\\'\n\n\ncsv.register_dialect('default', DefaultDialect)\n\n\[email protected]_2_unicode_compatible\nclass csvunit(base.TranslationUnit):\n spreadsheetescapes = [(\"+\", \"\\\\+\"), (\"-\", \"\\\\-\"), (\"=\", \"\\\\=\"), (\"'\", \"\\\\'\")]\n\n def __init__(self, source=None):\n super(csvunit, self).__init__(source)\n self.location = \"\"\n self.source = source or \"\"\n self.target = \"\"\n self.id = \"\"\n self.fuzzy = 'False'\n self.developer_comments = \"\"\n self.translator_comments = \"\"\n self.context = \"\"\n\n def getid(self):\n if self.id:\n return self.id\n\n result = self.source\n context = self.context\n if context:\n result = u\"%s\\04%s\" % (context, result)\n\n return result\n\n def setid(self, value):\n self.id = value\n\n def getlocations(self):\n #FIXME: do we need to support more than one location\n return [self.location]\n\n def addlocation(self, location):\n self.location = location\n\n def getcontext(self):\n return self.context\n\n def setcontext(self, value):\n self.context = value\n\n def getnotes(self, origin=None):\n if origin is None:\n result = self.translator_comments\n if self.developer_comments:\n if result:\n result += 
'\\n' + self.developer_comments\n else:\n result = self.developer_comments\n return result\n elif origin == \"translator\":\n return self.translator_comments\n elif origin in ('programmer', 'developer', 'source code'):\n return self.developer_comments\n else:\n raise ValueError(\"Comment type not valid\")\n\n def addnote(self, text, origin=None, position=\"append\"):\n if origin in ('programmer', 'developer', 'source code'):\n if position == 'append' and self.developer_comments:\n self.developer_comments += '\\n' + text\n elif position == 'prepend' and self.developer_comments:\n self.developer_comments = text + '\\n' + self.developer_comments\n else:\n self.developer_comments = text\n else:\n if position == 'append' and self.translator_comments:\n self.translator_comments += '\\n' + text\n elif position == 'prepend' and self.translator_comments:\n self.translator_comments = self.translator_comments + '\\n' + text\n else:\n self.translator_comments = text\n\n def removenotes(self):\n self.translator_comments = u''\n\n def isfuzzy(self):\n if self.fuzzy.lower() in ('1', 'x', 'true', 'yes', 'fuzzy'):\n return True\n return False\n\n def markfuzzy(self, value=True):\n if value:\n self.fuzzy = 'True'\n else:\n self.fuzzy = 'False'\n\n def match_header(self):\n \"\"\"see if unit might be a header\"\"\"\n some_value = False\n for key, value in six.iteritems(self.todict()):\n if value:\n some_value = True\n if key.lower() != 'fuzzy' and value and key.lower() != value.lower():\n return False\n return some_value\n\n def add_spreadsheet_escapes(self, source, target):\n \"\"\"add common spreadsheet escapes to two strings\"\"\"\n for unescaped, escaped in self.spreadsheetescapes:\n if source.startswith(unescaped):\n source = source.replace(unescaped, escaped, 1)\n if target.startswith(unescaped):\n target = target.replace(unescaped, escaped, 1)\n return source, target\n\n def remove_spreadsheet_escapes(self, source, target):\n \"\"\"remove common spreadsheet escapes from two strings\"\"\"\n for unescaped, escaped in self.spreadsheetescapes:\n if source.startswith(escaped):\n source = source.replace(escaped, unescaped, 1)\n if target.startswith(escaped):\n target = target.replace(escaped, unescaped, 1)\n return source, target\n\n def fromdict(self, cedict, encoding='utf-8'):\n for key, value in six.iteritems(cedict):\n rkey = fieldname_map.get(key, key)\n if value is None or key is None or key == EXTRA_KEY:\n continue\n if rkey == \"id\":\n self.id = value\n elif rkey == \"source\":\n self.source = value\n elif rkey == \"target\":\n self.target = value\n elif rkey == \"location\":\n self.location = value\n elif rkey == \"fuzzy\":\n self.fuzzy = value\n elif rkey == \"context\":\n self.context = value\n elif rkey == \"translator_comments\":\n self.translator_comments = value\n elif rkey == \"developer_comments\":\n self.developer_comments = value\n\n #self.source, self.target = self.remove_spreadsheet_escapes(self.source, self.target)\n\n def todict(self, **kwargs):\n #FIXME: use apis?\n #source, target = self.add_spreadsheet_escapes(self.source, self.target)\n source = self.source\n target = self.target\n output = {\n 'location': self.location,\n 'source': source,\n 'target': target,\n 'id': self.id,\n 'fuzzy': str(self.fuzzy),\n 'context': self.context,\n 'translator_comments': self.translator_comments,\n 'developer_comments': self.developer_comments,\n }\n\n return output\n\n def __str__(self):\n return str(self.todict())\n\n\nfieldname_map = {\n 'original': 'source',\n 'untranslated': 'source',\n 
'translated': 'target',\n 'translation': 'target',\n 'identified': 'id',\n 'key': 'id',\n 'label': 'id',\n 'translator comments': 'translator_comments',\n 'notes': 'translator_comments',\n 'developer comments': 'developer_comments',\n 'state': 'fuzzy',\n}\n\n\nEXTRA_KEY = '__CSVL10N__EXTRA__'\n\n\ndef try_dialects(inputfile, fieldnames, dialect):\n #FIXME: does it verify at all if we don't actually step through the file?\n try:\n inputfile.seek(0)\n reader = csv.DictReader(inputfile, fieldnames=fieldnames, dialect=dialect, restkey=EXTRA_KEY)\n except csv.Error:\n try:\n inputfile.seek(0)\n reader = csv.DictReader(inputfile, fieldnames=fieldnames, dialect='default', restkey=EXTRA_KEY)\n except csv.Error:\n inputfile.seek(0)\n reader = csv.DictReader(inputfile, fieldnames=fieldnames, dialect='excel', restkey=EXTRA_KEY)\n return reader\n\n\ndef valid_fieldnames(fieldnames):\n \"\"\"Check if fieldnames are valid, that is at least one field is identified\n as the source.\n \"\"\"\n for fieldname in fieldnames:\n if fieldname == 'source':\n return True\n elif fieldname in fieldname_map and fieldname_map[fieldname] == 'source':\n return True\n return False\n\n\ndef detect_header(sample, dialect, fieldnames):\n \"\"\"Test if file has a header or not, also returns number of columns in first row\"\"\"\n inputfile = csv.StringIO(sample)\n try:\n reader = csv.reader(inputfile, dialect)\n except csv.Error:\n try:\n inputfile.seek(0)\n reader = csv.reader(inputfile, 'default')\n except csv.Error:\n inputfile.seek(0)\n reader = csv.reader(inputfile, 'excel')\n\n header = next(reader)\n columncount = max(len(header), 3)\n if valid_fieldnames(header):\n return header\n return fieldnames[:columncount]\n\n\nclass csvfile(base.TranslationStore):\n \"\"\"This class represents a .csv file with various lines. 
The default\n format contains three columns: location, source, target\n \"\"\"\n\n UnitClass = csvunit\n Name = \"Comma Separated Value\"\n Mimetypes = ['text/comma-separated-values', 'text/csv']\n Extensions = [\"csv\"]\n\n def __init__(self, inputfile=None, fieldnames=None, encoding='auto'):\n super(csvfile, self).__init__(encoding=encoding)\n if not fieldnames:\n self.fieldnames = ['location', 'source', 'target', 'id', 'fuzzy', 'context', 'translator_comments', 'developer_comments']\n else:\n self.fieldnames = fieldnames\n self.filename = getattr(inputfile, 'name', '')\n self.dialect = 'default'\n if inputfile is not None:\n csvsrc = inputfile.read()\n inputfile.close()\n self.parse(csvsrc)\n\n def parse(self, csvsrc):\n text, encoding = self.detect_encoding(csvsrc, default_encodings=['utf-8', 'utf-16'])\n #FIXME: raise parse error if encoding detection fails?\n self.encoding = encoding or 'utf-8'\n\n sniffer = csv.Sniffer()\n sample = text[:1024]\n\n try:\n self.dialect = sniffer.sniff(sample)\n if not self.dialect.escapechar:\n self.dialect.escapechar = '\\\\'\n if self.dialect.quoting == csv.QUOTE_MINIMAL:\n #HACKISH: most probably a default, not real detection\n self.dialect.quoting = csv.QUOTE_ALL\n self.dialect.doublequote = True\n except csv.Error:\n self.dialect = 'default'\n\n try:\n fieldnames = detect_header(sample, self.dialect, self.fieldnames)\n self.fieldnames = fieldnames\n except csv.Error:\n pass\n\n inputfile = csv.StringIO(text)\n reader = try_dialects(inputfile, self.fieldnames, self.dialect)\n\n first_row = True\n for row in reader:\n newce = self.UnitClass()\n newce.fromdict(row)\n if not first_row or not newce.match_header():\n self.addunit(newce)\n first_row = False\n\n def serialize(self, out):\n \"\"\"Write to file\"\"\"\n source = self.getoutput()\n if isinstance(source, six.text_type):\n # Python 3\n out.write(source.encode(self.encoding))\n else:\n out.write(source)\n\n def getoutput(self):\n output = csv.StringIO()\n writer = csv.DictWriter(output, self.fieldnames,\n extrasaction='ignore',\n dialect=self.dialect)\n writer.writeheader()\n for ce in self.units:\n writer.writerow(ce.todict())\n return output.getvalue()\n", "path": "translate/storage/csvl10n.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2002-2006 Zuza Software Foundation\n#\n# This file is part of translate.\n#\n# translate is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# translate is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"classes that hold units of comma-separated values (.csv) files (csvunit)\nor entire files (csvfile) for use with localisation\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport six\nif six.PY2:\n from backports import csv\nelse:\n import csv\n\nfrom translate.storage import base\n\n\nclass DefaultDialect(csv.excel):\n skipinitialspace = True\n quoting = csv.QUOTE_ALL\n escapechar = '\\\\'\n\n\ncsv.register_dialect('default', DefaultDialect)\n\n\[email protected]_2_unicode_compatible\nclass csvunit(base.TranslationUnit):\n spreadsheetescapes = [(\"+\", \"\\\\+\"), (\"-\", \"\\\\-\"), (\"=\", \"\\\\=\"), (\"'\", \"\\\\'\")]\n\n def __init__(self, source=None):\n super(csvunit, self).__init__(source)\n self.location = \"\"\n self.source = source or \"\"\n self.target = \"\"\n self.id = \"\"\n self.fuzzy = 'False'\n self.developer_comments = \"\"\n self.translator_comments = \"\"\n self.context = \"\"\n\n def getid(self):\n if self.id:\n return self.id\n\n result = self.source\n context = self.context\n if context:\n result = u\"%s\\04%s\" % (context, result)\n\n return result\n\n def setid(self, value):\n self.id = value\n\n def getlocations(self):\n #FIXME: do we need to support more than one location\n return [self.location]\n\n def addlocation(self, location):\n self.location = location\n\n def getcontext(self):\n return self.context\n\n def setcontext(self, value):\n self.context = value\n\n def getnotes(self, origin=None):\n if origin is None:\n result = self.translator_comments\n if self.developer_comments:\n if result:\n result += '\\n' + self.developer_comments\n else:\n result = self.developer_comments\n return result\n elif origin == \"translator\":\n return self.translator_comments\n elif origin in ('programmer', 'developer', 'source code'):\n return self.developer_comments\n else:\n raise ValueError(\"Comment type not valid\")\n\n def addnote(self, text, origin=None, position=\"append\"):\n if origin in ('programmer', 'developer', 'source code'):\n if position == 'append' and self.developer_comments:\n self.developer_comments += '\\n' + text\n elif position == 'prepend' and self.developer_comments:\n self.developer_comments = text + '\\n' + self.developer_comments\n else:\n self.developer_comments = text\n else:\n if position == 'append' and self.translator_comments:\n self.translator_comments += '\\n' + text\n elif position == 'prepend' and self.translator_comments:\n self.translator_comments = self.translator_comments + '\\n' + text\n else:\n self.translator_comments = text\n\n def removenotes(self):\n self.translator_comments = u''\n\n def isfuzzy(self):\n if self.fuzzy.lower() in ('1', 'x', 'true', 'yes', 'fuzzy'):\n return True\n return False\n\n def markfuzzy(self, value=True):\n if value:\n self.fuzzy = 'True'\n else:\n self.fuzzy = 'False'\n\n def match_header(self):\n \"\"\"see if unit might be a header\"\"\"\n some_value = False\n for key, value in six.iteritems(self.todict()):\n if value:\n some_value = True\n if key.lower() != 'fuzzy' and value and key.lower() != value.lower():\n return False\n return some_value\n\n def add_spreadsheet_escapes(self, source, target):\n \"\"\"add common spreadsheet escapes to two strings\"\"\"\n for unescaped, escaped in self.spreadsheetescapes:\n if source.startswith(unescaped):\n source = source.replace(unescaped, 
escaped, 1)\n if target.startswith(unescaped):\n target = target.replace(unescaped, escaped, 1)\n return source, target\n\n def remove_spreadsheet_escapes(self, source, target):\n \"\"\"remove common spreadsheet escapes from two strings\"\"\"\n for unescaped, escaped in self.spreadsheetescapes:\n if source.startswith(escaped):\n source = source.replace(escaped, unescaped, 1)\n if target.startswith(escaped):\n target = target.replace(escaped, unescaped, 1)\n return source, target\n\n def fromdict(self, cedict, encoding='utf-8'):\n for key, value in six.iteritems(cedict):\n rkey = fieldname_map.get(key, key)\n if value is None or key is None or key == EXTRA_KEY:\n continue\n if rkey == \"id\":\n self.id = value\n elif rkey == \"source\":\n self.source = value\n elif rkey == \"target\":\n self.target = value\n elif rkey == \"location\":\n self.location = value\n elif rkey == \"fuzzy\":\n self.fuzzy = value\n elif rkey == \"context\":\n self.context = value\n elif rkey == \"translator_comments\":\n self.translator_comments = value\n elif rkey == \"developer_comments\":\n self.developer_comments = value\n\n #self.source, self.target = self.remove_spreadsheet_escapes(self.source, self.target)\n\n def todict(self, **kwargs):\n #FIXME: use apis?\n #source, target = self.add_spreadsheet_escapes(self.source, self.target)\n source = self.source\n target = self.target\n output = {\n 'location': self.location,\n 'source': source,\n 'target': target,\n 'id': self.id,\n 'fuzzy': str(self.fuzzy),\n 'context': self.context,\n 'translator_comments': self.translator_comments,\n 'developer_comments': self.developer_comments,\n }\n\n return output\n\n def __str__(self):\n return str(self.todict())\n\n\nfieldname_map = {\n 'original': 'source',\n 'untranslated': 'source',\n 'translated': 'target',\n 'translation': 'target',\n 'identified': 'id',\n 'key': 'id',\n 'label': 'id',\n 'translator comments': 'translator_comments',\n 'notes': 'translator_comments',\n 'developer comments': 'developer_comments',\n 'state': 'fuzzy',\n}\n\n\nEXTRA_KEY = '__CSVL10N__EXTRA__'\n\n\ndef try_dialects(inputfile, fieldnames, dialect):\n #FIXME: does it verify at all if we don't actually step through the file?\n try:\n inputfile.seek(0)\n reader = csv.DictReader(inputfile, fieldnames=fieldnames, dialect=dialect, restkey=EXTRA_KEY)\n except csv.Error:\n try:\n inputfile.seek(0)\n reader = csv.DictReader(inputfile, fieldnames=fieldnames, dialect='default', restkey=EXTRA_KEY)\n except csv.Error:\n inputfile.seek(0)\n reader = csv.DictReader(inputfile, fieldnames=fieldnames, dialect='excel', restkey=EXTRA_KEY)\n return reader\n\n\ndef valid_fieldnames(fieldnames):\n \"\"\"Check if fieldnames are valid, that is at least one field is identified\n as the source.\n \"\"\"\n for fieldname in fieldnames:\n if fieldname == 'source':\n return True\n elif fieldname in fieldname_map and fieldname_map[fieldname] == 'source':\n return True\n return False\n\n\ndef detect_header(sample, dialect, fieldnames):\n \"\"\"Test if file has a header or not, also returns number of columns in first row\"\"\"\n inputfile = csv.StringIO(sample)\n try:\n reader = csv.reader(inputfile, dialect)\n except csv.Error:\n try:\n inputfile.seek(0)\n reader = csv.reader(inputfile, 'default')\n except csv.Error:\n inputfile.seek(0)\n reader = csv.reader(inputfile, 'excel')\n\n header = next(reader)\n columncount = max(len(header), 3)\n if valid_fieldnames(header):\n return header\n return fieldnames[:columncount]\n\n\nclass csvfile(base.TranslationStore):\n \"\"\"This 
class represents a .csv file with various lines. The default\n format contains three columns: location, source, target\n \"\"\"\n\n UnitClass = csvunit\n Name = \"Comma Separated Value\"\n Mimetypes = ['text/comma-separated-values', 'text/csv']\n Extensions = [\"csv\"]\n\n def __init__(self, inputfile=None, fieldnames=None, encoding='auto'):\n super(csvfile, self).__init__(encoding=encoding)\n if not fieldnames:\n self.fieldnames = ['location', 'source', 'target', 'id', 'fuzzy', 'context', 'translator_comments', 'developer_comments']\n else:\n self.fieldnames = fieldnames\n self.filename = getattr(inputfile, 'name', '')\n self.dialect = 'default'\n if inputfile is not None:\n csvsrc = inputfile.read()\n inputfile.close()\n self.parse(csvsrc)\n\n def parse(self, csvsrc):\n text, encoding = self.detect_encoding(csvsrc, default_encodings=['utf-8', 'utf-16'])\n #FIXME: raise parse error if encoding detection fails?\n self.encoding = encoding or 'utf-8'\n\n sniffer = csv.Sniffer()\n sample = text[:1024]\n\n try:\n self.dialect = sniffer.sniff(sample)\n if self.dialect.quoting == csv.QUOTE_MINIMAL:\n #HACKISH: most probably a default, not real detection\n self.dialect.quoting = csv.QUOTE_ALL\n self.dialect.doublequote = True\n except csv.Error:\n self.dialect = 'default'\n\n try:\n fieldnames = detect_header(sample, self.dialect, self.fieldnames)\n self.fieldnames = fieldnames\n except csv.Error:\n pass\n\n inputfile = csv.StringIO(text)\n reader = try_dialects(inputfile, self.fieldnames, self.dialect)\n\n first_row = True\n for row in reader:\n newce = self.UnitClass()\n newce.fromdict(row)\n if not first_row or not newce.match_header():\n self.addunit(newce)\n first_row = False\n\n def serialize(self, out):\n \"\"\"Write to file\"\"\"\n source = self.getoutput()\n if isinstance(source, six.text_type):\n # Python 3\n out.write(source.encode(self.encoding))\n else:\n out.write(source)\n\n def getoutput(self):\n output = csv.StringIO()\n writer = csv.DictWriter(output, self.fieldnames,\n extrasaction='ignore',\n dialect=self.dialect)\n writer.writeheader()\n for ce in self.units:\n writer.writerow(ce.todict())\n return output.getvalue()\n", "path": "translate/storage/csvl10n.py"}]}
| 3,890 | 220 |
gh_patches_debug_9163
|
rasdani/github-patches
|
git_diff
|
opsdroid__opsdroid-1540
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add 3.8 support to setup.py
We've been running CI against 3.8 for a while now, we should update the metadata in `setup.py` to explicitly state we support 3.8.
We should also update the [support table](https://github.com/opsdroid/opsdroid/blob/master/docs/maintaining/supported-python-versions.md) to say we support 3.8.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python3
2 import os
3 from setuptools import setup, find_packages
4 from setuptools.command.build_py import build_py
5 from setuptools.command.sdist import sdist
6 from setuptools.command.develop import develop
7 import versioneer
8
9 PACKAGE_NAME = "opsdroid"
10 HERE = os.path.abspath(os.path.dirname(__file__))
11 README = open(os.path.join(HERE, "README.md"), encoding="utf8").read()
12
13 PACKAGES = find_packages(
14 exclude=["tests", "tests.*", "modules", "modules.*", "docs", "docs.*"]
15 )
16
17
18 # For now we simply define the install_requires based on the contents
19 # of requirements.txt. In the future, install_requires may become much
20 # looser than the (automatically) resolved requirements.txt.
21 with open(os.path.join(HERE, "requirements.txt"), "r") as fh:
22 REQUIRES = [line.strip() for line in fh]
23
24
25 class Develop(develop):
26 """Custom `develop` command to always build mo files on install -e."""
27
28 def run(self):
29 self.run_command("compile_catalog")
30 develop.run(self) # old style class
31
32
33 class BuildPy(build_py):
34 """Custom `build_py` command to always build mo files for wheels."""
35
36 def run(self):
37 self.run_command("compile_catalog")
38 build_py.run(self) # old style class
39
40
41 class Sdist(sdist):
42 """Custom `sdist` command to ensure that mo files are always created."""
43
44 def run(self):
45 self.run_command("compile_catalog")
46 sdist.run(self) # old style class
47
48
49 setup(
50 name=PACKAGE_NAME,
51 version=versioneer.get_version(),
52 license="Apache License 2.0",
53 url="https://opsdroid.github.io/",
54 download_url="https://github.com/opsdroid/opsdroid/releases",
55 author="Jacob Tomlinson",
56 author_email="[email protected]",
57 description="An open source ChatOps bot framework.",
58 long_description=README,
59 long_description_content_type="text/markdown",
60 packages=PACKAGES,
61 include_package_data=True,
62 zip_safe=False,
63 platforms="any",
64 classifiers=[
65 "Development Status :: 4 - Beta",
66 "Environment :: Console",
67 "Framework :: AsyncIO",
68 "Intended Audience :: Developers",
69 "Intended Audience :: System Administrators",
70 "Intended Audience :: Information Technology",
71 "License :: OSI Approved :: Apache Software License",
72 "Programming Language :: Python",
73 "Programming Language :: Python :: 3",
74 "Programming Language :: Python :: 3 :: Only",
75 "Programming Language :: Python :: 3.6",
76 "Programming Language :: Python :: 3.7",
77 "Topic :: Communications :: Chat",
78 "Topic :: Scientific/Engineering :: Artificial Intelligence",
79 "Topic :: Software Development :: Libraries :: Python Modules",
80 ],
81 install_requires=REQUIRES,
82 test_suite="tests",
83 keywords=[
84 "bot",
85 "bot-framework",
86 "opsdroid",
87 "botkit",
88 "python3",
89 "asyncio",
90 "chatops",
91 "devops",
92 "nlu",
93 ],
94 setup_requires=["Babel"],
95 cmdclass=versioneer.get_cmdclass(
96 {"sdist": Sdist, "build_py": BuildPy, "develop": Develop}
97 ),
98 entry_points={"console_scripts": ["opsdroid = opsdroid.cli:cli"]},
99 )
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -74,6 +74,7 @@
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
"Topic :: Communications :: Chat",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development :: Libraries :: Python Modules",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -74,6 +74,7 @@\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n+ \"Programming Language :: Python :: 3.8\",\n \"Topic :: Communications :: Chat\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n", "issue": "Add 3.8 support to setup.py\nWe've been running CI against 3.8 for a while now, we should update the metadata in `setup.py` to explicitly state we support 3.8.\r\n\r\nWe should also update the [support table](https://github.com/opsdroid/opsdroid/blob/master/docs/maintaining/supported-python-versions.md) to say we support 3.8.\n", "before_files": [{"content": "#!/usr/bin/env python3\nimport os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.build_py import build_py\nfrom setuptools.command.sdist import sdist\nfrom setuptools.command.develop import develop\nimport versioneer\n\nPACKAGE_NAME = \"opsdroid\"\nHERE = os.path.abspath(os.path.dirname(__file__))\nREADME = open(os.path.join(HERE, \"README.md\"), encoding=\"utf8\").read()\n\nPACKAGES = find_packages(\n exclude=[\"tests\", \"tests.*\", \"modules\", \"modules.*\", \"docs\", \"docs.*\"]\n)\n\n\n# For now we simply define the install_requires based on the contents\n# of requirements.txt. In the future, install_requires may become much\n# looser than the (automatically) resolved requirements.txt.\nwith open(os.path.join(HERE, \"requirements.txt\"), \"r\") as fh:\n REQUIRES = [line.strip() for line in fh]\n\n\nclass Develop(develop):\n \"\"\"Custom `develop` command to always build mo files on install -e.\"\"\"\n\n def run(self):\n self.run_command(\"compile_catalog\")\n develop.run(self) # old style class\n\n\nclass BuildPy(build_py):\n \"\"\"Custom `build_py` command to always build mo files for wheels.\"\"\"\n\n def run(self):\n self.run_command(\"compile_catalog\")\n build_py.run(self) # old style class\n\n\nclass Sdist(sdist):\n \"\"\"Custom `sdist` command to ensure that mo files are always created.\"\"\"\n\n def run(self):\n self.run_command(\"compile_catalog\")\n sdist.run(self) # old style class\n\n\nsetup(\n name=PACKAGE_NAME,\n version=versioneer.get_version(),\n license=\"Apache License 2.0\",\n url=\"https://opsdroid.github.io/\",\n download_url=\"https://github.com/opsdroid/opsdroid/releases\",\n author=\"Jacob Tomlinson\",\n author_email=\"[email protected]\",\n description=\"An open source ChatOps bot framework.\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n packages=PACKAGES,\n include_package_data=True,\n zip_safe=False,\n platforms=\"any\",\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Environment :: Console\",\n \"Framework :: AsyncIO\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"Intended Audience :: Information Technology\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Communications :: Chat\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n install_requires=REQUIRES,\n test_suite=\"tests\",\n 
keywords=[\n \"bot\",\n \"bot-framework\",\n \"opsdroid\",\n \"botkit\",\n \"python3\",\n \"asyncio\",\n \"chatops\",\n \"devops\",\n \"nlu\",\n ],\n setup_requires=[\"Babel\"],\n cmdclass=versioneer.get_cmdclass(\n {\"sdist\": Sdist, \"build_py\": BuildPy, \"develop\": Develop}\n ),\n entry_points={\"console_scripts\": [\"opsdroid = opsdroid.cli:cli\"]},\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python3\nimport os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.build_py import build_py\nfrom setuptools.command.sdist import sdist\nfrom setuptools.command.develop import develop\nimport versioneer\n\nPACKAGE_NAME = \"opsdroid\"\nHERE = os.path.abspath(os.path.dirname(__file__))\nREADME = open(os.path.join(HERE, \"README.md\"), encoding=\"utf8\").read()\n\nPACKAGES = find_packages(\n exclude=[\"tests\", \"tests.*\", \"modules\", \"modules.*\", \"docs\", \"docs.*\"]\n)\n\n\n# For now we simply define the install_requires based on the contents\n# of requirements.txt. In the future, install_requires may become much\n# looser than the (automatically) resolved requirements.txt.\nwith open(os.path.join(HERE, \"requirements.txt\"), \"r\") as fh:\n REQUIRES = [line.strip() for line in fh]\n\n\nclass Develop(develop):\n \"\"\"Custom `develop` command to always build mo files on install -e.\"\"\"\n\n def run(self):\n self.run_command(\"compile_catalog\")\n develop.run(self) # old style class\n\n\nclass BuildPy(build_py):\n \"\"\"Custom `build_py` command to always build mo files for wheels.\"\"\"\n\n def run(self):\n self.run_command(\"compile_catalog\")\n build_py.run(self) # old style class\n\n\nclass Sdist(sdist):\n \"\"\"Custom `sdist` command to ensure that mo files are always created.\"\"\"\n\n def run(self):\n self.run_command(\"compile_catalog\")\n sdist.run(self) # old style class\n\n\nsetup(\n name=PACKAGE_NAME,\n version=versioneer.get_version(),\n license=\"Apache License 2.0\",\n url=\"https://opsdroid.github.io/\",\n download_url=\"https://github.com/opsdroid/opsdroid/releases\",\n author=\"Jacob Tomlinson\",\n author_email=\"[email protected]\",\n description=\"An open source ChatOps bot framework.\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n packages=PACKAGES,\n include_package_data=True,\n zip_safe=False,\n platforms=\"any\",\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Environment :: Console\",\n \"Framework :: AsyncIO\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"Intended Audience :: Information Technology\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Communications :: Chat\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n install_requires=REQUIRES,\n test_suite=\"tests\",\n keywords=[\n \"bot\",\n \"bot-framework\",\n \"opsdroid\",\n \"botkit\",\n \"python3\",\n \"asyncio\",\n \"chatops\",\n \"devops\",\n \"nlu\",\n ],\n setup_requires=[\"Babel\"],\n cmdclass=versioneer.get_cmdclass(\n {\"sdist\": Sdist, \"build_py\": BuildPy, \"develop\": Develop}\n ),\n entry_points={\"console_scripts\": [\"opsdroid = opsdroid.cli:cli\"]},\n)\n", "path": 
"setup.py"}]}
| 1,280 | 112 |
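
Trove classifiers like the `Programming Language :: Python :: 3.8` entry added in the diff above are plain metadata strings; once a release is built and installed they can be read back through `importlib.metadata`, which is a quick way to confirm that a supported-version entry actually made it into the published package. A minimal sketch of that check, assuming Python 3.8+ and that opsdroid is installed in the current environment (the filtering prefix is only an illustrative choice):

```
from importlib.metadata import metadata

# Read the installed package's metadata (an email.Message-like object).
meta = metadata("opsdroid")

# Collect the Python-version classifiers declared by setup.py.
python_classifiers = [
    c for c in meta.get_all("Classifier", [])
    if c.startswith("Programming Language :: Python :: 3.")
]
print(python_classifiers)
# With the patch applied, this list includes "Programming Language :: Python :: 3.8".
```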
gh_patches_debug_14665
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-9517
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SVG static assets broken and not displayed
Every asset with svg type are displayed as broken in web interface.

We installed sentry 9 with python and nginx as proxy on CentOS 7.2
When I look the content-type returned, svg files are not recognized.
```
$ curl -v https://sentry/_static/1533634345/sentry/dist/php.svg
(...)
< Content-Type: application/octet-stream
```
If I check the file on server, the svg is not recognized.
```
$ file php.svg
php.svg: HTML document, ASCII text, with very long lines, with no line terminators
```
If we serve the file directly with nginx, the correct mime type is sent as _image/svg+xml_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sentry/runner/settings.py`
Content:
```
1 """
2 sentry.runner.settings
3 ~~~~~~~~~~~~~~~~~~~~~~
4
5 :copyright: (c) 2015 by the Sentry Team, see AUTHORS for more details.
6 :license: BSD, see LICENSE for more details.
7 """
8 from __future__ import absolute_import, print_function
9
10 import os
11 import click
12
13 DEFAULT_SETTINGS_MODULE = 'sentry.conf.server'
14 DEFAULT_SETTINGS_CONF = 'config.yml'
15 DEFAULT_SETTINGS_OVERRIDE = 'sentry.conf.py'
16
17
18 def generate_secret_key():
19 from django.utils.crypto import get_random_string
20 chars = u'abcdefghijklmnopqrstuvwxyz0123456789!@#%^&*(-_=+)'
21 return get_random_string(50, chars)
22
23
24 def load_config_template(path, version='default'):
25 from pkg_resources import resource_string
26 return resource_string('sentry', 'data/config/%s.%s' % (path, version)).decode('utf8')
27
28
29 def generate_settings(dev=False):
30 """
31 This command is run when ``default_path`` doesn't exist, or ``init`` is
32 run and returns a string representing the default data to put into their
33 settings file.
34 """
35 context = {
36 'secret_key': generate_secret_key(),
37 'debug_flag': dev,
38 'mail.backend': 'console' if dev else 'smtp',
39 }
40
41 py = load_config_template(DEFAULT_SETTINGS_OVERRIDE, 'default') % context
42 yaml = load_config_template(DEFAULT_SETTINGS_CONF, 'default') % context
43 return py, yaml
44
45
46 def get_sentry_conf():
47 """
48 Fetch the SENTRY_CONF value, either from the click context
49 if available, or SENTRY_CONF environment variable.
50 """
51 try:
52 ctx = click.get_current_context()
53 return ctx.obj['config']
54 except (RuntimeError, KeyError, TypeError):
55 try:
56 return os.environ['SENTRY_CONF']
57 except KeyError:
58 return '~/.sentry'
59
60
61 def discover_configs():
62 """
63 Discover the locations of three configuration components:
64 * Config directory (~/.sentry)
65 * Optional python config file (~/.sentry/sentry.conf.py)
66 * Optional yaml config (~/.sentry/config.yml)
67 """
68 try:
69 config = os.environ['SENTRY_CONF']
70 except KeyError:
71 config = '~/.sentry'
72
73 config = os.path.expanduser(config)
74
75 # This is the old, now deprecated code path where SENTRY_CONF is pointed directly
76 # to a python file
77 if config.endswith(('.py', '.conf')) or os.path.isfile(config):
78 return (os.path.dirname(config), config, None, )
79
80 return (
81 config, os.path.join(config, DEFAULT_SETTINGS_OVERRIDE),
82 os.path.join(config, DEFAULT_SETTINGS_CONF),
83 )
84
85
86 def configure(ctx, py, yaml, skip_service_validation=False):
87 """
88 Given the two different config files, set up the environment.
89
90 NOTE: Will only execute once, so it's safe to call multiple times.
91 """
92 global __installed
93 if __installed:
94 return
95
96 # Make sure that our warnings are always displayed
97 import warnings
98 warnings.filterwarnings('default', '', Warning, r'^sentry')
99
100 # Add in additional mimetypes that are useful for our static files
101 # which aren't common in default system registries
102 import mimetypes
103 for type, ext in (
104 ('application/json', 'map'), ('application/font-woff', 'woff'),
105 ('application/font-woff2', 'woff2'), ('application/vnd.ms-fontobject', 'eot'),
106 ('application/x-font-ttf', 'ttf'), ('application/x-font-ttf',
107 'ttc'), ('font/opentype', 'otf'),
108 ):
109 mimetypes.add_type(type, '.' + ext)
110
111 from .importer import install
112
113 if yaml is None:
114 # `yaml` will be None when SENTRY_CONF is pointed
115 # directly to a file, in which case, this file must exist
116 if not os.path.exists(py):
117 if ctx:
118 raise click.ClickException(
119 "Configuration file does not exist. Use 'sentry init' to initialize the file."
120 )
121 raise ValueError(
122 "Configuration file does not exist at '%s'" % click.format_filename(py)
123 )
124 elif not os.path.exists(yaml) and not os.path.exists(py):
125 if ctx:
126 raise click.ClickException(
127 "Configuration file does not exist. Use 'sentry init' to initialize the file."
128 )
129 raise ValueError("Configuration file does not exist at '%s'" % click.format_filename(yaml))
130
131 # Add autoreload for config.yml file if needed
132 if yaml is not None and os.path.exists(yaml):
133 from sentry.utils.uwsgi import reload_on_change
134 reload_on_change(yaml)
135
136 os.environ['DJANGO_SETTINGS_MODULE'] = 'sentry_config'
137
138 install('sentry_config', py, DEFAULT_SETTINGS_MODULE)
139
140 # HACK: we need to force access of django.conf.settings to
141 # ensure we don't hit any import-driven recursive behavior
142 from django.conf import settings
143 hasattr(settings, 'INSTALLED_APPS')
144
145 from .initializer import initialize_app, on_configure
146 initialize_app(
147 {
148 'config_path': py,
149 'settings': settings,
150 'options': yaml,
151 },
152 skip_service_validation=skip_service_validation
153 )
154 on_configure({'settings': settings})
155
156 __installed = True
157
158
159 __installed = False
160
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/sentry/runner/settings.py b/src/sentry/runner/settings.py
--- a/src/sentry/runner/settings.py
+++ b/src/sentry/runner/settings.py
@@ -101,10 +101,14 @@
# which aren't common in default system registries
import mimetypes
for type, ext in (
- ('application/json', 'map'), ('application/font-woff', 'woff'),
- ('application/font-woff2', 'woff2'), ('application/vnd.ms-fontobject', 'eot'),
- ('application/x-font-ttf', 'ttf'), ('application/x-font-ttf',
- 'ttc'), ('font/opentype', 'otf'),
+ ('application/json', 'map'),
+ ('application/font-woff', 'woff'),
+ ('application/font-woff2', 'woff2'),
+ ('application/vnd.ms-fontobject', 'eot'),
+ ('application/x-font-ttf', 'ttf'),
+ ('application/x-font-ttf', 'ttc'),
+ ('font/opentype', 'otf'),
+ ('image/svg+xml', 'svg'),
):
mimetypes.add_type(type, '.' + ext)
|
{"golden_diff": "diff --git a/src/sentry/runner/settings.py b/src/sentry/runner/settings.py\n--- a/src/sentry/runner/settings.py\n+++ b/src/sentry/runner/settings.py\n@@ -101,10 +101,14 @@\n # which aren't common in default system registries\n import mimetypes\n for type, ext in (\n- ('application/json', 'map'), ('application/font-woff', 'woff'),\n- ('application/font-woff2', 'woff2'), ('application/vnd.ms-fontobject', 'eot'),\n- ('application/x-font-ttf', 'ttf'), ('application/x-font-ttf',\n- 'ttc'), ('font/opentype', 'otf'),\n+ ('application/json', 'map'),\n+ ('application/font-woff', 'woff'),\n+ ('application/font-woff2', 'woff2'),\n+ ('application/vnd.ms-fontobject', 'eot'),\n+ ('application/x-font-ttf', 'ttf'),\n+ ('application/x-font-ttf', 'ttc'),\n+ ('font/opentype', 'otf'),\n+ ('image/svg+xml', 'svg'),\n ):\n mimetypes.add_type(type, '.' + ext)\n", "issue": "SVG static assets broken and not displayed\nEvery asset with svg type are displayed as broken in web interface.\r\n\r\n\r\n\r\nWe installed sentry 9 with python and nginx as proxy on CentOS 7.2\r\n\r\nWhen I look the content-type returned, svg files are not recognized.\r\n\r\n```\r\n$ curl -v https://sentry/_static/1533634345/sentry/dist/php.svg\r\n(...)\r\n< Content-Type: application/octet-stream\r\n```\r\n\r\nIf I check the file on server, the svg is not recognized.\r\n\r\n```\r\n$ file php.svg \r\nphp.svg: HTML document, ASCII text, with very long lines, with no line terminators\r\n```\r\n\r\nIf we serve the file directly with nginx, the correct mime type is sent as _image/svg+xml_\r\n\n", "before_files": [{"content": "\"\"\"\nsentry.runner.settings\n~~~~~~~~~~~~~~~~~~~~~~\n\n:copyright: (c) 2015 by the Sentry Team, see AUTHORS for more details.\n:license: BSD, see LICENSE for more details.\n\"\"\"\nfrom __future__ import absolute_import, print_function\n\nimport os\nimport click\n\nDEFAULT_SETTINGS_MODULE = 'sentry.conf.server'\nDEFAULT_SETTINGS_CONF = 'config.yml'\nDEFAULT_SETTINGS_OVERRIDE = 'sentry.conf.py'\n\n\ndef generate_secret_key():\n from django.utils.crypto import get_random_string\n chars = u'abcdefghijklmnopqrstuvwxyz0123456789!@#%^&*(-_=+)'\n return get_random_string(50, chars)\n\n\ndef load_config_template(path, version='default'):\n from pkg_resources import resource_string\n return resource_string('sentry', 'data/config/%s.%s' % (path, version)).decode('utf8')\n\n\ndef generate_settings(dev=False):\n \"\"\"\n This command is run when ``default_path`` doesn't exist, or ``init`` is\n run and returns a string representing the default data to put into their\n settings file.\n \"\"\"\n context = {\n 'secret_key': generate_secret_key(),\n 'debug_flag': dev,\n 'mail.backend': 'console' if dev else 'smtp',\n }\n\n py = load_config_template(DEFAULT_SETTINGS_OVERRIDE, 'default') % context\n yaml = load_config_template(DEFAULT_SETTINGS_CONF, 'default') % context\n return py, yaml\n\n\ndef get_sentry_conf():\n \"\"\"\n Fetch the SENTRY_CONF value, either from the click context\n if available, or SENTRY_CONF environment variable.\n \"\"\"\n try:\n ctx = click.get_current_context()\n return ctx.obj['config']\n except (RuntimeError, KeyError, TypeError):\n try:\n return os.environ['SENTRY_CONF']\n except KeyError:\n return '~/.sentry'\n\n\ndef discover_configs():\n \"\"\"\n Discover the locations of three configuration components:\n * Config directory (~/.sentry)\n * Optional python config file (~/.sentry/sentry.conf.py)\n * Optional yaml config (~/.sentry/config.yml)\n \"\"\"\n try:\n config = os.environ['SENTRY_CONF']\n 
except KeyError:\n config = '~/.sentry'\n\n config = os.path.expanduser(config)\n\n # This is the old, now deprecated code path where SENTRY_CONF is pointed directly\n # to a python file\n if config.endswith(('.py', '.conf')) or os.path.isfile(config):\n return (os.path.dirname(config), config, None, )\n\n return (\n config, os.path.join(config, DEFAULT_SETTINGS_OVERRIDE),\n os.path.join(config, DEFAULT_SETTINGS_CONF),\n )\n\n\ndef configure(ctx, py, yaml, skip_service_validation=False):\n \"\"\"\n Given the two different config files, set up the environment.\n\n NOTE: Will only execute once, so it's safe to call multiple times.\n \"\"\"\n global __installed\n if __installed:\n return\n\n # Make sure that our warnings are always displayed\n import warnings\n warnings.filterwarnings('default', '', Warning, r'^sentry')\n\n # Add in additional mimetypes that are useful for our static files\n # which aren't common in default system registries\n import mimetypes\n for type, ext in (\n ('application/json', 'map'), ('application/font-woff', 'woff'),\n ('application/font-woff2', 'woff2'), ('application/vnd.ms-fontobject', 'eot'),\n ('application/x-font-ttf', 'ttf'), ('application/x-font-ttf',\n 'ttc'), ('font/opentype', 'otf'),\n ):\n mimetypes.add_type(type, '.' + ext)\n\n from .importer import install\n\n if yaml is None:\n # `yaml` will be None when SENTRY_CONF is pointed\n # directly to a file, in which case, this file must exist\n if not os.path.exists(py):\n if ctx:\n raise click.ClickException(\n \"Configuration file does not exist. Use 'sentry init' to initialize the file.\"\n )\n raise ValueError(\n \"Configuration file does not exist at '%s'\" % click.format_filename(py)\n )\n elif not os.path.exists(yaml) and not os.path.exists(py):\n if ctx:\n raise click.ClickException(\n \"Configuration file does not exist. 
Use 'sentry init' to initialize the file.\"\n )\n raise ValueError(\"Configuration file does not exist at '%s'\" % click.format_filename(yaml))\n\n # Add autoreload for config.yml file if needed\n if yaml is not None and os.path.exists(yaml):\n from sentry.utils.uwsgi import reload_on_change\n reload_on_change(yaml)\n\n os.environ['DJANGO_SETTINGS_MODULE'] = 'sentry_config'\n\n install('sentry_config', py, DEFAULT_SETTINGS_MODULE)\n\n # HACK: we need to force access of django.conf.settings to\n # ensure we don't hit any import-driven recursive behavior\n from django.conf import settings\n hasattr(settings, 'INSTALLED_APPS')\n\n from .initializer import initialize_app, on_configure\n initialize_app(\n {\n 'config_path': py,\n 'settings': settings,\n 'options': yaml,\n },\n skip_service_validation=skip_service_validation\n )\n on_configure({'settings': settings})\n\n __installed = True\n\n\n__installed = False\n", "path": "src/sentry/runner/settings.py"}], "after_files": [{"content": "\"\"\"\nsentry.runner.settings\n~~~~~~~~~~~~~~~~~~~~~~\n\n:copyright: (c) 2015 by the Sentry Team, see AUTHORS for more details.\n:license: BSD, see LICENSE for more details.\n\"\"\"\nfrom __future__ import absolute_import, print_function\n\nimport os\nimport click\n\nDEFAULT_SETTINGS_MODULE = 'sentry.conf.server'\nDEFAULT_SETTINGS_CONF = 'config.yml'\nDEFAULT_SETTINGS_OVERRIDE = 'sentry.conf.py'\n\n\ndef generate_secret_key():\n from django.utils.crypto import get_random_string\n chars = u'abcdefghijklmnopqrstuvwxyz0123456789!@#%^&*(-_=+)'\n return get_random_string(50, chars)\n\n\ndef load_config_template(path, version='default'):\n from pkg_resources import resource_string\n return resource_string('sentry', 'data/config/%s.%s' % (path, version)).decode('utf8')\n\n\ndef generate_settings(dev=False):\n \"\"\"\n This command is run when ``default_path`` doesn't exist, or ``init`` is\n run and returns a string representing the default data to put into their\n settings file.\n \"\"\"\n context = {\n 'secret_key': generate_secret_key(),\n 'debug_flag': dev,\n 'mail.backend': 'console' if dev else 'smtp',\n }\n\n py = load_config_template(DEFAULT_SETTINGS_OVERRIDE, 'default') % context\n yaml = load_config_template(DEFAULT_SETTINGS_CONF, 'default') % context\n return py, yaml\n\n\ndef get_sentry_conf():\n \"\"\"\n Fetch the SENTRY_CONF value, either from the click context\n if available, or SENTRY_CONF environment variable.\n \"\"\"\n try:\n ctx = click.get_current_context()\n return ctx.obj['config']\n except (RuntimeError, KeyError, TypeError):\n try:\n return os.environ['SENTRY_CONF']\n except KeyError:\n return '~/.sentry'\n\n\ndef discover_configs():\n \"\"\"\n Discover the locations of three configuration components:\n * Config directory (~/.sentry)\n * Optional python config file (~/.sentry/sentry.conf.py)\n * Optional yaml config (~/.sentry/config.yml)\n \"\"\"\n try:\n config = os.environ['SENTRY_CONF']\n except KeyError:\n config = '~/.sentry'\n\n config = os.path.expanduser(config)\n\n # This is the old, now deprecated code path where SENTRY_CONF is pointed directly\n # to a python file\n if config.endswith(('.py', '.conf')) or os.path.isfile(config):\n return (os.path.dirname(config), config, None, )\n\n return (\n config, os.path.join(config, DEFAULT_SETTINGS_OVERRIDE),\n os.path.join(config, DEFAULT_SETTINGS_CONF),\n )\n\n\ndef configure(ctx, py, yaml, skip_service_validation=False):\n \"\"\"\n Given the two different config files, set up the environment.\n\n NOTE: Will only execute once, so it's safe 
to call multiple times.\n \"\"\"\n global __installed\n if __installed:\n return\n\n # Make sure that our warnings are always displayed\n import warnings\n warnings.filterwarnings('default', '', Warning, r'^sentry')\n\n # Add in additional mimetypes that are useful for our static files\n # which aren't common in default system registries\n import mimetypes\n for type, ext in (\n ('application/json', 'map'),\n ('application/font-woff', 'woff'),\n ('application/font-woff2', 'woff2'),\n ('application/vnd.ms-fontobject', 'eot'),\n ('application/x-font-ttf', 'ttf'),\n ('application/x-font-ttf', 'ttc'),\n ('font/opentype', 'otf'),\n ('image/svg+xml', 'svg'),\n ):\n mimetypes.add_type(type, '.' + ext)\n\n from .importer import install\n\n if yaml is None:\n # `yaml` will be None when SENTRY_CONF is pointed\n # directly to a file, in which case, this file must exist\n if not os.path.exists(py):\n if ctx:\n raise click.ClickException(\n \"Configuration file does not exist. Use 'sentry init' to initialize the file.\"\n )\n raise ValueError(\n \"Configuration file does not exist at '%s'\" % click.format_filename(py)\n )\n elif not os.path.exists(yaml) and not os.path.exists(py):\n if ctx:\n raise click.ClickException(\n \"Configuration file does not exist. Use 'sentry init' to initialize the file.\"\n )\n raise ValueError(\"Configuration file does not exist at '%s'\" % click.format_filename(yaml))\n\n # Add autoreload for config.yml file if needed\n if yaml is not None and os.path.exists(yaml):\n from sentry.utils.uwsgi import reload_on_change\n reload_on_change(yaml)\n\n os.environ['DJANGO_SETTINGS_MODULE'] = 'sentry_config'\n\n install('sentry_config', py, DEFAULT_SETTINGS_MODULE)\n\n # HACK: we need to force access of django.conf.settings to\n # ensure we don't hit any import-driven recursive behavior\n from django.conf import settings\n hasattr(settings, 'INSTALLED_APPS')\n\n from .initializer import initialize_app, on_configure\n initialize_app(\n {\n 'config_path': py,\n 'settings': settings,\n 'options': yaml,\n },\n skip_service_validation=skip_service_validation\n )\n on_configure({'settings': settings})\n\n __installed = True\n\n\n__installed = False\n", "path": "src/sentry/runner/settings.py"}]}
| 2,029 | 265 |
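
The fix above hinges on Python's standard `mimetypes` registry, which static-file handlers consult when guessing a Content-Type from a file extension. A minimal sketch of the mechanism, using only the standard library (the `php.svg` filename is just an illustrative example and does not need to exist on disk):

```
import mimetypes

# On systems whose mime.types database lacks an .svg entry (as on the
# reporter's CentOS host), guess_type cannot resolve the extension and
# returns (None, None); the file then gets served as application/octet-stream.
print(mimetypes.guess_type("php.svg"))

# The patch registers the mapping during Sentry's configure() step.
mimetypes.add_type("image/svg+xml", ".svg")

# After registration the correct Content-Type is reported.
print(mimetypes.guess_type("php.svg"))  # ('image/svg+xml', None)
```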
gh_patches_debug_28156
|
rasdani/github-patches
|
git_diff
|
nltk__nltk-3080
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AttributeError: 'itertools.chain' object has no attribute 'lookup'
nltk version: 3.7.
python version: 3.9.15
From the following code:
```
padded_line = [list(pad_both_ends(word_tokenize(input_text), n=N))]
train_data, vocab = padded_everygram_pipeline(order=N, text=padded_line)
lm = Lidstone(gamma=0.2, vocabulary=vocab, order=N)
lm.fit(text=train_data)
```
where input_text is a long string and N as in the Ngram model. From the last line of code above, I get the following error:
>lm.fit(text=train_data)
> File "env\lib\site-packages\nltk\lm\api.py", line 110, in fit
> self.counts.update(self.vocab.lookup(sent) for sent in text)
> File "env\lib\site-packages\nltk\lm\counter.py", line 115, in update
> for sent in ngram_text:
> File "env\lib\site-packages\nltk\lm\api.py", line 110, in <genexpr>
> self.counts.update(self.vocab.lookup(sent) for sent in text)
> AttributeError: 'itertools.chain' object has no attribute 'lookup'
Can anyone fix this error for me?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nltk/lm/api.py`
Content:
```
1 # Natural Language Toolkit: Language Models
2 #
3 # Copyright (C) 2001-2022 NLTK Project
4 # Authors: Ilia Kurenkov <[email protected]>
5 # URL: <https://www.nltk.org/>
6 # For license information, see LICENSE.TXT
7 """Language Model Interface."""
8
9 import random
10 from abc import ABCMeta, abstractmethod
11 from bisect import bisect
12 from itertools import accumulate
13
14 from nltk.lm.counter import NgramCounter
15 from nltk.lm.util import log_base2
16 from nltk.lm.vocabulary import Vocabulary
17
18
19 class Smoothing(metaclass=ABCMeta):
20 """Ngram Smoothing Interface
21
22 Implements Chen & Goodman 1995's idea that all smoothing algorithms have
23 certain features in common. This should ideally allow smoothing algorithms to
24 work both with Backoff and Interpolation.
25 """
26
27 def __init__(self, vocabulary, counter):
28 """
29 :param vocabulary: The Ngram vocabulary object.
30 :type vocabulary: nltk.lm.vocab.Vocabulary
31 :param counter: The counts of the vocabulary items.
32 :type counter: nltk.lm.counter.NgramCounter
33 """
34 self.vocab = vocabulary
35 self.counts = counter
36
37 @abstractmethod
38 def unigram_score(self, word):
39 raise NotImplementedError()
40
41 @abstractmethod
42 def alpha_gamma(self, word, context):
43 raise NotImplementedError()
44
45
46 def _mean(items):
47 """Return average (aka mean) for sequence of items."""
48 return sum(items) / len(items)
49
50
51 def _random_generator(seed_or_generator):
52 if isinstance(seed_or_generator, random.Random):
53 return seed_or_generator
54 return random.Random(seed_or_generator)
55
56
57 def _weighted_choice(population, weights, random_generator=None):
58 """Like random.choice, but with weights.
59
60 Heavily inspired by python 3.6 `random.choices`.
61 """
62 if not population:
63 raise ValueError("Can't choose from empty population")
64 if len(population) != len(weights):
65 raise ValueError("The number of weights does not match the population")
66 cum_weights = list(accumulate(weights))
67 total = cum_weights[-1]
68 threshold = random_generator.random()
69 return population[bisect(cum_weights, total * threshold)]
70
71
72 class LanguageModel(metaclass=ABCMeta):
73 """ABC for Language Models.
74
75 Cannot be directly instantiated itself.
76
77 """
78
79 def __init__(self, order, vocabulary=None, counter=None):
80 """Creates new LanguageModel.
81
82 :param vocabulary: If provided, this vocabulary will be used instead
83 of creating a new one when training.
84 :type vocabulary: `nltk.lm.Vocabulary` or None
85 :param counter: If provided, use this object to count ngrams.
86 :type vocabulary: `nltk.lm.NgramCounter` or None
87 :param ngrams_fn: If given, defines how sentences in training text are turned to ngram
88 sequences.
89 :type ngrams_fn: function or None
90 :param pad_fn: If given, defines how sentences in training text are padded.
91 :type pad_fn: function or None
92 """
93 self.order = order
94 self.vocab = Vocabulary() if vocabulary is None else vocabulary
95 self.counts = NgramCounter() if counter is None else counter
96
97 def fit(self, text, vocabulary_text=None):
98 """Trains the model on a text.
99
100 :param text: Training text as a sequence of sentences.
101
102 """
103 if not self.vocab:
104 if vocabulary_text is None:
105 raise ValueError(
106 "Cannot fit without a vocabulary or text to create it from."
107 )
108 self.vocab.update(vocabulary_text)
109 self.counts.update(self.vocab.lookup(sent) for sent in text)
110
111 def score(self, word, context=None):
112 """Masks out of vocab (OOV) words and computes their model score.
113
114 For model-specific logic of calculating scores, see the `unmasked_score`
115 method.
116 """
117 return self.unmasked_score(
118 self.vocab.lookup(word), self.vocab.lookup(context) if context else None
119 )
120
121 @abstractmethod
122 def unmasked_score(self, word, context=None):
123 """Score a word given some optional context.
124
125 Concrete models are expected to provide an implementation.
126 Note that this method does not mask its arguments with the OOV label.
127 Use the `score` method for that.
128
129 :param str word: Word for which we want the score
130 :param tuple(str) context: Context the word is in.
131 If `None`, compute unigram score.
132 :param context: tuple(str) or None
133 :rtype: float
134 """
135 raise NotImplementedError()
136
137 def logscore(self, word, context=None):
138 """Evaluate the log score of this word in this context.
139
140 The arguments are the same as for `score` and `unmasked_score`.
141
142 """
143 return log_base2(self.score(word, context))
144
145 def context_counts(self, context):
146 """Helper method for retrieving counts for a given context.
147
148 Assumes context has been checked and oov words in it masked.
149 :type context: tuple(str) or None
150
151 """
152 return (
153 self.counts[len(context) + 1][context] if context else self.counts.unigrams
154 )
155
156 def entropy(self, text_ngrams):
157 """Calculate cross-entropy of model for given evaluation text.
158
159 :param Iterable(tuple(str)) text_ngrams: A sequence of ngram tuples.
160 :rtype: float
161
162 """
163 return -1 * _mean(
164 [self.logscore(ngram[-1], ngram[:-1]) for ngram in text_ngrams]
165 )
166
167 def perplexity(self, text_ngrams):
168 """Calculates the perplexity of the given text.
169
170 This is simply 2 ** cross-entropy for the text, so the arguments are the same.
171
172 """
173 return pow(2.0, self.entropy(text_ngrams))
174
175 def generate(self, num_words=1, text_seed=None, random_seed=None):
176 """Generate words from the model.
177
178 :param int num_words: How many words to generate. By default 1.
179 :param text_seed: Generation can be conditioned on preceding context.
180 :param random_seed: A random seed or an instance of `random.Random`. If provided,
181 makes the random sampling part of generation reproducible.
182 :return: One (str) word or a list of words generated from model.
183
184 Examples:
185
186 >>> from nltk.lm import MLE
187 >>> lm = MLE(2)
188 >>> lm.fit([[("a", "b"), ("b", "c")]], vocabulary_text=['a', 'b', 'c'])
189 >>> lm.fit([[("a",), ("b",), ("c",)]])
190 >>> lm.generate(random_seed=3)
191 'a'
192 >>> lm.generate(text_seed=['a'])
193 'b'
194
195 """
196 text_seed = [] if text_seed is None else list(text_seed)
197 random_generator = _random_generator(random_seed)
198 # This is the base recursion case.
199 if num_words == 1:
200 context = (
201 text_seed[-self.order + 1 :]
202 if len(text_seed) >= self.order
203 else text_seed
204 )
205 samples = self.context_counts(self.vocab.lookup(context))
206 while context and not samples:
207 context = context[1:] if len(context) > 1 else []
208 samples = self.context_counts(self.vocab.lookup(context))
209 # Sorting samples achieves two things:
210 # - reproducible randomness when sampling
211 # - turns Mapping into Sequence which `_weighted_choice` expects
212 samples = sorted(samples)
213 return _weighted_choice(
214 samples,
215 tuple(self.score(w, context) for w in samples),
216 random_generator,
217 )
218 # We build up text one word at a time using the preceding context.
219 generated = []
220 for _ in range(num_words):
221 generated.append(
222 self.generate(
223 num_words=1,
224 text_seed=text_seed + generated,
225 random_seed=random_generator,
226 )
227 )
228 return generated
229
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nltk/lm/api.py b/nltk/lm/api.py
--- a/nltk/lm/api.py
+++ b/nltk/lm/api.py
@@ -7,6 +7,7 @@
"""Language Model Interface."""
import random
+import warnings
from abc import ABCMeta, abstractmethod
from bisect import bisect
from itertools import accumulate
@@ -83,7 +84,7 @@
of creating a new one when training.
:type vocabulary: `nltk.lm.Vocabulary` or None
:param counter: If provided, use this object to count ngrams.
- :type vocabulary: `nltk.lm.NgramCounter` or None
+ :type counter: `nltk.lm.NgramCounter` or None
:param ngrams_fn: If given, defines how sentences in training text are turned to ngram
sequences.
:type ngrams_fn: function or None
@@ -91,6 +92,12 @@
:type pad_fn: function or None
"""
self.order = order
+ if vocabulary and not isinstance(vocabulary, Vocabulary):
+ warnings.warn(
+ f"The `vocabulary` argument passed to {self.__class__.__name__!r} "
+ "must be an instance of `nltk.lm.Vocabulary`.",
+ stacklevel=3,
+ )
self.vocab = Vocabulary() if vocabulary is None else vocabulary
self.counts = NgramCounter() if counter is None else counter
|
{"golden_diff": "diff --git a/nltk/lm/api.py b/nltk/lm/api.py\n--- a/nltk/lm/api.py\n+++ b/nltk/lm/api.py\n@@ -7,6 +7,7 @@\n \"\"\"Language Model Interface.\"\"\"\n \n import random\n+import warnings\n from abc import ABCMeta, abstractmethod\n from bisect import bisect\n from itertools import accumulate\n@@ -83,7 +84,7 @@\n of creating a new one when training.\n :type vocabulary: `nltk.lm.Vocabulary` or None\n :param counter: If provided, use this object to count ngrams.\n- :type vocabulary: `nltk.lm.NgramCounter` or None\n+ :type counter: `nltk.lm.NgramCounter` or None\n :param ngrams_fn: If given, defines how sentences in training text are turned to ngram\n sequences.\n :type ngrams_fn: function or None\n@@ -91,6 +92,12 @@\n :type pad_fn: function or None\n \"\"\"\n self.order = order\n+ if vocabulary and not isinstance(vocabulary, Vocabulary):\n+ warnings.warn(\n+ f\"The `vocabulary` argument passed to {self.__class__.__name__!r} \"\n+ \"must be an instance of `nltk.lm.Vocabulary`.\",\n+ stacklevel=3,\n+ )\n self.vocab = Vocabulary() if vocabulary is None else vocabulary\n self.counts = NgramCounter() if counter is None else counter\n", "issue": "AttributeError: 'itertools.chain' object has no attribute 'lookup'\nnltk version: 3.7.\r\npython version: 3.9.15\r\nFrom the following code:\r\n\r\n```\r\npadded_line = [list(pad_both_ends(word_tokenize(input_text), n=N))]\r\ntrain_data, vocab = padded_everygram_pipeline(order=N, text=padded_line)\r\nlm = Lidstone(gamma=0.2, vocabulary=vocab, order=N)\r\nlm.fit(text=train_data)\r\n```\r\n\r\nwhere input_text is a long string and N as in the Ngram model. From the last line of code above, I get the following error:\r\n\r\n>lm.fit(text=train_data)\r\n> File \"env\\lib\\site-packages\\nltk\\lm\\api.py\", line 110, in fit\r\n> self.counts.update(self.vocab.lookup(sent) for sent in text)\r\n> File \"env\\lib\\site-packages\\nltk\\lm\\counter.py\", line 115, in update\r\n> for sent in ngram_text:\r\n> File \"env\\lib\\site-packages\\nltk\\lm\\api.py\", line 110, in <genexpr>\r\n> self.counts.update(self.vocab.lookup(sent) for sent in text)\r\n> AttributeError: 'itertools.chain' object has no attribute 'lookup'\r\n\r\nCan anyone fix this error for me?\n", "before_files": [{"content": "# Natural Language Toolkit: Language Models\n#\n# Copyright (C) 2001-2022 NLTK Project\n# Authors: Ilia Kurenkov <[email protected]>\n# URL: <https://www.nltk.org/>\n# For license information, see LICENSE.TXT\n\"\"\"Language Model Interface.\"\"\"\n\nimport random\nfrom abc import ABCMeta, abstractmethod\nfrom bisect import bisect\nfrom itertools import accumulate\n\nfrom nltk.lm.counter import NgramCounter\nfrom nltk.lm.util import log_base2\nfrom nltk.lm.vocabulary import Vocabulary\n\n\nclass Smoothing(metaclass=ABCMeta):\n \"\"\"Ngram Smoothing Interface\n\n Implements Chen & Goodman 1995's idea that all smoothing algorithms have\n certain features in common. 
This should ideally allow smoothing algorithms to\n work both with Backoff and Interpolation.\n \"\"\"\n\n def __init__(self, vocabulary, counter):\n \"\"\"\n :param vocabulary: The Ngram vocabulary object.\n :type vocabulary: nltk.lm.vocab.Vocabulary\n :param counter: The counts of the vocabulary items.\n :type counter: nltk.lm.counter.NgramCounter\n \"\"\"\n self.vocab = vocabulary\n self.counts = counter\n\n @abstractmethod\n def unigram_score(self, word):\n raise NotImplementedError()\n\n @abstractmethod\n def alpha_gamma(self, word, context):\n raise NotImplementedError()\n\n\ndef _mean(items):\n \"\"\"Return average (aka mean) for sequence of items.\"\"\"\n return sum(items) / len(items)\n\n\ndef _random_generator(seed_or_generator):\n if isinstance(seed_or_generator, random.Random):\n return seed_or_generator\n return random.Random(seed_or_generator)\n\n\ndef _weighted_choice(population, weights, random_generator=None):\n \"\"\"Like random.choice, but with weights.\n\n Heavily inspired by python 3.6 `random.choices`.\n \"\"\"\n if not population:\n raise ValueError(\"Can't choose from empty population\")\n if len(population) != len(weights):\n raise ValueError(\"The number of weights does not match the population\")\n cum_weights = list(accumulate(weights))\n total = cum_weights[-1]\n threshold = random_generator.random()\n return population[bisect(cum_weights, total * threshold)]\n\n\nclass LanguageModel(metaclass=ABCMeta):\n \"\"\"ABC for Language Models.\n\n Cannot be directly instantiated itself.\n\n \"\"\"\n\n def __init__(self, order, vocabulary=None, counter=None):\n \"\"\"Creates new LanguageModel.\n\n :param vocabulary: If provided, this vocabulary will be used instead\n of creating a new one when training.\n :type vocabulary: `nltk.lm.Vocabulary` or None\n :param counter: If provided, use this object to count ngrams.\n :type vocabulary: `nltk.lm.NgramCounter` or None\n :param ngrams_fn: If given, defines how sentences in training text are turned to ngram\n sequences.\n :type ngrams_fn: function or None\n :param pad_fn: If given, defines how sentences in training text are padded.\n :type pad_fn: function or None\n \"\"\"\n self.order = order\n self.vocab = Vocabulary() if vocabulary is None else vocabulary\n self.counts = NgramCounter() if counter is None else counter\n\n def fit(self, text, vocabulary_text=None):\n \"\"\"Trains the model on a text.\n\n :param text: Training text as a sequence of sentences.\n\n \"\"\"\n if not self.vocab:\n if vocabulary_text is None:\n raise ValueError(\n \"Cannot fit without a vocabulary or text to create it from.\"\n )\n self.vocab.update(vocabulary_text)\n self.counts.update(self.vocab.lookup(sent) for sent in text)\n\n def score(self, word, context=None):\n \"\"\"Masks out of vocab (OOV) words and computes their model score.\n\n For model-specific logic of calculating scores, see the `unmasked_score`\n method.\n \"\"\"\n return self.unmasked_score(\n self.vocab.lookup(word), self.vocab.lookup(context) if context else None\n )\n\n @abstractmethod\n def unmasked_score(self, word, context=None):\n \"\"\"Score a word given some optional context.\n\n Concrete models are expected to provide an implementation.\n Note that this method does not mask its arguments with the OOV label.\n Use the `score` method for that.\n\n :param str word: Word for which we want the score\n :param tuple(str) context: Context the word is in.\n If `None`, compute unigram score.\n :param context: tuple(str) or None\n :rtype: float\n \"\"\"\n raise 
NotImplementedError()\n\n def logscore(self, word, context=None):\n \"\"\"Evaluate the log score of this word in this context.\n\n The arguments are the same as for `score` and `unmasked_score`.\n\n \"\"\"\n return log_base2(self.score(word, context))\n\n def context_counts(self, context):\n \"\"\"Helper method for retrieving counts for a given context.\n\n Assumes context has been checked and oov words in it masked.\n :type context: tuple(str) or None\n\n \"\"\"\n return (\n self.counts[len(context) + 1][context] if context else self.counts.unigrams\n )\n\n def entropy(self, text_ngrams):\n \"\"\"Calculate cross-entropy of model for given evaluation text.\n\n :param Iterable(tuple(str)) text_ngrams: A sequence of ngram tuples.\n :rtype: float\n\n \"\"\"\n return -1 * _mean(\n [self.logscore(ngram[-1], ngram[:-1]) for ngram in text_ngrams]\n )\n\n def perplexity(self, text_ngrams):\n \"\"\"Calculates the perplexity of the given text.\n\n This is simply 2 ** cross-entropy for the text, so the arguments are the same.\n\n \"\"\"\n return pow(2.0, self.entropy(text_ngrams))\n\n def generate(self, num_words=1, text_seed=None, random_seed=None):\n \"\"\"Generate words from the model.\n\n :param int num_words: How many words to generate. By default 1.\n :param text_seed: Generation can be conditioned on preceding context.\n :param random_seed: A random seed or an instance of `random.Random`. If provided,\n makes the random sampling part of generation reproducible.\n :return: One (str) word or a list of words generated from model.\n\n Examples:\n\n >>> from nltk.lm import MLE\n >>> lm = MLE(2)\n >>> lm.fit([[(\"a\", \"b\"), (\"b\", \"c\")]], vocabulary_text=['a', 'b', 'c'])\n >>> lm.fit([[(\"a\",), (\"b\",), (\"c\",)]])\n >>> lm.generate(random_seed=3)\n 'a'\n >>> lm.generate(text_seed=['a'])\n 'b'\n\n \"\"\"\n text_seed = [] if text_seed is None else list(text_seed)\n random_generator = _random_generator(random_seed)\n # This is the base recursion case.\n if num_words == 1:\n context = (\n text_seed[-self.order + 1 :]\n if len(text_seed) >= self.order\n else text_seed\n )\n samples = self.context_counts(self.vocab.lookup(context))\n while context and not samples:\n context = context[1:] if len(context) > 1 else []\n samples = self.context_counts(self.vocab.lookup(context))\n # Sorting samples achieves two things:\n # - reproducible randomness when sampling\n # - turns Mapping into Sequence which `_weighted_choice` expects\n samples = sorted(samples)\n return _weighted_choice(\n samples,\n tuple(self.score(w, context) for w in samples),\n random_generator,\n )\n # We build up text one word at a time using the preceding context.\n generated = []\n for _ in range(num_words):\n generated.append(\n self.generate(\n num_words=1,\n text_seed=text_seed + generated,\n random_seed=random_generator,\n )\n )\n return generated\n", "path": "nltk/lm/api.py"}], "after_files": [{"content": "# Natural Language Toolkit: Language Models\n#\n# Copyright (C) 2001-2022 NLTK Project\n# Authors: Ilia Kurenkov <[email protected]>\n# URL: <https://www.nltk.org/>\n# For license information, see LICENSE.TXT\n\"\"\"Language Model Interface.\"\"\"\n\nimport random\nimport warnings\nfrom abc import ABCMeta, abstractmethod\nfrom bisect import bisect\nfrom itertools import accumulate\n\nfrom nltk.lm.counter import NgramCounter\nfrom nltk.lm.util import log_base2\nfrom nltk.lm.vocabulary import Vocabulary\n\n\nclass Smoothing(metaclass=ABCMeta):\n \"\"\"Ngram Smoothing Interface\n\n Implements Chen & Goodman 1995's idea that all 
smoothing algorithms have\n certain features in common. This should ideally allow smoothing algorithms to\n work both with Backoff and Interpolation.\n \"\"\"\n\n def __init__(self, vocabulary, counter):\n \"\"\"\n :param vocabulary: The Ngram vocabulary object.\n :type vocabulary: nltk.lm.vocab.Vocabulary\n :param counter: The counts of the vocabulary items.\n :type counter: nltk.lm.counter.NgramCounter\n \"\"\"\n self.vocab = vocabulary\n self.counts = counter\n\n @abstractmethod\n def unigram_score(self, word):\n raise NotImplementedError()\n\n @abstractmethod\n def alpha_gamma(self, word, context):\n raise NotImplementedError()\n\n\ndef _mean(items):\n \"\"\"Return average (aka mean) for sequence of items.\"\"\"\n return sum(items) / len(items)\n\n\ndef _random_generator(seed_or_generator):\n if isinstance(seed_or_generator, random.Random):\n return seed_or_generator\n return random.Random(seed_or_generator)\n\n\ndef _weighted_choice(population, weights, random_generator=None):\n \"\"\"Like random.choice, but with weights.\n\n Heavily inspired by python 3.6 `random.choices`.\n \"\"\"\n if not population:\n raise ValueError(\"Can't choose from empty population\")\n if len(population) != len(weights):\n raise ValueError(\"The number of weights does not match the population\")\n cum_weights = list(accumulate(weights))\n total = cum_weights[-1]\n threshold = random_generator.random()\n return population[bisect(cum_weights, total * threshold)]\n\n\nclass LanguageModel(metaclass=ABCMeta):\n \"\"\"ABC for Language Models.\n\n Cannot be directly instantiated itself.\n\n \"\"\"\n\n def __init__(self, order, vocabulary=None, counter=None):\n \"\"\"Creates new LanguageModel.\n\n :param vocabulary: If provided, this vocabulary will be used instead\n of creating a new one when training.\n :type vocabulary: `nltk.lm.Vocabulary` or None\n :param counter: If provided, use this object to count ngrams.\n :type counter: `nltk.lm.NgramCounter` or None\n :param ngrams_fn: If given, defines how sentences in training text are turned to ngram\n sequences.\n :type ngrams_fn: function or None\n :param pad_fn: If given, defines how sentences in training text are padded.\n :type pad_fn: function or None\n \"\"\"\n self.order = order\n if vocabulary and not isinstance(vocabulary, Vocabulary):\n warnings.warn(\n f\"The `vocabulary` argument passed to {self.__class__.__name__!r} \"\n \"must be an instance of `nltk.lm.Vocabulary`.\",\n stacklevel=3,\n )\n self.vocab = Vocabulary() if vocabulary is None else vocabulary\n self.counts = NgramCounter() if counter is None else counter\n\n def fit(self, text, vocabulary_text=None):\n \"\"\"Trains the model on a text.\n\n :param text: Training text as a sequence of sentences.\n\n \"\"\"\n if not self.vocab:\n if vocabulary_text is None:\n raise ValueError(\n \"Cannot fit without a vocabulary or text to create it from.\"\n )\n self.vocab.update(vocabulary_text)\n self.counts.update(self.vocab.lookup(sent) for sent in text)\n\n def score(self, word, context=None):\n \"\"\"Masks out of vocab (OOV) words and computes their model score.\n\n For model-specific logic of calculating scores, see the `unmasked_score`\n method.\n \"\"\"\n return self.unmasked_score(\n self.vocab.lookup(word), self.vocab.lookup(context) if context else None\n )\n\n @abstractmethod\n def unmasked_score(self, word, context=None):\n \"\"\"Score a word given some optional context.\n\n Concrete models are expected to provide an implementation.\n Note that this method does not mask its arguments with the OOV 
label.\n Use the `score` method for that.\n\n :param str word: Word for which we want the score\n :param tuple(str) context: Context the word is in.\n If `None`, compute unigram score.\n :param context: tuple(str) or None\n :rtype: float\n \"\"\"\n raise NotImplementedError()\n\n def logscore(self, word, context=None):\n \"\"\"Evaluate the log score of this word in this context.\n\n The arguments are the same as for `score` and `unmasked_score`.\n\n \"\"\"\n return log_base2(self.score(word, context))\n\n def context_counts(self, context):\n \"\"\"Helper method for retrieving counts for a given context.\n\n Assumes context has been checked and oov words in it masked.\n :type context: tuple(str) or None\n\n \"\"\"\n return (\n self.counts[len(context) + 1][context] if context else self.counts.unigrams\n )\n\n def entropy(self, text_ngrams):\n \"\"\"Calculate cross-entropy of model for given evaluation text.\n\n :param Iterable(tuple(str)) text_ngrams: A sequence of ngram tuples.\n :rtype: float\n\n \"\"\"\n return -1 * _mean(\n [self.logscore(ngram[-1], ngram[:-1]) for ngram in text_ngrams]\n )\n\n def perplexity(self, text_ngrams):\n \"\"\"Calculates the perplexity of the given text.\n\n This is simply 2 ** cross-entropy for the text, so the arguments are the same.\n\n \"\"\"\n return pow(2.0, self.entropy(text_ngrams))\n\n def generate(self, num_words=1, text_seed=None, random_seed=None):\n \"\"\"Generate words from the model.\n\n :param int num_words: How many words to generate. By default 1.\n :param text_seed: Generation can be conditioned on preceding context.\n :param random_seed: A random seed or an instance of `random.Random`. If provided,\n makes the random sampling part of generation reproducible.\n :return: One (str) word or a list of words generated from model.\n\n Examples:\n\n >>> from nltk.lm import MLE\n >>> lm = MLE(2)\n >>> lm.fit([[(\"a\", \"b\"), (\"b\", \"c\")]], vocabulary_text=['a', 'b', 'c'])\n >>> lm.fit([[(\"a\",), (\"b\",), (\"c\",)]])\n >>> lm.generate(random_seed=3)\n 'a'\n >>> lm.generate(text_seed=['a'])\n 'b'\n\n \"\"\"\n text_seed = [] if text_seed is None else list(text_seed)\n random_generator = _random_generator(random_seed)\n # This is the base recursion case.\n if num_words == 1:\n context = (\n text_seed[-self.order + 1 :]\n if len(text_seed) >= self.order\n else text_seed\n )\n samples = self.context_counts(self.vocab.lookup(context))\n while context and not samples:\n context = context[1:] if len(context) > 1 else []\n samples = self.context_counts(self.vocab.lookup(context))\n # Sorting samples achieves two things:\n # - reproducible randomness when sampling\n # - turns Mapping into Sequence which `_weighted_choice` expects\n samples = sorted(samples)\n return _weighted_choice(\n samples,\n tuple(self.score(w, context) for w in samples),\n random_generator,\n )\n # We build up text one word at a time using the preceding context.\n generated = []\n for _ in range(num_words):\n generated.append(\n self.generate(\n num_words=1,\n text_seed=text_seed + generated,\n random_seed=random_generator,\n )\n )\n return generated\n", "path": "nltk/lm/api.py"}]}
| 2,877 | 331 |
gh_patches_debug_30665
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-contrib-1542
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add readthedocs documentation for elasticsearch instrumentation
Part of [1491](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/1491)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 This library allows tracing HTTP elasticsearch made by the
17 `elasticsearch <https://elasticsearch-py.readthedocs.io/en/master/>`_ library.
18
19 Usage
20 -----
21
22 .. code-block:: python
23
24 from opentelemetry.instrumentation.elasticsearch import ElasticsearchInstrumentor
25 import elasticsearch
26
27
28 # instrument elasticsearch
29 ElasticsearchInstrumentor().instrument()
30
31 # Using elasticsearch as normal now will automatically generate spans
32 es = elasticsearch.Elasticsearch()
33 es.index(index='my-index', doc_type='my-type', id=1, body={'my': 'data', 'timestamp': datetime.now()})
34 es.get(index='my-index', doc_type='my-type', id=1)
35
36 Elasticsearch instrumentation prefixes operation names with the string "Elasticsearch". This
37 can be changed to a different string by either setting the `OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX`
38 environment variable or by passing the prefix as an argument to the instrumentor. For example,
39
40
41 .. code-block:: python
42
43 ElasticsearchInstrumentor("my-custom-prefix").instrument()
44
45
46 The `instrument` method accepts the following keyword args:
47
48 tracer_provider (TracerProvider) - an optional tracer provider
49 request_hook (Callable) - a function with extra user-defined logic to be performed before performing the request
50 this function signature is:
51 def request_hook(span: Span, method: str, url: str, kwargs)
52 response_hook (Callable) - a function with extra user-defined logic to be performed after performing the request
53 this function signature is:
54 def response_hook(span: Span, response: dict)
55
56 for example:
57
58 .. code: python
59
60 from opentelemetry.instrumentation.elasticsearch import ElasticsearchInstrumentor
61 import elasticsearch
62
63 def request_hook(span, method, url, kwargs):
64 if span and span.is_recording():
65 span.set_attribute("custom_user_attribute_from_request_hook", "some-value")
66
67 def response_hook(span, response):
68 if span and span.is_recording():
69 span.set_attribute("custom_user_attribute_from_response_hook", "some-value")
70
71 # instrument elasticsearch with request and response hooks
72 ElasticsearchInstrumentor().instrument(request_hook=request_hook, response_hook=response_hook)
73
74 # Using elasticsearch as normal now will automatically generate spans,
75 # including user custom attributes added from the hooks
76 es = elasticsearch.Elasticsearch()
77 es.index(index='my-index', doc_type='my-type', id=1, body={'my': 'data', 'timestamp': datetime.now()})
78 es.get(index='my-index', doc_type='my-type', id=1)
79
80 API
81 ---
82 """
83
84 import re
85 from logging import getLogger
86 from os import environ
87 from typing import Collection
88
89 import elasticsearch
90 import elasticsearch.exceptions
91 from wrapt import wrap_function_wrapper as _wrap
92
93 from opentelemetry.instrumentation.elasticsearch.package import _instruments
94 from opentelemetry.instrumentation.elasticsearch.version import __version__
95 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
96 from opentelemetry.instrumentation.utils import unwrap
97 from opentelemetry.semconv.trace import SpanAttributes
98 from opentelemetry.trace import SpanKind, get_tracer
99
100 logger = getLogger(__name__)
101
102
103 # Values to add as tags from the actual
104 # payload returned by Elasticsearch, if any.
105 _ATTRIBUTES_FROM_RESULT = [
106 "found",
107 "timed_out",
108 "took",
109 ]
110
111 _DEFAULT_OP_NAME = "request"
112
113
114 class ElasticsearchInstrumentor(BaseInstrumentor):
115 """An instrumentor for elasticsearch
116 See `BaseInstrumentor`
117 """
118
119 def __init__(self, span_name_prefix=None):
120 if not span_name_prefix:
121 span_name_prefix = environ.get(
122 "OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX",
123 "Elasticsearch",
124 )
125 self._span_name_prefix = span_name_prefix.strip()
126 super().__init__()
127
128 def instrumentation_dependencies(self) -> Collection[str]:
129 return _instruments
130
131 def _instrument(self, **kwargs):
132 """
133 Instruments Elasticsearch module
134 """
135 tracer_provider = kwargs.get("tracer_provider")
136 tracer = get_tracer(__name__, __version__, tracer_provider)
137 request_hook = kwargs.get("request_hook")
138 response_hook = kwargs.get("response_hook")
139 _wrap(
140 elasticsearch,
141 "Transport.perform_request",
142 _wrap_perform_request(
143 tracer, self._span_name_prefix, request_hook, response_hook
144 ),
145 )
146
147 def _uninstrument(self, **kwargs):
148 unwrap(elasticsearch.Transport, "perform_request")
149
150
151 _regex_doc_url = re.compile(r"/_doc/([^/]+)")
152
153 # search api https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html
154 _regex_search_url = re.compile(r"/([^/]+)/_search[/]?")
155
156
157 def _wrap_perform_request(
158 tracer, span_name_prefix, request_hook=None, response_hook=None
159 ):
160 # pylint: disable=R0912,R0914
161 def wrapper(wrapped, _, args, kwargs):
162 method = url = None
163 try:
164 method, url, *_ = args
165 except IndexError:
166 logger.warning(
167 "expected perform_request to receive two positional arguments. "
168 "Got %d",
169 len(args),
170 )
171
172 op_name = span_name_prefix + (url or method or _DEFAULT_OP_NAME)
173
174 doc_id = None
175 search_target = None
176
177 if url:
178 # TODO: This regex-based solution avoids creating an unbounded number of span names, but should be replaced by instrumenting individual Elasticsearch methods instead of Transport.perform_request()
179 # A limitation of the regex is that only the '_doc' mapping type is supported. Mapping types are deprecated since Elasticsearch 7
180 # https://github.com/open-telemetry/opentelemetry-python-contrib/issues/708
181 match = _regex_doc_url.search(url)
182 if match is not None:
183 # Remove the full document ID from the URL
184 doc_span = match.span()
185 op_name = (
186 span_name_prefix
187 + url[: doc_span[0]]
188 + "/_doc/:id"
189 + url[doc_span[1] :]
190 )
191 # Put the document ID in attributes
192 doc_id = match.group(1)
193 match = _regex_search_url.search(url)
194 if match is not None:
195 op_name = span_name_prefix + "/<target>/_search"
196 search_target = match.group(1)
197
198 params = kwargs.get("params", {})
199 body = kwargs.get("body", None)
200
201 with tracer.start_as_current_span(
202 op_name,
203 kind=SpanKind.CLIENT,
204 ) as span:
205
206 if callable(request_hook):
207 request_hook(span, method, url, kwargs)
208
209 if span.is_recording():
210 attributes = {
211 SpanAttributes.DB_SYSTEM: "elasticsearch",
212 }
213 if url:
214 attributes["elasticsearch.url"] = url
215 if method:
216 attributes["elasticsearch.method"] = method
217 if body:
218 attributes[SpanAttributes.DB_STATEMENT] = str(body)
219 if params:
220 attributes["elasticsearch.params"] = str(params)
221 if doc_id:
222 attributes["elasticsearch.id"] = doc_id
223 if search_target:
224 attributes["elasticsearch.target"] = search_target
225 for key, value in attributes.items():
226 span.set_attribute(key, value)
227
228 rv = wrapped(*args, **kwargs)
229 if isinstance(rv, dict) and span.is_recording():
230 for member in _ATTRIBUTES_FROM_RESULT:
231 if member in rv:
232 span.set_attribute(
233 f"elasticsearch.{member}",
234 str(rv[member]),
235 )
236
237 if callable(response_hook):
238 response_hook(span, rv)
239 return rv
240
241 return wrapper
242
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py
--- a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py
+++ b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py
@@ -34,7 +34,7 @@
es.get(index='my-index', doc_type='my-type', id=1)
Elasticsearch instrumentation prefixes operation names with the string "Elasticsearch". This
-can be changed to a different string by either setting the `OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX`
+can be changed to a different string by either setting the OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX
environment variable or by passing the prefix as an argument to the instrumentor. For example,
@@ -42,16 +42,15 @@
ElasticsearchInstrumentor("my-custom-prefix").instrument()
-
-The `instrument` method accepts the following keyword args:
-
+The instrument() method accepts the following keyword args:
tracer_provider (TracerProvider) - an optional tracer provider
request_hook (Callable) - a function with extra user-defined logic to be performed before performing the request
- this function signature is:
- def request_hook(span: Span, method: str, url: str, kwargs)
+this function signature is:
+def request_hook(span: Span, method: str, url: str, kwargs)
+
response_hook (Callable) - a function with extra user-defined logic to be performed after performing the request
- this function signature is:
- def response_hook(span: Span, response: dict)
+this function signature is:
+def response_hook(span: Span, response: dict)
for example:
|
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py\n--- a/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py\n+++ b/instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py\n@@ -34,7 +34,7 @@\n es.get(index='my-index', doc_type='my-type', id=1)\n \n Elasticsearch instrumentation prefixes operation names with the string \"Elasticsearch\". This\n-can be changed to a different string by either setting the `OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX`\n+can be changed to a different string by either setting the OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX\n environment variable or by passing the prefix as an argument to the instrumentor. For example,\n \n \n@@ -42,16 +42,15 @@\n \n ElasticsearchInstrumentor(\"my-custom-prefix\").instrument()\n \n-\n-The `instrument` method accepts the following keyword args:\n-\n+The instrument() method accepts the following keyword args:\n tracer_provider (TracerProvider) - an optional tracer provider\n request_hook (Callable) - a function with extra user-defined logic to be performed before performing the request\n- this function signature is:\n- def request_hook(span: Span, method: str, url: str, kwargs)\n+this function signature is:\n+def request_hook(span: Span, method: str, url: str, kwargs)\n+\n response_hook (Callable) - a function with extra user-defined logic to be performed after performing the request\n- this function signature is:\n- def response_hook(span: Span, response: dict)\n+this function signature is:\n+def response_hook(span: Span, response: dict)\n \n for example:\n", "issue": "Add readthedocs documentation for elasticsearch instrumentation\nPart of [1491](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/1491)\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis library allows tracing HTTP elasticsearch made by the\n`elasticsearch <https://elasticsearch-py.readthedocs.io/en/master/>`_ library.\n\nUsage\n-----\n\n.. code-block:: python\n\n from opentelemetry.instrumentation.elasticsearch import ElasticsearchInstrumentor\n import elasticsearch\n\n\n # instrument elasticsearch\n ElasticsearchInstrumentor().instrument()\n\n # Using elasticsearch as normal now will automatically generate spans\n es = elasticsearch.Elasticsearch()\n es.index(index='my-index', doc_type='my-type', id=1, body={'my': 'data', 'timestamp': datetime.now()})\n es.get(index='my-index', doc_type='my-type', id=1)\n\nElasticsearch instrumentation prefixes operation names with the string \"Elasticsearch\". 
This\ncan be changed to a different string by either setting the `OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX`\nenvironment variable or by passing the prefix as an argument to the instrumentor. For example,\n\n\n.. code-block:: python\n\n ElasticsearchInstrumentor(\"my-custom-prefix\").instrument()\n\n\nThe `instrument` method accepts the following keyword args:\n\ntracer_provider (TracerProvider) - an optional tracer provider\nrequest_hook (Callable) - a function with extra user-defined logic to be performed before performing the request\n this function signature is:\n def request_hook(span: Span, method: str, url: str, kwargs)\nresponse_hook (Callable) - a function with extra user-defined logic to be performed after performing the request\n this function signature is:\n def response_hook(span: Span, response: dict)\n\nfor example:\n\n.. code: python\n\n from opentelemetry.instrumentation.elasticsearch import ElasticsearchInstrumentor\n import elasticsearch\n\n def request_hook(span, method, url, kwargs):\n if span and span.is_recording():\n span.set_attribute(\"custom_user_attribute_from_request_hook\", \"some-value\")\n\n def response_hook(span, response):\n if span and span.is_recording():\n span.set_attribute(\"custom_user_attribute_from_response_hook\", \"some-value\")\n\n # instrument elasticsearch with request and response hooks\n ElasticsearchInstrumentor().instrument(request_hook=request_hook, response_hook=response_hook)\n\n # Using elasticsearch as normal now will automatically generate spans,\n # including user custom attributes added from the hooks\n es = elasticsearch.Elasticsearch()\n es.index(index='my-index', doc_type='my-type', id=1, body={'my': 'data', 'timestamp': datetime.now()})\n es.get(index='my-index', doc_type='my-type', id=1)\n\nAPI\n---\n\"\"\"\n\nimport re\nfrom logging import getLogger\nfrom os import environ\nfrom typing import Collection\n\nimport elasticsearch\nimport elasticsearch.exceptions\nfrom wrapt import wrap_function_wrapper as _wrap\n\nfrom opentelemetry.instrumentation.elasticsearch.package import _instruments\nfrom opentelemetry.instrumentation.elasticsearch.version import __version__\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.utils import unwrap\nfrom opentelemetry.semconv.trace import SpanAttributes\nfrom opentelemetry.trace import SpanKind, get_tracer\n\nlogger = getLogger(__name__)\n\n\n# Values to add as tags from the actual\n# payload returned by Elasticsearch, if any.\n_ATTRIBUTES_FROM_RESULT = [\n \"found\",\n \"timed_out\",\n \"took\",\n]\n\n_DEFAULT_OP_NAME = \"request\"\n\n\nclass ElasticsearchInstrumentor(BaseInstrumentor):\n \"\"\"An instrumentor for elasticsearch\n See `BaseInstrumentor`\n \"\"\"\n\n def __init__(self, span_name_prefix=None):\n if not span_name_prefix:\n span_name_prefix = environ.get(\n \"OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX\",\n \"Elasticsearch\",\n )\n self._span_name_prefix = span_name_prefix.strip()\n super().__init__()\n\n def instrumentation_dependencies(self) -> Collection[str]:\n return _instruments\n\n def _instrument(self, **kwargs):\n \"\"\"\n Instruments Elasticsearch module\n \"\"\"\n tracer_provider = kwargs.get(\"tracer_provider\")\n tracer = get_tracer(__name__, __version__, tracer_provider)\n request_hook = kwargs.get(\"request_hook\")\n response_hook = kwargs.get(\"response_hook\")\n _wrap(\n elasticsearch,\n \"Transport.perform_request\",\n _wrap_perform_request(\n tracer, self._span_name_prefix, request_hook, response_hook\n ),\n 
)\n\n def _uninstrument(self, **kwargs):\n unwrap(elasticsearch.Transport, \"perform_request\")\n\n\n_regex_doc_url = re.compile(r\"/_doc/([^/]+)\")\n\n# search api https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html\n_regex_search_url = re.compile(r\"/([^/]+)/_search[/]?\")\n\n\ndef _wrap_perform_request(\n tracer, span_name_prefix, request_hook=None, response_hook=None\n):\n # pylint: disable=R0912,R0914\n def wrapper(wrapped, _, args, kwargs):\n method = url = None\n try:\n method, url, *_ = args\n except IndexError:\n logger.warning(\n \"expected perform_request to receive two positional arguments. \"\n \"Got %d\",\n len(args),\n )\n\n op_name = span_name_prefix + (url or method or _DEFAULT_OP_NAME)\n\n doc_id = None\n search_target = None\n\n if url:\n # TODO: This regex-based solution avoids creating an unbounded number of span names, but should be replaced by instrumenting individual Elasticsearch methods instead of Transport.perform_request()\n # A limitation of the regex is that only the '_doc' mapping type is supported. Mapping types are deprecated since Elasticsearch 7\n # https://github.com/open-telemetry/opentelemetry-python-contrib/issues/708\n match = _regex_doc_url.search(url)\n if match is not None:\n # Remove the full document ID from the URL\n doc_span = match.span()\n op_name = (\n span_name_prefix\n + url[: doc_span[0]]\n + \"/_doc/:id\"\n + url[doc_span[1] :]\n )\n # Put the document ID in attributes\n doc_id = match.group(1)\n match = _regex_search_url.search(url)\n if match is not None:\n op_name = span_name_prefix + \"/<target>/_search\"\n search_target = match.group(1)\n\n params = kwargs.get(\"params\", {})\n body = kwargs.get(\"body\", None)\n\n with tracer.start_as_current_span(\n op_name,\n kind=SpanKind.CLIENT,\n ) as span:\n\n if callable(request_hook):\n request_hook(span, method, url, kwargs)\n\n if span.is_recording():\n attributes = {\n SpanAttributes.DB_SYSTEM: \"elasticsearch\",\n }\n if url:\n attributes[\"elasticsearch.url\"] = url\n if method:\n attributes[\"elasticsearch.method\"] = method\n if body:\n attributes[SpanAttributes.DB_STATEMENT] = str(body)\n if params:\n attributes[\"elasticsearch.params\"] = str(params)\n if doc_id:\n attributes[\"elasticsearch.id\"] = doc_id\n if search_target:\n attributes[\"elasticsearch.target\"] = search_target\n for key, value in attributes.items():\n span.set_attribute(key, value)\n\n rv = wrapped(*args, **kwargs)\n if isinstance(rv, dict) and span.is_recording():\n for member in _ATTRIBUTES_FROM_RESULT:\n if member in rv:\n span.set_attribute(\n f\"elasticsearch.{member}\",\n str(rv[member]),\n )\n\n if callable(response_hook):\n response_hook(span, rv)\n return rv\n\n return wrapper\n", "path": "instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis library allows tracing HTTP 
elasticsearch made by the\n`elasticsearch <https://elasticsearch-py.readthedocs.io/en/master/>`_ library.\n\nUsage\n-----\n\n.. code-block:: python\n\n from opentelemetry.instrumentation.elasticsearch import ElasticsearchInstrumentor\n import elasticsearch\n\n\n # instrument elasticsearch\n ElasticsearchInstrumentor().instrument()\n\n # Using elasticsearch as normal now will automatically generate spans\n es = elasticsearch.Elasticsearch()\n es.index(index='my-index', doc_type='my-type', id=1, body={'my': 'data', 'timestamp': datetime.now()})\n es.get(index='my-index', doc_type='my-type', id=1)\n\nElasticsearch instrumentation prefixes operation names with the string \"Elasticsearch\". This\ncan be changed to a different string by either setting the OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX\nenvironment variable or by passing the prefix as an argument to the instrumentor. For example,\n\n\n.. code-block:: python\n\n ElasticsearchInstrumentor(\"my-custom-prefix\").instrument()\n\nThe instrument() method accepts the following keyword args:\ntracer_provider (TracerProvider) - an optional tracer provider\nrequest_hook (Callable) - a function with extra user-defined logic to be performed before performing the request\nthis function signature is:\ndef request_hook(span: Span, method: str, url: str, kwargs)\n\nresponse_hook (Callable) - a function with extra user-defined logic to be performed after performing the request\nthis function signature is:\ndef response_hook(span: Span, response: dict)\n\nfor example:\n\n.. code: python\n\n from opentelemetry.instrumentation.elasticsearch import ElasticsearchInstrumentor\n import elasticsearch\n\n def request_hook(span, method, url, kwargs):\n if span and span.is_recording():\n span.set_attribute(\"custom_user_attribute_from_request_hook\", \"some-value\")\n\n def response_hook(span, response):\n if span and span.is_recording():\n span.set_attribute(\"custom_user_attribute_from_response_hook\", \"some-value\")\n\n # instrument elasticsearch with request and response hooks\n ElasticsearchInstrumentor().instrument(request_hook=request_hook, response_hook=response_hook)\n\n # Using elasticsearch as normal now will automatically generate spans,\n # including user custom attributes added from the hooks\n es = elasticsearch.Elasticsearch()\n es.index(index='my-index', doc_type='my-type', id=1, body={'my': 'data', 'timestamp': datetime.now()})\n es.get(index='my-index', doc_type='my-type', id=1)\n\nAPI\n---\n\"\"\"\n\nimport re\nfrom logging import getLogger\nfrom os import environ\nfrom typing import Collection\n\nimport elasticsearch\nimport elasticsearch.exceptions\nfrom wrapt import wrap_function_wrapper as _wrap\n\nfrom opentelemetry.instrumentation.elasticsearch.package import _instruments\nfrom opentelemetry.instrumentation.elasticsearch.version import __version__\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.utils import unwrap\nfrom opentelemetry.semconv.trace import SpanAttributes\nfrom opentelemetry.trace import SpanKind, get_tracer\n\nlogger = getLogger(__name__)\n\n\n# Values to add as tags from the actual\n# payload returned by Elasticsearch, if any.\n_ATTRIBUTES_FROM_RESULT = [\n \"found\",\n \"timed_out\",\n \"took\",\n]\n\n_DEFAULT_OP_NAME = \"request\"\n\n\nclass ElasticsearchInstrumentor(BaseInstrumentor):\n \"\"\"An instrumentor for elasticsearch\n See `BaseInstrumentor`\n \"\"\"\n\n def __init__(self, span_name_prefix=None):\n if not span_name_prefix:\n span_name_prefix = 
environ.get(\n \"OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX\",\n \"Elasticsearch\",\n )\n self._span_name_prefix = span_name_prefix.strip()\n super().__init__()\n\n def instrumentation_dependencies(self) -> Collection[str]:\n return _instruments\n\n def _instrument(self, **kwargs):\n \"\"\"\n Instruments Elasticsearch module\n \"\"\"\n tracer_provider = kwargs.get(\"tracer_provider\")\n tracer = get_tracer(__name__, __version__, tracer_provider)\n request_hook = kwargs.get(\"request_hook\")\n response_hook = kwargs.get(\"response_hook\")\n _wrap(\n elasticsearch,\n \"Transport.perform_request\",\n _wrap_perform_request(\n tracer, self._span_name_prefix, request_hook, response_hook\n ),\n )\n\n def _uninstrument(self, **kwargs):\n unwrap(elasticsearch.Transport, \"perform_request\")\n\n\n_regex_doc_url = re.compile(r\"/_doc/([^/]+)\")\n\n# search api https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html\n_regex_search_url = re.compile(r\"/([^/]+)/_search[/]?\")\n\n\ndef _wrap_perform_request(\n tracer, span_name_prefix, request_hook=None, response_hook=None\n):\n # pylint: disable=R0912,R0914\n def wrapper(wrapped, _, args, kwargs):\n method = url = None\n try:\n method, url, *_ = args\n except IndexError:\n logger.warning(\n \"expected perform_request to receive two positional arguments. \"\n \"Got %d\",\n len(args),\n )\n\n op_name = span_name_prefix + (url or method or _DEFAULT_OP_NAME)\n\n doc_id = None\n search_target = None\n\n if url:\n # TODO: This regex-based solution avoids creating an unbounded number of span names, but should be replaced by instrumenting individual Elasticsearch methods instead of Transport.perform_request()\n # A limitation of the regex is that only the '_doc' mapping type is supported. Mapping types are deprecated since Elasticsearch 7\n # https://github.com/open-telemetry/opentelemetry-python-contrib/issues/708\n match = _regex_doc_url.search(url)\n if match is not None:\n # Remove the full document ID from the URL\n doc_span = match.span()\n op_name = (\n span_name_prefix\n + url[: doc_span[0]]\n + \"/_doc/:id\"\n + url[doc_span[1] :]\n )\n # Put the document ID in attributes\n doc_id = match.group(1)\n match = _regex_search_url.search(url)\n if match is not None:\n op_name = span_name_prefix + \"/<target>/_search\"\n search_target = match.group(1)\n\n params = kwargs.get(\"params\", {})\n body = kwargs.get(\"body\", None)\n\n with tracer.start_as_current_span(\n op_name,\n kind=SpanKind.CLIENT,\n ) as span:\n\n if callable(request_hook):\n request_hook(span, method, url, kwargs)\n\n if span.is_recording():\n attributes = {\n SpanAttributes.DB_SYSTEM: \"elasticsearch\",\n }\n if url:\n attributes[\"elasticsearch.url\"] = url\n if method:\n attributes[\"elasticsearch.method\"] = method\n if body:\n attributes[SpanAttributes.DB_STATEMENT] = str(body)\n if params:\n attributes[\"elasticsearch.params\"] = str(params)\n if doc_id:\n attributes[\"elasticsearch.id\"] = doc_id\n if search_target:\n attributes[\"elasticsearch.target\"] = search_target\n for key, value in attributes.items():\n span.set_attribute(key, value)\n\n rv = wrapped(*args, **kwargs)\n if isinstance(rv, dict) and span.is_recording():\n for member in _ATTRIBUTES_FROM_RESULT:\n if member in rv:\n span.set_attribute(\n f\"elasticsearch.{member}\",\n str(rv[member]),\n )\n\n if callable(response_hook):\n response_hook(span, rv)\n return rv\n\n return wrapper\n", "path": 
"instrumentation/opentelemetry-instrumentation-elasticsearch/src/opentelemetry/instrumentation/elasticsearch/__init__.py"}]}
| 2,715 | 418 |
gh_patches_debug_63641
|
rasdani/github-patches
|
git_diff
|
bookwyrm-social__bookwyrm-566
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Feature request - block quote formatting in reviews
I published [a review](https://bookwyrm.social/user/darius/review/14497) just now with a markdown-style block quote in it. The second-to-last paragraph is formatted, at least when I wrote it in the dialog box, like this:
```
> "I was expecting to go to prison for the rest of my life," Ellsberg wryly said later, "and Ithiel wanted to know whether I understood that I'd never get another dollar from the federal government."
```
But it renders as regular text. I'm guessing a lot of reviewers will want to excerpt their books, so block quotes seem like a good thing to include!
I would expect it to render more like it does here on Github:
> "I was expecting to go to prison for the rest of my life," Ellsberg wryly said later, "and Ithiel wanted to know whether I understood that I'd never get another dollar from the federal government."
--- END ISSUE ---
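As a rough illustration of why the quoted passage falls back to plain text (a sketch only, not BookWyrm's actual markdown pipeline): a sanitizer that keeps an allow-list of tags silently drops anything else, so a `<blockquote>` element produced from the `>` markdown syntax loses its markup unless the tag is whitelisted. The class name and tag set below are illustrative, not taken from the project.

```python
from html.parser import HTMLParser

class AllowListParser(HTMLParser):
    """Minimal allow-list filter: unknown tags are dropped, their inner text is kept."""

    ALLOWED = {"p", "br", "b", "i", "strong", "em"}  # note: no "blockquote"

    def __init__(self):
        super().__init__()
        self.out = []

    def handle_starttag(self, tag, attrs):
        if tag in self.ALLOWED:
            self.out.append(f"<{tag}>")

    def handle_endtag(self, tag):
        if tag in self.ALLOWED:
            self.out.append(f"</{tag}>")

    def handle_data(self, data):
        self.out.append(data)

parser = AllowListParser()
parser.feed("<blockquote><p>I was expecting to go to prison ...</p></blockquote>")
print("".join(parser.out))  # prints "<p>I was expecting to go to prison ...</p>" - the blockquote markup is gone
```

Adding `'blockquote'` to the allow-list, as the patch further down does, is enough for the quoted text to keep its formatting.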
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/sanitize_html.py`
Content:
```
1 ''' html parser to clean up incoming text from unknown sources '''
2 from html.parser import HTMLParser
3
4 class InputHtmlParser(HTMLParser):#pylint: disable=abstract-method
5 ''' Removes any html that isn't allowed_tagsed from a block '''
6
7 def __init__(self):
8 HTMLParser.__init__(self)
9 self.allowed_tags = [
10 'p', 'br',
11 'b', 'i', 'strong', 'em', 'pre',
12 'a', 'span', 'ul', 'ol', 'li'
13 ]
14 self.tag_stack = []
15 self.output = []
16 # if the html appears invalid, we just won't allow any at all
17 self.allow_html = True
18
19
20 def handle_starttag(self, tag, attrs):
21 ''' check if the tag is valid '''
22 if self.allow_html and tag in self.allowed_tags:
23 self.output.append(('tag', self.get_starttag_text()))
24 self.tag_stack.append(tag)
25 else:
26 self.output.append(('data', ''))
27
28
29 def handle_endtag(self, tag):
30 ''' keep the close tag '''
31 if not self.allow_html or tag not in self.allowed_tags:
32 self.output.append(('data', ''))
33 return
34
35 if not self.tag_stack or self.tag_stack[-1] != tag:
36 # the end tag doesn't match the most recent start tag
37 self.allow_html = False
38 self.output.append(('data', ''))
39 return
40
41 self.tag_stack = self.tag_stack[:-1]
42 self.output.append(('tag', '</%s>' % tag))
43
44
45 def handle_data(self, data):
46 ''' extract the answer, if we're in an answer tag '''
47 self.output.append(('data', data))
48
49
50 def get_output(self):
51 ''' convert the output from a list of tuples to a string '''
52 if self.tag_stack:
53 self.allow_html = False
54 if not self.allow_html:
55 return ''.join(v for (k, v) in self.output if k == 'data')
56 return ''.join(v for (k, v) in self.output)
57
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bookwyrm/sanitize_html.py b/bookwyrm/sanitize_html.py
--- a/bookwyrm/sanitize_html.py
+++ b/bookwyrm/sanitize_html.py
@@ -7,7 +7,7 @@
def __init__(self):
HTMLParser.__init__(self)
self.allowed_tags = [
- 'p', 'br',
+ 'p', 'blockquote', 'br',
'b', 'i', 'strong', 'em', 'pre',
'a', 'span', 'ul', 'ol', 'li'
]
|
{"golden_diff": "diff --git a/bookwyrm/sanitize_html.py b/bookwyrm/sanitize_html.py\n--- a/bookwyrm/sanitize_html.py\n+++ b/bookwyrm/sanitize_html.py\n@@ -7,7 +7,7 @@\n def __init__(self):\n HTMLParser.__init__(self)\n self.allowed_tags = [\n- 'p', 'br',\n+ 'p', 'blockquote', 'br',\n 'b', 'i', 'strong', 'em', 'pre',\n 'a', 'span', 'ul', 'ol', 'li'\n ]\n", "issue": "Feature request - block quote formatting in reviews\nI published [a review](https://bookwyrm.social/user/darius/review/14497) just now with a markdown-style block quote in it. The second-to-last paragraph is formatted, at least when I wrote it in the dialog box, like this:\r\n\r\n```\r\n> \"I was expecting to go to prison for the rest of my life,\" Ellsberg wryly said later, \"and Ithiel wanted to know whether I understood that I'd never get another dollar from the federal government.\"\r\n```\r\n\r\nBut it renders as regular text. I'm guessing a lot of reviewers will want to excerpt their books, so block quotes seem like a good thing to include!\r\n\r\nI would expect it to render more like it does here on Github:\r\n\r\n> \"I was expecting to go to prison for the rest of my life,\" Ellsberg wryly said later, \"and Ithiel wanted to know whether I understood that I'd never get another dollar from the federal government.\"\n", "before_files": [{"content": "''' html parser to clean up incoming text from unknown sources '''\nfrom html.parser import HTMLParser\n\nclass InputHtmlParser(HTMLParser):#pylint: disable=abstract-method\n ''' Removes any html that isn't allowed_tagsed from a block '''\n\n def __init__(self):\n HTMLParser.__init__(self)\n self.allowed_tags = [\n 'p', 'br',\n 'b', 'i', 'strong', 'em', 'pre',\n 'a', 'span', 'ul', 'ol', 'li'\n ]\n self.tag_stack = []\n self.output = []\n # if the html appears invalid, we just won't allow any at all\n self.allow_html = True\n\n\n def handle_starttag(self, tag, attrs):\n ''' check if the tag is valid '''\n if self.allow_html and tag in self.allowed_tags:\n self.output.append(('tag', self.get_starttag_text()))\n self.tag_stack.append(tag)\n else:\n self.output.append(('data', ''))\n\n\n def handle_endtag(self, tag):\n ''' keep the close tag '''\n if not self.allow_html or tag not in self.allowed_tags:\n self.output.append(('data', ''))\n return\n\n if not self.tag_stack or self.tag_stack[-1] != tag:\n # the end tag doesn't match the most recent start tag\n self.allow_html = False\n self.output.append(('data', ''))\n return\n\n self.tag_stack = self.tag_stack[:-1]\n self.output.append(('tag', '</%s>' % tag))\n\n\n def handle_data(self, data):\n ''' extract the answer, if we're in an answer tag '''\n self.output.append(('data', data))\n\n\n def get_output(self):\n ''' convert the output from a list of tuples to a string '''\n if self.tag_stack:\n self.allow_html = False\n if not self.allow_html:\n return ''.join(v for (k, v) in self.output if k == 'data')\n return ''.join(v for (k, v) in self.output)\n", "path": "bookwyrm/sanitize_html.py"}], "after_files": [{"content": "''' html parser to clean up incoming text from unknown sources '''\nfrom html.parser import HTMLParser\n\nclass InputHtmlParser(HTMLParser):#pylint: disable=abstract-method\n ''' Removes any html that isn't allowed_tagsed from a block '''\n\n def __init__(self):\n HTMLParser.__init__(self)\n self.allowed_tags = [\n 'p', 'blockquote', 'br',\n 'b', 'i', 'strong', 'em', 'pre',\n 'a', 'span', 'ul', 'ol', 'li'\n ]\n self.tag_stack = []\n self.output = []\n # if the html appears invalid, we just won't allow any at all\n 
self.allow_html = True\n\n\n def handle_starttag(self, tag, attrs):\n ''' check if the tag is valid '''\n if self.allow_html and tag in self.allowed_tags:\n self.output.append(('tag', self.get_starttag_text()))\n self.tag_stack.append(tag)\n else:\n self.output.append(('data', ''))\n\n\n def handle_endtag(self, tag):\n ''' keep the close tag '''\n if not self.allow_html or tag not in self.allowed_tags:\n self.output.append(('data', ''))\n return\n\n if not self.tag_stack or self.tag_stack[-1] != tag:\n # the end tag doesn't match the most recent start tag\n self.allow_html = False\n self.output.append(('data', ''))\n return\n\n self.tag_stack = self.tag_stack[:-1]\n self.output.append(('tag', '</%s>' % tag))\n\n\n def handle_data(self, data):\n ''' extract the answer, if we're in an answer tag '''\n self.output.append(('data', data))\n\n\n def get_output(self):\n ''' convert the output from a list of tuples to a string '''\n if self.tag_stack:\n self.allow_html = False\n if not self.allow_html:\n return ''.join(v for (k, v) in self.output if k == 'data')\n return ''.join(v for (k, v) in self.output)\n", "path": "bookwyrm/sanitize_html.py"}]}
| 1,025 | 124 |
gh_patches_debug_20702
|
rasdani/github-patches
|
git_diff
|
python-discord__bot-1206
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Feature suggestion: put the URL in the title instead of the description in the !pep command
Currently the link produced by the !pep command sits in the embed description, but with the discord.Embed.url parameter the title itself can become a link (as shown in the second image).


--- END ISSUE ---
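For reference, the behaviour requested above hinges on the `url` field of `discord.Embed`: when `url` is set, Discord renders the embed title itself as a hyperlink, so a separate `[Link](...)` line in the description is no longer needed. A minimal sketch follows; the PEP number and title are placeholders, not values taken from the bot's code.

```python
from discord import Embed

pep_number = 8  # placeholder value for illustration

# Current approach: the clickable link lives in the description body.
current = Embed(
    title=f"**PEP {pep_number} - Style Guide for Python Code**",
    description=f"[Link](https://www.python.org/dev/peps/pep-{pep_number:04})",
)

# Suggested approach: setting `url` makes the title itself the link.
suggested = Embed(
    title=f"**PEP {pep_number} - Style Guide for Python Code**",
    url=f"https://www.python.org/dev/peps/pep-{pep_number:04}",
)
```

Both embeds are built the same way client-side; the difference only shows up in how Discord renders the title.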
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bot/exts/utils/utils.py`
Content:
```
1 import difflib
2 import logging
3 import re
4 import unicodedata
5 from email.parser import HeaderParser
6 from io import StringIO
7 from typing import Tuple, Union
8
9 from discord import Colour, Embed, utils
10 from discord.ext.commands import BadArgument, Cog, Context, clean_content, command, has_any_role
11
12 from bot.bot import Bot
13 from bot.constants import Channels, MODERATION_ROLES, STAFF_ROLES
14 from bot.decorators import in_whitelist
15 from bot.pagination import LinePaginator
16 from bot.utils import messages
17
18 log = logging.getLogger(__name__)
19
20 ZEN_OF_PYTHON = """\
21 Beautiful is better than ugly.
22 Explicit is better than implicit.
23 Simple is better than complex.
24 Complex is better than complicated.
25 Flat is better than nested.
26 Sparse is better than dense.
27 Readability counts.
28 Special cases aren't special enough to break the rules.
29 Although practicality beats purity.
30 Errors should never pass silently.
31 Unless explicitly silenced.
32 In the face of ambiguity, refuse the temptation to guess.
33 There should be one-- and preferably only one --obvious way to do it.
34 Although that way may not be obvious at first unless you're Dutch.
35 Now is better than never.
36 Although never is often better than *right* now.
37 If the implementation is hard to explain, it's a bad idea.
38 If the implementation is easy to explain, it may be a good idea.
39 Namespaces are one honking great idea -- let's do more of those!
40 """
41
42 ICON_URL = "https://www.python.org/static/opengraph-icon-200x200.png"
43
44
45 class Utils(Cog):
46 """A selection of utilities which don't have a clear category."""
47
48 def __init__(self, bot: Bot):
49 self.bot = bot
50
51 self.base_pep_url = "http://www.python.org/dev/peps/pep-"
52 self.base_github_pep_url = "https://raw.githubusercontent.com/python/peps/master/pep-"
53
54 @command(name='pep', aliases=('get_pep', 'p'))
55 async def pep_command(self, ctx: Context, pep_number: str) -> None:
56 """Fetches information about a PEP and sends it to the channel."""
57 if pep_number.isdigit():
58 pep_number = int(pep_number)
59 else:
60 await ctx.send_help(ctx.command)
61 return
62
63 # Handle PEP 0 directly because it's not in .rst or .txt so it can't be accessed like other PEPs.
64 if pep_number == 0:
65 return await self.send_pep_zero(ctx)
66
67 possible_extensions = ['.txt', '.rst']
68 found_pep = False
69 for extension in possible_extensions:
70 # Attempt to fetch the PEP
71 pep_url = f"{self.base_github_pep_url}{pep_number:04}{extension}"
72 log.trace(f"Requesting PEP {pep_number} with {pep_url}")
73 response = await self.bot.http_session.get(pep_url)
74
75 if response.status == 200:
76 log.trace("PEP found")
77 found_pep = True
78
79 pep_content = await response.text()
80
81 # Taken from https://github.com/python/peps/blob/master/pep0/pep.py#L179
82 pep_header = HeaderParser().parse(StringIO(pep_content))
83
84 # Assemble the embed
85 pep_embed = Embed(
86 title=f"**PEP {pep_number} - {pep_header['Title']}**",
87 description=f"[Link]({self.base_pep_url}{pep_number:04})",
88 )
89
90 pep_embed.set_thumbnail(url=ICON_URL)
91
92 # Add the interesting information
93 fields_to_check = ("Status", "Python-Version", "Created", "Type")
94 for field in fields_to_check:
95 # Check for a PEP metadata field that is present but has an empty value
96 # embed field values can't contain an empty string
97 if pep_header.get(field, ""):
98 pep_embed.add_field(name=field, value=pep_header[field])
99
100 elif response.status != 404:
101 # any response except 200 and 404 is expected
102 found_pep = True # actually not, but it's easier to display this way
103 log.trace(f"The user requested PEP {pep_number}, but the response had an unexpected status code: "
104 f"{response.status}.\n{response.text}")
105
106 error_message = "Unexpected HTTP error during PEP search. Please let us know."
107 pep_embed = Embed(title="Unexpected error", description=error_message)
108 pep_embed.colour = Colour.red()
109 break
110
111 if not found_pep:
112 log.trace("PEP was not found")
113 not_found = f"PEP {pep_number} does not exist."
114 pep_embed = Embed(title="PEP not found", description=not_found)
115 pep_embed.colour = Colour.red()
116
117 await ctx.message.channel.send(embed=pep_embed)
118
119 @command()
120 @in_whitelist(channels=(Channels.bot_commands,), roles=STAFF_ROLES)
121 async def charinfo(self, ctx: Context, *, characters: str) -> None:
122 """Shows you information on up to 50 unicode characters."""
123 match = re.match(r"<(a?):(\w+):(\d+)>", characters)
124 if match:
125 return await messages.send_denial(
126 ctx,
127 "**Non-Character Detected**\n"
128 "Only unicode characters can be processed, but a custom Discord emoji "
129 "was found. Please remove it and try again."
130 )
131
132 if len(characters) > 50:
133 return await messages.send_denial(ctx, f"Too many characters ({len(characters)}/50)")
134
135 def get_info(char: str) -> Tuple[str, str]:
136 digit = f"{ord(char):x}"
137 if len(digit) <= 4:
138 u_code = f"\\u{digit:>04}"
139 else:
140 u_code = f"\\U{digit:>08}"
141 url = f"https://www.compart.com/en/unicode/U+{digit:>04}"
142 name = f"[{unicodedata.name(char, '')}]({url})"
143 info = f"`{u_code.ljust(10)}`: {name} - {utils.escape_markdown(char)}"
144 return info, u_code
145
146 char_list, raw_list = zip(*(get_info(c) for c in characters))
147 embed = Embed().set_author(name="Character Info")
148
149 if len(characters) > 1:
150 # Maximum length possible is 502 out of 1024, so there's no need to truncate.
151 embed.add_field(name='Full Raw Text', value=f"`{''.join(raw_list)}`", inline=False)
152
153 await LinePaginator.paginate(char_list, ctx, embed, max_lines=10, max_size=2000, empty=False)
154
155 @command()
156 async def zen(self, ctx: Context, *, search_value: Union[int, str, None] = None) -> None:
157 """
158 Show the Zen of Python.
159
160 Without any arguments, the full Zen will be produced.
161 If an integer is provided, the line with that index will be produced.
162 If a string is provided, the line which matches best will be produced.
163 """
164 embed = Embed(
165 colour=Colour.blurple(),
166 title="The Zen of Python",
167 description=ZEN_OF_PYTHON
168 )
169
170 if search_value is None:
171 embed.title += ", by Tim Peters"
172 await ctx.send(embed=embed)
173 return
174
175 zen_lines = ZEN_OF_PYTHON.splitlines()
176
177 # handle if it's an index int
178 if isinstance(search_value, int):
179 upper_bound = len(zen_lines) - 1
180 lower_bound = -1 * upper_bound
181 if not (lower_bound <= search_value <= upper_bound):
182 raise BadArgument(f"Please provide an index between {lower_bound} and {upper_bound}.")
183
184 embed.title += f" (line {search_value % len(zen_lines)}):"
185 embed.description = zen_lines[search_value]
186 await ctx.send(embed=embed)
187 return
188
189 # Try to handle first exact word due difflib.SequenceMatched may use some other similar word instead
190 # exact word.
191 for i, line in enumerate(zen_lines):
192 for word in line.split():
193 if word.lower() == search_value.lower():
194 embed.title += f" (line {i}):"
195 embed.description = line
196 await ctx.send(embed=embed)
197 return
198
199 # handle if it's a search string and not exact word
200 matcher = difflib.SequenceMatcher(None, search_value.lower())
201
202 best_match = ""
203 match_index = 0
204 best_ratio = 0
205
206 for index, line in enumerate(zen_lines):
207 matcher.set_seq2(line.lower())
208
209 # the match ratio needs to be adjusted because, naturally,
210 # longer lines will have worse ratios than shorter lines when
211 # fuzzy searching for keywords. this seems to work okay.
212 adjusted_ratio = (len(line) - 5) ** 0.5 * matcher.ratio()
213
214 if adjusted_ratio > best_ratio:
215 best_ratio = adjusted_ratio
216 best_match = line
217 match_index = index
218
219 if not best_match:
220 raise BadArgument("I didn't get a match! Please try again with a different search term.")
221
222 embed.title += f" (line {match_index}):"
223 embed.description = best_match
224 await ctx.send(embed=embed)
225
226 @command(aliases=("poll",))
227 @has_any_role(*MODERATION_ROLES)
228 async def vote(self, ctx: Context, title: clean_content(fix_channel_mentions=True), *options: str) -> None:
229 """
230 Build a quick voting poll with matching reactions with the provided options.
231
232 A maximum of 20 options can be provided, as Discord supports a max of 20
233 reactions on a single message.
234 """
235 if len(title) > 256:
236 raise BadArgument("The title cannot be longer than 256 characters.")
237 if len(options) < 2:
238 raise BadArgument("Please provide at least 2 options.")
239 if len(options) > 20:
240 raise BadArgument("I can only handle 20 options!")
241
242 codepoint_start = 127462 # represents "regional_indicator_a" unicode value
243 options = {chr(i): f"{chr(i)} - {v}" for i, v in enumerate(options, start=codepoint_start)}
244 embed = Embed(title=title, description="\n".join(options.values()))
245 message = await ctx.send(embed=embed)
246 for reaction in options:
247 await message.add_reaction(reaction)
248
249 async def send_pep_zero(self, ctx: Context) -> None:
250 """Send information about PEP 0."""
251 pep_embed = Embed(
252 title="**PEP 0 - Index of Python Enhancement Proposals (PEPs)**",
253 description="[Link](https://www.python.org/dev/peps/)"
254 )
255 pep_embed.set_thumbnail(url=ICON_URL)
256 pep_embed.add_field(name="Status", value="Active")
257 pep_embed.add_field(name="Created", value="13-Jul-2000")
258 pep_embed.add_field(name="Type", value="Informational")
259
260 await ctx.send(embed=pep_embed)
261
262
263 def setup(bot: Bot) -> None:
264 """Load the Utils cog."""
265 bot.add_cog(Utils(bot))
266
```
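A side note on the fuzzy matching used by the `zen` command in the listing above: the cog scales the `difflib.SequenceMatcher` ratio by line length so that longer Zen lines are not unfairly penalised when searching for short keywords. A minimal standalone sketch of that scoring step (the function name and usage are illustrative, not part of the cog):

```python
import difflib
from typing import List, Tuple


def best_zen_line(search_value: str, zen_lines: List[str]) -> Tuple[int, str]:
    """Return the (index, line) with the highest length-adjusted match ratio."""
    matcher = difflib.SequenceMatcher(None, search_value.lower())
    best_index, best_line, best_ratio = 0, "", 0.0
    for index, line in enumerate(zen_lines):
        matcher.set_seq2(line.lower())
        # Same weighting as the cog: scale the raw ratio by sqrt(len(line) - 5),
        # so longer lines are not automatically out-scored by shorter ones.
        adjusted_ratio = (len(line) - 5) ** 0.5 * matcher.ratio()
        if adjusted_ratio > best_ratio:
            best_index, best_line, best_ratio = index, line, adjusted_ratio
    return best_index, best_line


# e.g. best_zen_line("explicit", ZEN_OF_PYTHON.splitlines())
```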
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bot/exts/utils/utils.py b/bot/exts/utils/utils.py
--- a/bot/exts/utils/utils.py
+++ b/bot/exts/utils/utils.py
@@ -84,7 +84,7 @@
# Assemble the embed
pep_embed = Embed(
title=f"**PEP {pep_number} - {pep_header['Title']}**",
- description=f"[Link]({self.base_pep_url}{pep_number:04})",
+ url=f"{self.base_pep_url}{pep_number:04}"
)
pep_embed.set_thumbnail(url=ICON_URL)
@@ -250,7 +250,7 @@
"""Send information about PEP 0."""
pep_embed = Embed(
title="**PEP 0 - Index of Python Enhancement Proposals (PEPs)**",
- description="[Link](https://www.python.org/dev/peps/)"
+ url="https://www.python.org/dev/peps/"
)
pep_embed.set_thumbnail(url=ICON_URL)
pep_embed.add_field(name="Status", value="Active")
|
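The diff above replaces the markdown link in the embed description with the `url` keyword, which discord.py renders by making the embed title itself clickable. A small illustrative comparison of the two styles (the PEP number and link are placeholders, and the snippet stands alone outside the cog):

```python
from discord import Embed

# Before: plain title, link tucked into the description
old_style = Embed(
    title="**PEP 8 - Style Guide for Python Code**",
    description="[Link](https://www.python.org/dev/peps/pep-0008)",
)

# After: the title itself becomes the hyperlink
new_style = Embed(
    title="**PEP 8 - Style Guide for Python Code**",
    url="https://www.python.org/dev/peps/pep-0008",
)
```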
{"golden_diff": "diff --git a/bot/exts/utils/utils.py b/bot/exts/utils/utils.py\n--- a/bot/exts/utils/utils.py\n+++ b/bot/exts/utils/utils.py\n@@ -84,7 +84,7 @@\n # Assemble the embed\n pep_embed = Embed(\n title=f\"**PEP {pep_number} - {pep_header['Title']}**\",\n- description=f\"[Link]({self.base_pep_url}{pep_number:04})\",\n+ url=f\"{self.base_pep_url}{pep_number:04}\"\n )\n \n pep_embed.set_thumbnail(url=ICON_URL)\n@@ -250,7 +250,7 @@\n \"\"\"Send information about PEP 0.\"\"\"\n pep_embed = Embed(\n title=\"**PEP 0 - Index of Python Enhancement Proposals (PEPs)**\",\n- description=\"[Link](https://www.python.org/dev/peps/)\"\n+ url=\"https://www.python.org/dev/peps/\"\n )\n pep_embed.set_thumbnail(url=ICON_URL)\n pep_embed.add_field(name=\"Status\", value=\"Active\")\n", "issue": "feature suggestion: add url in title instead of description in !pep command\ncurrently the link when you use the !pep command is located in the description, but with the discord.Embed.url parameter, the title is now able to become a link (as shown in second image)\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "import difflib\nimport logging\nimport re\nimport unicodedata\nfrom email.parser import HeaderParser\nfrom io import StringIO\nfrom typing import Tuple, Union\n\nfrom discord import Colour, Embed, utils\nfrom discord.ext.commands import BadArgument, Cog, Context, clean_content, command, has_any_role\n\nfrom bot.bot import Bot\nfrom bot.constants import Channels, MODERATION_ROLES, STAFF_ROLES\nfrom bot.decorators import in_whitelist\nfrom bot.pagination import LinePaginator\nfrom bot.utils import messages\n\nlog = logging.getLogger(__name__)\n\nZEN_OF_PYTHON = \"\"\"\\\nBeautiful is better than ugly.\nExplicit is better than implicit.\nSimple is better than complex.\nComplex is better than complicated.\nFlat is better than nested.\nSparse is better than dense.\nReadability counts.\nSpecial cases aren't special enough to break the rules.\nAlthough practicality beats purity.\nErrors should never pass silently.\nUnless explicitly silenced.\nIn the face of ambiguity, refuse the temptation to guess.\nThere should be one-- and preferably only one --obvious way to do it.\nAlthough that way may not be obvious at first unless you're Dutch.\nNow is better than never.\nAlthough never is often better than *right* now.\nIf the implementation is hard to explain, it's a bad idea.\nIf the implementation is easy to explain, it may be a good idea.\nNamespaces are one honking great idea -- let's do more of those!\n\"\"\"\n\nICON_URL = \"https://www.python.org/static/opengraph-icon-200x200.png\"\n\n\nclass Utils(Cog):\n \"\"\"A selection of utilities which don't have a clear category.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n self.base_pep_url = \"http://www.python.org/dev/peps/pep-\"\n self.base_github_pep_url = \"https://raw.githubusercontent.com/python/peps/master/pep-\"\n\n @command(name='pep', aliases=('get_pep', 'p'))\n async def pep_command(self, ctx: Context, pep_number: str) -> None:\n \"\"\"Fetches information about a PEP and sends it to the channel.\"\"\"\n if pep_number.isdigit():\n pep_number = int(pep_number)\n else:\n await ctx.send_help(ctx.command)\n return\n\n # Handle PEP 0 directly because it's not in .rst or .txt so it can't be accessed like other PEPs.\n if pep_number == 0:\n return await self.send_pep_zero(ctx)\n\n possible_extensions = ['.txt', '.rst']\n found_pep = False\n for extension in possible_extensions:\n # Attempt to fetch the PEP\n pep_url = 
f\"{self.base_github_pep_url}{pep_number:04}{extension}\"\n log.trace(f\"Requesting PEP {pep_number} with {pep_url}\")\n response = await self.bot.http_session.get(pep_url)\n\n if response.status == 200:\n log.trace(\"PEP found\")\n found_pep = True\n\n pep_content = await response.text()\n\n # Taken from https://github.com/python/peps/blob/master/pep0/pep.py#L179\n pep_header = HeaderParser().parse(StringIO(pep_content))\n\n # Assemble the embed\n pep_embed = Embed(\n title=f\"**PEP {pep_number} - {pep_header['Title']}**\",\n description=f\"[Link]({self.base_pep_url}{pep_number:04})\",\n )\n\n pep_embed.set_thumbnail(url=ICON_URL)\n\n # Add the interesting information\n fields_to_check = (\"Status\", \"Python-Version\", \"Created\", \"Type\")\n for field in fields_to_check:\n # Check for a PEP metadata field that is present but has an empty value\n # embed field values can't contain an empty string\n if pep_header.get(field, \"\"):\n pep_embed.add_field(name=field, value=pep_header[field])\n\n elif response.status != 404:\n # any response except 200 and 404 is expected\n found_pep = True # actually not, but it's easier to display this way\n log.trace(f\"The user requested PEP {pep_number}, but the response had an unexpected status code: \"\n f\"{response.status}.\\n{response.text}\")\n\n error_message = \"Unexpected HTTP error during PEP search. Please let us know.\"\n pep_embed = Embed(title=\"Unexpected error\", description=error_message)\n pep_embed.colour = Colour.red()\n break\n\n if not found_pep:\n log.trace(\"PEP was not found\")\n not_found = f\"PEP {pep_number} does not exist.\"\n pep_embed = Embed(title=\"PEP not found\", description=not_found)\n pep_embed.colour = Colour.red()\n\n await ctx.message.channel.send(embed=pep_embed)\n\n @command()\n @in_whitelist(channels=(Channels.bot_commands,), roles=STAFF_ROLES)\n async def charinfo(self, ctx: Context, *, characters: str) -> None:\n \"\"\"Shows you information on up to 50 unicode characters.\"\"\"\n match = re.match(r\"<(a?):(\\w+):(\\d+)>\", characters)\n if match:\n return await messages.send_denial(\n ctx,\n \"**Non-Character Detected**\\n\"\n \"Only unicode characters can be processed, but a custom Discord emoji \"\n \"was found. 
Please remove it and try again.\"\n )\n\n if len(characters) > 50:\n return await messages.send_denial(ctx, f\"Too many characters ({len(characters)}/50)\")\n\n def get_info(char: str) -> Tuple[str, str]:\n digit = f\"{ord(char):x}\"\n if len(digit) <= 4:\n u_code = f\"\\\\u{digit:>04}\"\n else:\n u_code = f\"\\\\U{digit:>08}\"\n url = f\"https://www.compart.com/en/unicode/U+{digit:>04}\"\n name = f\"[{unicodedata.name(char, '')}]({url})\"\n info = f\"`{u_code.ljust(10)}`: {name} - {utils.escape_markdown(char)}\"\n return info, u_code\n\n char_list, raw_list = zip(*(get_info(c) for c in characters))\n embed = Embed().set_author(name=\"Character Info\")\n\n if len(characters) > 1:\n # Maximum length possible is 502 out of 1024, so there's no need to truncate.\n embed.add_field(name='Full Raw Text', value=f\"`{''.join(raw_list)}`\", inline=False)\n\n await LinePaginator.paginate(char_list, ctx, embed, max_lines=10, max_size=2000, empty=False)\n\n @command()\n async def zen(self, ctx: Context, *, search_value: Union[int, str, None] = None) -> None:\n \"\"\"\n Show the Zen of Python.\n\n Without any arguments, the full Zen will be produced.\n If an integer is provided, the line with that index will be produced.\n If a string is provided, the line which matches best will be produced.\n \"\"\"\n embed = Embed(\n colour=Colour.blurple(),\n title=\"The Zen of Python\",\n description=ZEN_OF_PYTHON\n )\n\n if search_value is None:\n embed.title += \", by Tim Peters\"\n await ctx.send(embed=embed)\n return\n\n zen_lines = ZEN_OF_PYTHON.splitlines()\n\n # handle if it's an index int\n if isinstance(search_value, int):\n upper_bound = len(zen_lines) - 1\n lower_bound = -1 * upper_bound\n if not (lower_bound <= search_value <= upper_bound):\n raise BadArgument(f\"Please provide an index between {lower_bound} and {upper_bound}.\")\n\n embed.title += f\" (line {search_value % len(zen_lines)}):\"\n embed.description = zen_lines[search_value]\n await ctx.send(embed=embed)\n return\n\n # Try to handle first exact word due difflib.SequenceMatched may use some other similar word instead\n # exact word.\n for i, line in enumerate(zen_lines):\n for word in line.split():\n if word.lower() == search_value.lower():\n embed.title += f\" (line {i}):\"\n embed.description = line\n await ctx.send(embed=embed)\n return\n\n # handle if it's a search string and not exact word\n matcher = difflib.SequenceMatcher(None, search_value.lower())\n\n best_match = \"\"\n match_index = 0\n best_ratio = 0\n\n for index, line in enumerate(zen_lines):\n matcher.set_seq2(line.lower())\n\n # the match ratio needs to be adjusted because, naturally,\n # longer lines will have worse ratios than shorter lines when\n # fuzzy searching for keywords. this seems to work okay.\n adjusted_ratio = (len(line) - 5) ** 0.5 * matcher.ratio()\n\n if adjusted_ratio > best_ratio:\n best_ratio = adjusted_ratio\n best_match = line\n match_index = index\n\n if not best_match:\n raise BadArgument(\"I didn't get a match! 
Please try again with a different search term.\")\n\n embed.title += f\" (line {match_index}):\"\n embed.description = best_match\n await ctx.send(embed=embed)\n\n @command(aliases=(\"poll\",))\n @has_any_role(*MODERATION_ROLES)\n async def vote(self, ctx: Context, title: clean_content(fix_channel_mentions=True), *options: str) -> None:\n \"\"\"\n Build a quick voting poll with matching reactions with the provided options.\n\n A maximum of 20 options can be provided, as Discord supports a max of 20\n reactions on a single message.\n \"\"\"\n if len(title) > 256:\n raise BadArgument(\"The title cannot be longer than 256 characters.\")\n if len(options) < 2:\n raise BadArgument(\"Please provide at least 2 options.\")\n if len(options) > 20:\n raise BadArgument(\"I can only handle 20 options!\")\n\n codepoint_start = 127462 # represents \"regional_indicator_a\" unicode value\n options = {chr(i): f\"{chr(i)} - {v}\" for i, v in enumerate(options, start=codepoint_start)}\n embed = Embed(title=title, description=\"\\n\".join(options.values()))\n message = await ctx.send(embed=embed)\n for reaction in options:\n await message.add_reaction(reaction)\n\n async def send_pep_zero(self, ctx: Context) -> None:\n \"\"\"Send information about PEP 0.\"\"\"\n pep_embed = Embed(\n title=\"**PEP 0 - Index of Python Enhancement Proposals (PEPs)**\",\n description=\"[Link](https://www.python.org/dev/peps/)\"\n )\n pep_embed.set_thumbnail(url=ICON_URL)\n pep_embed.add_field(name=\"Status\", value=\"Active\")\n pep_embed.add_field(name=\"Created\", value=\"13-Jul-2000\")\n pep_embed.add_field(name=\"Type\", value=\"Informational\")\n\n await ctx.send(embed=pep_embed)\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Load the Utils cog.\"\"\"\n bot.add_cog(Utils(bot))\n", "path": "bot/exts/utils/utils.py"}], "after_files": [{"content": "import difflib\nimport logging\nimport re\nimport unicodedata\nfrom email.parser import HeaderParser\nfrom io import StringIO\nfrom typing import Tuple, Union\n\nfrom discord import Colour, Embed, utils\nfrom discord.ext.commands import BadArgument, Cog, Context, clean_content, command, has_any_role\n\nfrom bot.bot import Bot\nfrom bot.constants import Channels, MODERATION_ROLES, STAFF_ROLES\nfrom bot.decorators import in_whitelist\nfrom bot.pagination import LinePaginator\nfrom bot.utils import messages\n\nlog = logging.getLogger(__name__)\n\nZEN_OF_PYTHON = \"\"\"\\\nBeautiful is better than ugly.\nExplicit is better than implicit.\nSimple is better than complex.\nComplex is better than complicated.\nFlat is better than nested.\nSparse is better than dense.\nReadability counts.\nSpecial cases aren't special enough to break the rules.\nAlthough practicality beats purity.\nErrors should never pass silently.\nUnless explicitly silenced.\nIn the face of ambiguity, refuse the temptation to guess.\nThere should be one-- and preferably only one --obvious way to do it.\nAlthough that way may not be obvious at first unless you're Dutch.\nNow is better than never.\nAlthough never is often better than *right* now.\nIf the implementation is hard to explain, it's a bad idea.\nIf the implementation is easy to explain, it may be a good idea.\nNamespaces are one honking great idea -- let's do more of those!\n\"\"\"\n\nICON_URL = \"https://www.python.org/static/opengraph-icon-200x200.png\"\n\n\nclass Utils(Cog):\n \"\"\"A selection of utilities which don't have a clear category.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n self.base_pep_url = \"http://www.python.org/dev/peps/pep-\"\n 
self.base_github_pep_url = \"https://raw.githubusercontent.com/python/peps/master/pep-\"\n\n @command(name='pep', aliases=('get_pep', 'p'))\n async def pep_command(self, ctx: Context, pep_number: str) -> None:\n \"\"\"Fetches information about a PEP and sends it to the channel.\"\"\"\n if pep_number.isdigit():\n pep_number = int(pep_number)\n else:\n await ctx.send_help(ctx.command)\n return\n\n # Handle PEP 0 directly because it's not in .rst or .txt so it can't be accessed like other PEPs.\n if pep_number == 0:\n return await self.send_pep_zero(ctx)\n\n possible_extensions = ['.txt', '.rst']\n found_pep = False\n for extension in possible_extensions:\n # Attempt to fetch the PEP\n pep_url = f\"{self.base_github_pep_url}{pep_number:04}{extension}\"\n log.trace(f\"Requesting PEP {pep_number} with {pep_url}\")\n response = await self.bot.http_session.get(pep_url)\n\n if response.status == 200:\n log.trace(\"PEP found\")\n found_pep = True\n\n pep_content = await response.text()\n\n # Taken from https://github.com/python/peps/blob/master/pep0/pep.py#L179\n pep_header = HeaderParser().parse(StringIO(pep_content))\n\n # Assemble the embed\n pep_embed = Embed(\n title=f\"**PEP {pep_number} - {pep_header['Title']}**\",\n url=f\"{self.base_pep_url}{pep_number:04}\"\n )\n\n pep_embed.set_thumbnail(url=ICON_URL)\n\n # Add the interesting information\n fields_to_check = (\"Status\", \"Python-Version\", \"Created\", \"Type\")\n for field in fields_to_check:\n # Check for a PEP metadata field that is present but has an empty value\n # embed field values can't contain an empty string\n if pep_header.get(field, \"\"):\n pep_embed.add_field(name=field, value=pep_header[field])\n\n elif response.status != 404:\n # any response except 200 and 404 is expected\n found_pep = True # actually not, but it's easier to display this way\n log.trace(f\"The user requested PEP {pep_number}, but the response had an unexpected status code: \"\n f\"{response.status}.\\n{response.text}\")\n\n error_message = \"Unexpected HTTP error during PEP search. Please let us know.\"\n pep_embed = Embed(title=\"Unexpected error\", description=error_message)\n pep_embed.colour = Colour.red()\n break\n\n if not found_pep:\n log.trace(\"PEP was not found\")\n not_found = f\"PEP {pep_number} does not exist.\"\n pep_embed = Embed(title=\"PEP not found\", description=not_found)\n pep_embed.colour = Colour.red()\n\n await ctx.message.channel.send(embed=pep_embed)\n\n @command()\n @in_whitelist(channels=(Channels.bot_commands,), roles=STAFF_ROLES)\n async def charinfo(self, ctx: Context, *, characters: str) -> None:\n \"\"\"Shows you information on up to 50 unicode characters.\"\"\"\n match = re.match(r\"<(a?):(\\w+):(\\d+)>\", characters)\n if match:\n return await messages.send_denial(\n ctx,\n \"**Non-Character Detected**\\n\"\n \"Only unicode characters can be processed, but a custom Discord emoji \"\n \"was found. 
Please remove it and try again.\"\n )\n\n if len(characters) > 50:\n return await messages.send_denial(ctx, f\"Too many characters ({len(characters)}/50)\")\n\n def get_info(char: str) -> Tuple[str, str]:\n digit = f\"{ord(char):x}\"\n if len(digit) <= 4:\n u_code = f\"\\\\u{digit:>04}\"\n else:\n u_code = f\"\\\\U{digit:>08}\"\n url = f\"https://www.compart.com/en/unicode/U+{digit:>04}\"\n name = f\"[{unicodedata.name(char, '')}]({url})\"\n info = f\"`{u_code.ljust(10)}`: {name} - {utils.escape_markdown(char)}\"\n return info, u_code\n\n char_list, raw_list = zip(*(get_info(c) for c in characters))\n embed = Embed().set_author(name=\"Character Info\")\n\n if len(characters) > 1:\n # Maximum length possible is 502 out of 1024, so there's no need to truncate.\n embed.add_field(name='Full Raw Text', value=f\"`{''.join(raw_list)}`\", inline=False)\n\n await LinePaginator.paginate(char_list, ctx, embed, max_lines=10, max_size=2000, empty=False)\n\n @command()\n async def zen(self, ctx: Context, *, search_value: Union[int, str, None] = None) -> None:\n \"\"\"\n Show the Zen of Python.\n\n Without any arguments, the full Zen will be produced.\n If an integer is provided, the line with that index will be produced.\n If a string is provided, the line which matches best will be produced.\n \"\"\"\n embed = Embed(\n colour=Colour.blurple(),\n title=\"The Zen of Python\",\n description=ZEN_OF_PYTHON\n )\n\n if search_value is None:\n embed.title += \", by Tim Peters\"\n await ctx.send(embed=embed)\n return\n\n zen_lines = ZEN_OF_PYTHON.splitlines()\n\n # handle if it's an index int\n if isinstance(search_value, int):\n upper_bound = len(zen_lines) - 1\n lower_bound = -1 * upper_bound\n if not (lower_bound <= search_value <= upper_bound):\n raise BadArgument(f\"Please provide an index between {lower_bound} and {upper_bound}.\")\n\n embed.title += f\" (line {search_value % len(zen_lines)}):\"\n embed.description = zen_lines[search_value]\n await ctx.send(embed=embed)\n return\n\n # Try to handle first exact word due difflib.SequenceMatched may use some other similar word instead\n # exact word.\n for i, line in enumerate(zen_lines):\n for word in line.split():\n if word.lower() == search_value.lower():\n embed.title += f\" (line {i}):\"\n embed.description = line\n await ctx.send(embed=embed)\n return\n\n # handle if it's a search string and not exact word\n matcher = difflib.SequenceMatcher(None, search_value.lower())\n\n best_match = \"\"\n match_index = 0\n best_ratio = 0\n\n for index, line in enumerate(zen_lines):\n matcher.set_seq2(line.lower())\n\n # the match ratio needs to be adjusted because, naturally,\n # longer lines will have worse ratios than shorter lines when\n # fuzzy searching for keywords. this seems to work okay.\n adjusted_ratio = (len(line) - 5) ** 0.5 * matcher.ratio()\n\n if adjusted_ratio > best_ratio:\n best_ratio = adjusted_ratio\n best_match = line\n match_index = index\n\n if not best_match:\n raise BadArgument(\"I didn't get a match! 
Please try again with a different search term.\")\n\n embed.title += f\" (line {match_index}):\"\n embed.description = best_match\n await ctx.send(embed=embed)\n\n @command(aliases=(\"poll\",))\n @has_any_role(*MODERATION_ROLES)\n async def vote(self, ctx: Context, title: clean_content(fix_channel_mentions=True), *options: str) -> None:\n \"\"\"\n Build a quick voting poll with matching reactions with the provided options.\n\n A maximum of 20 options can be provided, as Discord supports a max of 20\n reactions on a single message.\n \"\"\"\n if len(title) > 256:\n raise BadArgument(\"The title cannot be longer than 256 characters.\")\n if len(options) < 2:\n raise BadArgument(\"Please provide at least 2 options.\")\n if len(options) > 20:\n raise BadArgument(\"I can only handle 20 options!\")\n\n codepoint_start = 127462 # represents \"regional_indicator_a\" unicode value\n options = {chr(i): f\"{chr(i)} - {v}\" for i, v in enumerate(options, start=codepoint_start)}\n embed = Embed(title=title, description=\"\\n\".join(options.values()))\n message = await ctx.send(embed=embed)\n for reaction in options:\n await message.add_reaction(reaction)\n\n async def send_pep_zero(self, ctx: Context) -> None:\n \"\"\"Send information about PEP 0.\"\"\"\n pep_embed = Embed(\n title=\"**PEP 0 - Index of Python Enhancement Proposals (PEPs)**\",\n url=\"https://www.python.org/dev/peps/\"\n )\n pep_embed.set_thumbnail(url=ICON_URL)\n pep_embed.add_field(name=\"Status\", value=\"Active\")\n pep_embed.add_field(name=\"Created\", value=\"13-Jul-2000\")\n pep_embed.add_field(name=\"Type\", value=\"Informational\")\n\n await ctx.send(embed=pep_embed)\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Load the Utils cog.\"\"\"\n bot.add_cog(Utils(bot))\n", "path": "bot/exts/utils/utils.py"}]}
| 3,648 | 246 |
gh_patches_debug_42427
|
rasdani/github-patches
|
git_diff
|
bridgecrewio__checkov-5112
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[checks][python] How to determine which fields are changing in a resource
While you can use `TF_PLAN_RESOURCE_CHANGE_ACTIONS` to see whether a resource changed at all, I want to know which field(s) changed.
The plan data definitely exposes this, so it should be possible, at the very least. Is this already available and just not explicitly documented? If not, is that on the roadmap to add?
--- END ISSUE ---
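For context, the data the issue refers to lives in the plan JSON itself: each entry in `resource_changes` has a `change` object with `before` and `after` maps, so the changed field names fall out of a simple diff of those two maps. A rough standalone sketch of that idea (plain plan-JSON handling, not checkov's API; the file path is a placeholder):

```python
import json
from typing import Dict, List


def changed_fields(plan_path: str) -> Dict[str, List[str]]:
    """Map each resource address to the top-level fields whose value differs between before and after."""
    with open(plan_path) as f:
        plan = json.load(f)

    changes: Dict[str, List[str]] = {}
    for resource in plan.get("resource_changes", []):
        before = resource["change"].get("before") or {}  # None when the resource is being created
        after = resource["change"].get("after") or {}    # None when the resource is being destroyed
        changes[resource["address"]] = sorted(
            key for key in set(before) | set(after) if before.get(key) != after.get(key)
        )
    return changes


# e.g. changed_fields("tfplan.json") might yield {"aws_s3_bucket.example": ["acl", "tags"]}
```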
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/terraform/plan_parser.py`
Content:
```
1 from __future__ import annotations
2
3 import itertools
4 import json
5 import logging
6 from typing import Optional, Tuple, Dict, List, Any, cast
7
8 from checkov.common.graph.graph_builder import CustomAttributes
9 from checkov.common.parsers.node import ListNode
10 from checkov.common.util.type_forcers import force_list
11 from checkov.terraform.context_parsers.tf_plan import parse
12
13 SIMPLE_TYPES = (str, int, float, bool)
14 TF_PLAN_RESOURCE_ADDRESS = "__address__"
15 TF_PLAN_RESOURCE_CHANGE_ACTIONS = "__change_actions__"
16
17 RESOURCE_TYPES_JSONIFY = {
18 "aws_batch_job_definition": "container_properties",
19 "aws_ecs_task_definition": "container_definitions",
20 "aws_iam_policy": "policy",
21 "aws_iam_role": "assume_role_policy",
22 "aws_iam_role_policy": "policy",
23 "aws_iam_group_policy": "policy",
24 "aws_iam_user_policy": "policy",
25 "aws_ssoadmin_permission_set_inline_policy": "inline_policy",
26 }
27
28
29 def _is_simple_type(obj: Any) -> bool:
30 if obj is None:
31 return True
32 if isinstance(obj, SIMPLE_TYPES):
33 return True
34 return False
35
36
37 def _is_list_of_simple_types(obj: Any) -> bool:
38 if not isinstance(obj, list):
39 return False
40 for i in obj:
41 if not _is_simple_type(i):
42 return False
43 return True
44
45
46 def _is_list_of_dicts(obj: Any) -> bool:
47 if not isinstance(obj, list):
48 return False
49 for i in obj:
50 if isinstance(i, dict):
51 return True
52 return False
53
54
55 def _hclify(
56 obj: dict[str, Any],
57 conf: dict[str, Any] | None = None,
58 parent_key: str | None = None,
59 resource_type: str | None = None,
60 ) -> dict[str, list[Any]]:
61 ret_dict = {}
62
63 if not isinstance(obj, dict):
64 raise Exception("this method receives only dicts")
65
66 if hasattr(obj, "start_mark") and hasattr(obj, "end_mark"):
67 obj["start_line"] = obj.start_mark.line
68 obj["end_line"] = obj.end_mark.line
69 for key, value in obj.items():
70 if _is_simple_type(value) or _is_list_of_simple_types(value):
71 if parent_key == "tags":
72 ret_dict[key] = value
73 else:
74 # only wrap non-lists into a list
75 ret_dict[key] = _clean_simple_type_list([value])
76
77 if _is_list_of_dicts(value):
78 child_list = []
79 conf_val = conf.get(key, []) if conf else []
80 if not isinstance(conf_val, list):
81 # this occurs, when a resource in the current state has no value for that argument
82 conf_val = [conf_val]
83
84 for internal_val, internal_conf_val in itertools.zip_longest(value, conf_val):
85 if isinstance(internal_val, dict):
86 child_list.append(_hclify(internal_val, internal_conf_val, parent_key=key))
87 if key == "tags":
88 ret_dict[key] = [child_list]
89 else:
90 ret_dict[key] = child_list
91 if isinstance(value, dict):
92 child_dict = _hclify(value, parent_key=key)
93 if parent_key == "tags":
94 ret_dict[key] = child_dict
95 else:
96 ret_dict[key] = [child_dict]
97 if conf and isinstance(conf, dict):
98 _add_references(obj=obj, conf=conf, return_resource=ret_dict)
99
100 if resource_type and resource_type in RESOURCE_TYPES_JSONIFY:
101 # values shouldn't be encapsulated in lists
102 dict_value = jsonify(obj=obj, resource_type=resource_type)
103 if dict_value is not None:
104 ret_dict[RESOURCE_TYPES_JSONIFY[resource_type]] = force_list(dict_value)
105
106 return ret_dict
107
108
109 def jsonify(obj: dict[str, Any], resource_type: str) -> dict[str, Any] | None:
110 """Tries to create a dict from a string of a supported resource type attribute"""
111
112 jsonify_key = RESOURCE_TYPES_JSONIFY[resource_type]
113 if jsonify_key in obj:
114 try:
115 return cast("dict[str, Any]", json.loads(obj[jsonify_key]))
116 except json.JSONDecodeError:
117 logging.debug(
118 f"Attribute {jsonify_key} of resource type {resource_type} is not json encoded {obj[jsonify_key]}"
119 )
120
121 return None
122
123
124 def _prepare_resource_block(
125 resource: dict[str, Any], conf: dict[str, Any] | None, resource_changes: dict[str, dict[str, Any]]
126 ) -> tuple[dict[str, dict[str, Any]], str, bool]:
127 """hclify resource if pre-conditions met.
128
129 :param resource: tf planned_values resource block
130 :param conf: tf configuration resource block
131 :param resource_changes: tf resource_changes block
132
133 :returns:
134 - resource_block: a list of strings representing the header columns
135 - prepared: whether conditions met to prepare data
136 """
137
138 resource_block: Dict[str, Dict[str, Any]] = {}
139 resource_type = resource["type"]
140 resource_block[resource_type] = {}
141 prepared = False
142 mode = ""
143 block_type = ""
144 if "mode" in resource:
145 mode = resource["mode"]
146 block_type = "data" if mode == "data" else "resource"
147
148 # Rare cases where data block appears in resources with same name as resource block and only partial values
149 # and where *_module resources don't have values field
150 if mode in ("managed", "data"):
151 expressions = conf.get("expressions") if conf else None
152
153 resource_conf = _hclify(
154 obj=resource.get("values", {"start_line": 0, "end_line": 0}),
155 conf=expressions,
156 resource_type=resource_type,
157 )
158 resource_address: str | None = resource.get("address")
159 resource_conf[TF_PLAN_RESOURCE_ADDRESS] = resource_address # type:ignore[assignment] # special field
160
161 changes = resource_changes.get(resource_address) # type:ignore[arg-type] # becaus eit can be None
162 if changes:
163 resource_conf[TF_PLAN_RESOURCE_CHANGE_ACTIONS] = changes.get("change", {}).get("actions") or []
164
165 resource_block[resource_type][resource.get("name", "default")] = resource_conf
166 prepared = True
167 return resource_block, block_type, prepared
168
169
170 def _find_child_modules(
171 child_modules: ListNode, resource_changes: dict[str, dict[str, Any]], root_module_conf: dict[str, Any]
172 ) -> dict[str, list[dict[str, dict[str, Any]]]]:
173 """ Find all child modules if any. Including any amount of nested child modules.
174
175 :param child_modules: list of terraform child_module objects
176 :param resource_changes: a resource address to resource changes dict
177 :param root_module_conf: configuration block of the root module
178 :returns:
179 list of terraform resource blocks
180 """
181
182 blocks: dict[str, list[dict[str, dict[str, Any]]]] = {"resource": [], "data": []}
183 for child_module in child_modules:
184 nested_child_modules = child_module.get("child_modules", [])
185 if nested_child_modules:
186 nested_blocks = _find_child_modules(
187 child_modules=nested_child_modules,
188 resource_changes=resource_changes,
189 root_module_conf=root_module_conf
190 )
191 for block_type, resource_blocks in nested_blocks.items():
192 blocks[block_type].extend(resource_blocks)
193
194 module_address = child_module.get("address", "")
195 module_call_resources = _get_module_call_resources(
196 module_address=module_address,
197 root_module_conf=root_module_conf,
198 )
199
200 for resource in child_module.get("resources", []):
201 module_call_conf = None
202 if module_address and module_call_resources:
203 module_call_conf = next(
204 (
205 module_call_resource
206 for module_call_resource in module_call_resources
207 if f"{module_address}.{module_call_resource['address']}" == resource["address"]
208 ),
209 None
210 )
211
212 resource_block, block_type, prepared = _prepare_resource_block(
213 resource=resource,
214 conf=module_call_conf,
215 resource_changes=resource_changes,
216 )
217 if prepared is True:
218 if block_type == "resource":
219 blocks["resource"].append(resource_block)
220 elif block_type == "data":
221 blocks["data"].append(resource_block)
222 return blocks
223
224
225 def _get_module_call_resources(module_address: str, root_module_conf: dict[str, Any]) -> list[dict[str, Any]]:
226 """Extracts the resources from the 'module_calls' block under 'configuration'"""
227
228 for module_name in module_address.split("."):
229 if module_name == "module":
230 # module names are always prefixed with 'module.', therefore skip it
231 continue
232 root_module_conf = root_module_conf.get("module_calls", {}).get(module_name, {}).get("module", {})
233
234 return root_module_conf.get("resources", [])
235
236
237 def _get_resource_changes(template: dict[str, Any]) -> dict[str, dict[str, Any]]:
238 """Returns a resource address to resource changes dict"""
239
240 resource_changes_map = {}
241
242 resource_changes = template.get("resource_changes")
243 if resource_changes and isinstance(resource_changes, list):
244 resource_changes_map = {
245 change.get("address", ""): change
246 for change in resource_changes
247 }
248
249 return resource_changes_map
250
251
252 def _add_references(obj: dict[str, Any], conf: dict[str, Any], return_resource: dict[str, Any]) -> None:
253 """Adds references to the resources in the TF plan definition"""
254
255 for conf_key, conf_value in conf.items():
256 if not isinstance(conf_value, dict) or "references" not in conf_value:
257 # only interested in dict with a "references" key
258 continue
259
260 ref = next((x for x in conf_value["references"] or [] if not x.startswith(("var.", "local."))), None)
261 if ref:
262 if conf_key not in obj:
263 return_resource[conf_key] = [ref]
264 elif obj[conf_key] is None:
265 return_resource[conf_key] = [ref]
266 elif isinstance(obj[conf_key], list) and any(obj_value is None for obj_value in obj[conf_key]):
267 return_resource[conf_key] = [[obj_value for obj_value in obj[conf_key] if obj_value is not None] + [ref]]
268
269 return_resource.setdefault(CustomAttributes.REFERENCES, []).append(conf_value["references"])
270
271
272 def parse_tf_plan(tf_plan_file: str, out_parsing_errors: Dict[str, str]) -> Tuple[Optional[Dict[str, Any]], Optional[List[Tuple[int, str]]]]:
273 """
274 :type tf_plan_file: str - path to plan file
275 :rtype: tf_definition dictionary and template_lines of the plan file
276 """
277 tf_definition: Dict[str, Any] = {"resource": [], "data": []}
278 template, template_lines = parse(tf_plan_file, out_parsing_errors)
279 if not template:
280 return None, None
281
282 resource_changes = _get_resource_changes(template=template)
283
284 for resource in template.get("planned_values", {}).get("root_module", {}).get("resources", []):
285 conf = next(
286 (
287 x
288 for x in template.get("configuration", {}).get("root_module", {}).get("resources", [])
289 if x["type"] == resource["type"] and x["name"] == resource["name"]
290 ),
291 None,
292 )
293 resource_block, block_type, prepared = _prepare_resource_block(
294 resource=resource,
295 conf=conf,
296 resource_changes=resource_changes,
297 )
298 if prepared is True:
299 if block_type == "resource":
300 tf_definition["resource"].append(resource_block)
301 elif block_type == "data":
302 tf_definition["data"].append(resource_block)
303 child_modules = template.get("planned_values", {}).get("root_module", {}).get("child_modules", [])
304 root_module_conf = template.get("configuration", {}).get("root_module", {})
305 # Terraform supports modules within modules so we need to search
306 # in nested modules to find all resource blocks
307 module_blocks = _find_child_modules(
308 child_modules=child_modules,
309 resource_changes=resource_changes,
310 root_module_conf=root_module_conf,
311 )
312 for block_type, resource_blocks in module_blocks.items():
313 tf_definition[block_type].extend(resource_blocks)
314 return tf_definition, template_lines
315
316
317 def _clean_simple_type_list(value_list: List[Any]) -> List[Any]:
318 """
319 Given a list of simple types return a cleaned list of simple types.
320 Converts booleans that are input as strings back to booleans to maintain consistent expectations for later evaluation.
321 Sometimes Terraform Plan will output Map values as strings regardless of boolean input.
322 """
323 for i in range(len(value_list)):
324 if isinstance(value_list[i], str):
325 lower_case_value = value_list[i].lower()
326 if lower_case_value == "true":
327 value_list[i] = True
328 if lower_case_value == "false":
329 value_list[i] = False
330 return value_list
331
```
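For orientation, `parse_tf_plan` above returns a `{"resource": [...], "data": [...]}` definition plus the template lines of the plan file, and each parsed resource config already carries the `__address__` and `__change_actions__` fields. A minimal usage sketch based on that signature (the plan file path is a placeholder):

```python
from checkov.terraform.plan_parser import TF_PLAN_RESOURCE_CHANGE_ACTIONS, parse_tf_plan

parsing_errors: dict = {}
definitions, template_lines = parse_tf_plan("tfplan.json", parsing_errors)

if definitions:
    for resource_block in definitions["resource"]:
        for resource_type, instances in resource_block.items():
            for name, conf in instances.items():
                # each conf is the hclified resource block produced by _prepare_resource_block
                print(resource_type, name, conf.get(TF_PLAN_RESOURCE_CHANGE_ACTIONS))
```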
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/checkov/terraform/plan_parser.py b/checkov/terraform/plan_parser.py
--- a/checkov/terraform/plan_parser.py
+++ b/checkov/terraform/plan_parser.py
@@ -7,12 +7,14 @@
from checkov.common.graph.graph_builder import CustomAttributes
from checkov.common.parsers.node import ListNode
+from checkov.common.util.consts import LINE_FIELD_NAMES
from checkov.common.util.type_forcers import force_list
from checkov.terraform.context_parsers.tf_plan import parse
SIMPLE_TYPES = (str, int, float, bool)
TF_PLAN_RESOURCE_ADDRESS = "__address__"
TF_PLAN_RESOURCE_CHANGE_ACTIONS = "__change_actions__"
+TF_PLAN_RESOURCE_CHANGE_KEYS = "__change_keys__"
RESOURCE_TYPES_JSONIFY = {
"aws_batch_job_definition": "container_properties",
@@ -129,7 +131,6 @@
:param resource: tf planned_values resource block
:param conf: tf configuration resource block
:param resource_changes: tf resource_changes block
-
:returns:
- resource_block: a list of strings representing the header columns
- prepared: whether conditions met to prepare data
@@ -161,6 +162,7 @@
changes = resource_changes.get(resource_address) # type:ignore[arg-type] # becaus eit can be None
if changes:
resource_conf[TF_PLAN_RESOURCE_CHANGE_ACTIONS] = changes.get("change", {}).get("actions") or []
+ resource_conf[TF_PLAN_RESOURCE_CHANGE_KEYS] = changes.get(TF_PLAN_RESOURCE_CHANGE_KEYS) or []
resource_block[resource_type][resource.get("name", "default")] = resource_conf
prepared = True
@@ -186,7 +188,7 @@
nested_blocks = _find_child_modules(
child_modules=nested_child_modules,
resource_changes=resource_changes,
- root_module_conf=root_module_conf
+ root_module_conf=root_module_conf,
)
for block_type, resource_blocks in nested_blocks.items():
blocks[block_type].extend(resource_blocks)
@@ -238,13 +240,24 @@
"""Returns a resource address to resource changes dict"""
resource_changes_map = {}
-
resource_changes = template.get("resource_changes")
+
if resource_changes and isinstance(resource_changes, list):
- resource_changes_map = {
- change.get("address", ""): change
- for change in resource_changes
- }
+ for resource in resource_changes:
+ resource_changes_map[resource["address"]] = resource
+ changes = []
+
+ # before + after are None when resources are created/destroyed, so make them safe
+ change_before = resource["change"]["before"] or {}
+ change_after = resource["change"]["after"] or {}
+
+ for field, value in change_before.items():
+ if field in LINE_FIELD_NAMES:
+ continue # don't care about line #s
+ if value != change_after.get(field):
+ changes.append(field)
+
+ resource_changes_map[resource["address"]][TF_PLAN_RESOURCE_CHANGE_KEYS] = changes
return resource_changes_map
|
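With the patch above, every parsed plan resource also carries the new `__change_keys__` list, so a custom Python check can react to *which* attributes changed rather than only the change action. A hedged sketch of such a check, assuming checkov's standard `BaseResourceCheck` interface (the check name, id, and resource type are placeholders):

```python
from typing import Any, Dict, List

from checkov.common.models.enums import CheckCategories, CheckResult
from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck


class OnlyTagChangesAllowed(BaseResourceCheck):
    """Illustrative check: fail when anything other than tags changes on a bucket."""

    def __init__(self) -> None:
        super().__init__(
            name="Only tag changes are allowed on this resource",
            id="CKV_CUSTOM_1",  # placeholder id
            categories=[CheckCategories.GENERAL_SECURITY],
            supported_resources=["aws_s3_bucket"],
        )

    def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:
        changed_keys = conf.get("__change_keys__") or []
        if any(key != "tags" for key in changed_keys):
            return CheckResult.FAILED
        return CheckResult.PASSED


check = OnlyTagChangesAllowed()
```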
{"golden_diff": "diff --git a/checkov/terraform/plan_parser.py b/checkov/terraform/plan_parser.py\n--- a/checkov/terraform/plan_parser.py\n+++ b/checkov/terraform/plan_parser.py\n@@ -7,12 +7,14 @@\n \n from checkov.common.graph.graph_builder import CustomAttributes\n from checkov.common.parsers.node import ListNode\n+from checkov.common.util.consts import LINE_FIELD_NAMES\n from checkov.common.util.type_forcers import force_list\n from checkov.terraform.context_parsers.tf_plan import parse\n \n SIMPLE_TYPES = (str, int, float, bool)\n TF_PLAN_RESOURCE_ADDRESS = \"__address__\"\n TF_PLAN_RESOURCE_CHANGE_ACTIONS = \"__change_actions__\"\n+TF_PLAN_RESOURCE_CHANGE_KEYS = \"__change_keys__\"\n \n RESOURCE_TYPES_JSONIFY = {\n \"aws_batch_job_definition\": \"container_properties\",\n@@ -129,7 +131,6 @@\n :param resource: tf planned_values resource block\n :param conf: tf configuration resource block\n :param resource_changes: tf resource_changes block\n-\n :returns:\n - resource_block: a list of strings representing the header columns\n - prepared: whether conditions met to prepare data\n@@ -161,6 +162,7 @@\n changes = resource_changes.get(resource_address) # type:ignore[arg-type] # becaus eit can be None\n if changes:\n resource_conf[TF_PLAN_RESOURCE_CHANGE_ACTIONS] = changes.get(\"change\", {}).get(\"actions\") or []\n+ resource_conf[TF_PLAN_RESOURCE_CHANGE_KEYS] = changes.get(TF_PLAN_RESOURCE_CHANGE_KEYS) or []\n \n resource_block[resource_type][resource.get(\"name\", \"default\")] = resource_conf\n prepared = True\n@@ -186,7 +188,7 @@\n nested_blocks = _find_child_modules(\n child_modules=nested_child_modules,\n resource_changes=resource_changes,\n- root_module_conf=root_module_conf\n+ root_module_conf=root_module_conf,\n )\n for block_type, resource_blocks in nested_blocks.items():\n blocks[block_type].extend(resource_blocks)\n@@ -238,13 +240,24 @@\n \"\"\"Returns a resource address to resource changes dict\"\"\"\n \n resource_changes_map = {}\n-\n resource_changes = template.get(\"resource_changes\")\n+\n if resource_changes and isinstance(resource_changes, list):\n- resource_changes_map = {\n- change.get(\"address\", \"\"): change\n- for change in resource_changes\n- }\n+ for resource in resource_changes:\n+ resource_changes_map[resource[\"address\"]] = resource\n+ changes = []\n+\n+ # before + after are None when resources are created/destroyed, so make them safe\n+ change_before = resource[\"change\"][\"before\"] or {}\n+ change_after = resource[\"change\"][\"after\"] or {}\n+\n+ for field, value in change_before.items():\n+ if field in LINE_FIELD_NAMES:\n+ continue # don't care about line #s\n+ if value != change_after.get(field):\n+ changes.append(field)\n+\n+ resource_changes_map[resource[\"address\"]][TF_PLAN_RESOURCE_CHANGE_KEYS] = changes\n \n return resource_changes_map\n", "issue": "[checks][python] How to determine which fields are changing in a resource\nWhile you can use `TF_PLAN_RESOURCE_CHANGE_ACTIONS` to see \"did a resource change\", I'm wanting to know \"what field(s) changed\"?\r\n\r\nThe plan data definitely exposes this, so it should be possible, at the very least. Is this already available and just not explicitly documented? 
If not, is that on the roadmap to add?\n", "before_files": [{"content": "from __future__ import annotations\n\nimport itertools\nimport json\nimport logging\nfrom typing import Optional, Tuple, Dict, List, Any, cast\n\nfrom checkov.common.graph.graph_builder import CustomAttributes\nfrom checkov.common.parsers.node import ListNode\nfrom checkov.common.util.type_forcers import force_list\nfrom checkov.terraform.context_parsers.tf_plan import parse\n\nSIMPLE_TYPES = (str, int, float, bool)\nTF_PLAN_RESOURCE_ADDRESS = \"__address__\"\nTF_PLAN_RESOURCE_CHANGE_ACTIONS = \"__change_actions__\"\n\nRESOURCE_TYPES_JSONIFY = {\n \"aws_batch_job_definition\": \"container_properties\",\n \"aws_ecs_task_definition\": \"container_definitions\",\n \"aws_iam_policy\": \"policy\",\n \"aws_iam_role\": \"assume_role_policy\",\n \"aws_iam_role_policy\": \"policy\",\n \"aws_iam_group_policy\": \"policy\",\n \"aws_iam_user_policy\": \"policy\",\n \"aws_ssoadmin_permission_set_inline_policy\": \"inline_policy\",\n}\n\n\ndef _is_simple_type(obj: Any) -> bool:\n if obj is None:\n return True\n if isinstance(obj, SIMPLE_TYPES):\n return True\n return False\n\n\ndef _is_list_of_simple_types(obj: Any) -> bool:\n if not isinstance(obj, list):\n return False\n for i in obj:\n if not _is_simple_type(i):\n return False\n return True\n\n\ndef _is_list_of_dicts(obj: Any) -> bool:\n if not isinstance(obj, list):\n return False\n for i in obj:\n if isinstance(i, dict):\n return True\n return False\n\n\ndef _hclify(\n obj: dict[str, Any],\n conf: dict[str, Any] | None = None,\n parent_key: str | None = None,\n resource_type: str | None = None,\n) -> dict[str, list[Any]]:\n ret_dict = {}\n\n if not isinstance(obj, dict):\n raise Exception(\"this method receives only dicts\")\n\n if hasattr(obj, \"start_mark\") and hasattr(obj, \"end_mark\"):\n obj[\"start_line\"] = obj.start_mark.line\n obj[\"end_line\"] = obj.end_mark.line\n for key, value in obj.items():\n if _is_simple_type(value) or _is_list_of_simple_types(value):\n if parent_key == \"tags\":\n ret_dict[key] = value\n else:\n # only wrap non-lists into a list\n ret_dict[key] = _clean_simple_type_list([value])\n\n if _is_list_of_dicts(value):\n child_list = []\n conf_val = conf.get(key, []) if conf else []\n if not isinstance(conf_val, list):\n # this occurs, when a resource in the current state has no value for that argument\n conf_val = [conf_val]\n\n for internal_val, internal_conf_val in itertools.zip_longest(value, conf_val):\n if isinstance(internal_val, dict):\n child_list.append(_hclify(internal_val, internal_conf_val, parent_key=key))\n if key == \"tags\":\n ret_dict[key] = [child_list]\n else:\n ret_dict[key] = child_list\n if isinstance(value, dict):\n child_dict = _hclify(value, parent_key=key)\n if parent_key == \"tags\":\n ret_dict[key] = child_dict\n else:\n ret_dict[key] = [child_dict]\n if conf and isinstance(conf, dict):\n _add_references(obj=obj, conf=conf, return_resource=ret_dict)\n\n if resource_type and resource_type in RESOURCE_TYPES_JSONIFY:\n # values shouldn't be encapsulated in lists\n dict_value = jsonify(obj=obj, resource_type=resource_type)\n if dict_value is not None:\n ret_dict[RESOURCE_TYPES_JSONIFY[resource_type]] = force_list(dict_value)\n\n return ret_dict\n\n\ndef jsonify(obj: dict[str, Any], resource_type: str) -> dict[str, Any] | None:\n \"\"\"Tries to create a dict from a string of a supported resource type attribute\"\"\"\n\n jsonify_key = RESOURCE_TYPES_JSONIFY[resource_type]\n if jsonify_key in obj:\n try:\n return 
cast(\"dict[str, Any]\", json.loads(obj[jsonify_key]))\n except json.JSONDecodeError:\n logging.debug(\n f\"Attribute {jsonify_key} of resource type {resource_type} is not json encoded {obj[jsonify_key]}\"\n )\n\n return None\n\n\ndef _prepare_resource_block(\n resource: dict[str, Any], conf: dict[str, Any] | None, resource_changes: dict[str, dict[str, Any]]\n) -> tuple[dict[str, dict[str, Any]], str, bool]:\n \"\"\"hclify resource if pre-conditions met.\n\n :param resource: tf planned_values resource block\n :param conf: tf configuration resource block\n :param resource_changes: tf resource_changes block\n\n :returns:\n - resource_block: a list of strings representing the header columns\n - prepared: whether conditions met to prepare data\n \"\"\"\n\n resource_block: Dict[str, Dict[str, Any]] = {}\n resource_type = resource[\"type\"]\n resource_block[resource_type] = {}\n prepared = False\n mode = \"\"\n block_type = \"\"\n if \"mode\" in resource:\n mode = resource[\"mode\"]\n block_type = \"data\" if mode == \"data\" else \"resource\"\n\n # Rare cases where data block appears in resources with same name as resource block and only partial values\n # and where *_module resources don't have values field\n if mode in (\"managed\", \"data\"):\n expressions = conf.get(\"expressions\") if conf else None\n\n resource_conf = _hclify(\n obj=resource.get(\"values\", {\"start_line\": 0, \"end_line\": 0}),\n conf=expressions,\n resource_type=resource_type,\n )\n resource_address: str | None = resource.get(\"address\")\n resource_conf[TF_PLAN_RESOURCE_ADDRESS] = resource_address # type:ignore[assignment] # special field\n\n changes = resource_changes.get(resource_address) # type:ignore[arg-type] # becaus eit can be None\n if changes:\n resource_conf[TF_PLAN_RESOURCE_CHANGE_ACTIONS] = changes.get(\"change\", {}).get(\"actions\") or []\n\n resource_block[resource_type][resource.get(\"name\", \"default\")] = resource_conf\n prepared = True\n return resource_block, block_type, prepared\n\n\ndef _find_child_modules(\n child_modules: ListNode, resource_changes: dict[str, dict[str, Any]], root_module_conf: dict[str, Any]\n) -> dict[str, list[dict[str, dict[str, Any]]]]:\n \"\"\" Find all child modules if any. 
Including any amount of nested child modules.\n\n :param child_modules: list of terraform child_module objects\n :param resource_changes: a resource address to resource changes dict\n :param root_module_conf: configuration block of the root module\n :returns:\n list of terraform resource blocks\n \"\"\"\n\n blocks: dict[str, list[dict[str, dict[str, Any]]]] = {\"resource\": [], \"data\": []}\n for child_module in child_modules:\n nested_child_modules = child_module.get(\"child_modules\", [])\n if nested_child_modules:\n nested_blocks = _find_child_modules(\n child_modules=nested_child_modules,\n resource_changes=resource_changes,\n root_module_conf=root_module_conf\n )\n for block_type, resource_blocks in nested_blocks.items():\n blocks[block_type].extend(resource_blocks)\n\n module_address = child_module.get(\"address\", \"\")\n module_call_resources = _get_module_call_resources(\n module_address=module_address,\n root_module_conf=root_module_conf,\n )\n\n for resource in child_module.get(\"resources\", []):\n module_call_conf = None\n if module_address and module_call_resources:\n module_call_conf = next(\n (\n module_call_resource\n for module_call_resource in module_call_resources\n if f\"{module_address}.{module_call_resource['address']}\" == resource[\"address\"]\n ),\n None\n )\n\n resource_block, block_type, prepared = _prepare_resource_block(\n resource=resource,\n conf=module_call_conf,\n resource_changes=resource_changes,\n )\n if prepared is True:\n if block_type == \"resource\":\n blocks[\"resource\"].append(resource_block)\n elif block_type == \"data\":\n blocks[\"data\"].append(resource_block)\n return blocks\n\n\ndef _get_module_call_resources(module_address: str, root_module_conf: dict[str, Any]) -> list[dict[str, Any]]:\n \"\"\"Extracts the resources from the 'module_calls' block under 'configuration'\"\"\"\n\n for module_name in module_address.split(\".\"):\n if module_name == \"module\":\n # module names are always prefixed with 'module.', therefore skip it\n continue\n root_module_conf = root_module_conf.get(\"module_calls\", {}).get(module_name, {}).get(\"module\", {})\n\n return root_module_conf.get(\"resources\", [])\n\n\ndef _get_resource_changes(template: dict[str, Any]) -> dict[str, dict[str, Any]]:\n \"\"\"Returns a resource address to resource changes dict\"\"\"\n\n resource_changes_map = {}\n\n resource_changes = template.get(\"resource_changes\")\n if resource_changes and isinstance(resource_changes, list):\n resource_changes_map = {\n change.get(\"address\", \"\"): change\n for change in resource_changes\n }\n\n return resource_changes_map\n\n\ndef _add_references(obj: dict[str, Any], conf: dict[str, Any], return_resource: dict[str, Any]) -> None:\n \"\"\"Adds references to the resources in the TF plan definition\"\"\"\n\n for conf_key, conf_value in conf.items():\n if not isinstance(conf_value, dict) or \"references\" not in conf_value:\n # only interested in dict with a \"references\" key\n continue\n\n ref = next((x for x in conf_value[\"references\"] or [] if not x.startswith((\"var.\", \"local.\"))), None)\n if ref:\n if conf_key not in obj:\n return_resource[conf_key] = [ref]\n elif obj[conf_key] is None:\n return_resource[conf_key] = [ref]\n elif isinstance(obj[conf_key], list) and any(obj_value is None for obj_value in obj[conf_key]):\n return_resource[conf_key] = [[obj_value for obj_value in obj[conf_key] if obj_value is not None] + [ref]]\n\n return_resource.setdefault(CustomAttributes.REFERENCES, []).append(conf_value[\"references\"])\n\n\ndef 
parse_tf_plan(tf_plan_file: str, out_parsing_errors: Dict[str, str]) -> Tuple[Optional[Dict[str, Any]], Optional[List[Tuple[int, str]]]]:\n \"\"\"\n :type tf_plan_file: str - path to plan file\n :rtype: tf_definition dictionary and template_lines of the plan file\n \"\"\"\n tf_definition: Dict[str, Any] = {\"resource\": [], \"data\": []}\n template, template_lines = parse(tf_plan_file, out_parsing_errors)\n if not template:\n return None, None\n\n resource_changes = _get_resource_changes(template=template)\n\n for resource in template.get(\"planned_values\", {}).get(\"root_module\", {}).get(\"resources\", []):\n conf = next(\n (\n x\n for x in template.get(\"configuration\", {}).get(\"root_module\", {}).get(\"resources\", [])\n if x[\"type\"] == resource[\"type\"] and x[\"name\"] == resource[\"name\"]\n ),\n None,\n )\n resource_block, block_type, prepared = _prepare_resource_block(\n resource=resource,\n conf=conf,\n resource_changes=resource_changes,\n )\n if prepared is True:\n if block_type == \"resource\":\n tf_definition[\"resource\"].append(resource_block)\n elif block_type == \"data\":\n tf_definition[\"data\"].append(resource_block)\n child_modules = template.get(\"planned_values\", {}).get(\"root_module\", {}).get(\"child_modules\", [])\n root_module_conf = template.get(\"configuration\", {}).get(\"root_module\", {})\n # Terraform supports modules within modules so we need to search\n # in nested modules to find all resource blocks\n module_blocks = _find_child_modules(\n child_modules=child_modules,\n resource_changes=resource_changes,\n root_module_conf=root_module_conf,\n )\n for block_type, resource_blocks in module_blocks.items():\n tf_definition[block_type].extend(resource_blocks)\n return tf_definition, template_lines\n\n\ndef _clean_simple_type_list(value_list: List[Any]) -> List[Any]:\n \"\"\"\n Given a list of simple types return a cleaned list of simple types.\n Converts booleans that are input as strings back to booleans to maintain consistent expectations for later evaluation.\n Sometimes Terraform Plan will output Map values as strings regardless of boolean input.\n \"\"\"\n for i in range(len(value_list)):\n if isinstance(value_list[i], str):\n lower_case_value = value_list[i].lower()\n if lower_case_value == \"true\":\n value_list[i] = True\n if lower_case_value == \"false\":\n value_list[i] = False\n return value_list\n", "path": "checkov/terraform/plan_parser.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport itertools\nimport json\nimport logging\nfrom typing import Optional, Tuple, Dict, List, Any, cast\n\nfrom checkov.common.graph.graph_builder import CustomAttributes\nfrom checkov.common.parsers.node import ListNode\nfrom checkov.common.util.consts import LINE_FIELD_NAMES\nfrom checkov.common.util.type_forcers import force_list\nfrom checkov.terraform.context_parsers.tf_plan import parse\n\nSIMPLE_TYPES = (str, int, float, bool)\nTF_PLAN_RESOURCE_ADDRESS = \"__address__\"\nTF_PLAN_RESOURCE_CHANGE_ACTIONS = \"__change_actions__\"\nTF_PLAN_RESOURCE_CHANGE_KEYS = \"__change_keys__\"\n\nRESOURCE_TYPES_JSONIFY = {\n \"aws_batch_job_definition\": \"container_properties\",\n \"aws_ecs_task_definition\": \"container_definitions\",\n \"aws_iam_policy\": \"policy\",\n \"aws_iam_role\": \"assume_role_policy\",\n \"aws_iam_role_policy\": \"policy\",\n \"aws_iam_group_policy\": \"policy\",\n \"aws_iam_user_policy\": \"policy\",\n \"aws_ssoadmin_permission_set_inline_policy\": \"inline_policy\",\n}\n\n\ndef _is_simple_type(obj: Any) -> 
bool:\n if obj is None:\n return True\n if isinstance(obj, SIMPLE_TYPES):\n return True\n return False\n\n\ndef _is_list_of_simple_types(obj: Any) -> bool:\n if not isinstance(obj, list):\n return False\n for i in obj:\n if not _is_simple_type(i):\n return False\n return True\n\n\ndef _is_list_of_dicts(obj: Any) -> bool:\n if not isinstance(obj, list):\n return False\n for i in obj:\n if isinstance(i, dict):\n return True\n return False\n\n\ndef _hclify(\n obj: dict[str, Any],\n conf: dict[str, Any] | None = None,\n parent_key: str | None = None,\n resource_type: str | None = None,\n) -> dict[str, list[Any]]:\n ret_dict = {}\n\n if not isinstance(obj, dict):\n raise Exception(\"this method receives only dicts\")\n\n if hasattr(obj, \"start_mark\") and hasattr(obj, \"end_mark\"):\n obj[\"start_line\"] = obj.start_mark.line\n obj[\"end_line\"] = obj.end_mark.line\n for key, value in obj.items():\n if _is_simple_type(value) or _is_list_of_simple_types(value):\n if parent_key == \"tags\":\n ret_dict[key] = value\n else:\n # only wrap non-lists into a list\n ret_dict[key] = _clean_simple_type_list([value])\n\n if _is_list_of_dicts(value):\n child_list = []\n conf_val = conf.get(key, []) if conf else []\n if not isinstance(conf_val, list):\n # this occurs, when a resource in the current state has no value for that argument\n conf_val = [conf_val]\n\n for internal_val, internal_conf_val in itertools.zip_longest(value, conf_val):\n if isinstance(internal_val, dict):\n child_list.append(_hclify(internal_val, internal_conf_val, parent_key=key))\n if key == \"tags\":\n ret_dict[key] = [child_list]\n else:\n ret_dict[key] = child_list\n if isinstance(value, dict):\n child_dict = _hclify(value, parent_key=key)\n if parent_key == \"tags\":\n ret_dict[key] = child_dict\n else:\n ret_dict[key] = [child_dict]\n if conf and isinstance(conf, dict):\n _add_references(obj=obj, conf=conf, return_resource=ret_dict)\n\n if resource_type and resource_type in RESOURCE_TYPES_JSONIFY:\n # values shouldn't be encapsulated in lists\n dict_value = jsonify(obj=obj, resource_type=resource_type)\n if dict_value is not None:\n ret_dict[RESOURCE_TYPES_JSONIFY[resource_type]] = force_list(dict_value)\n\n return ret_dict\n\n\ndef jsonify(obj: dict[str, Any], resource_type: str) -> dict[str, Any] | None:\n \"\"\"Tries to create a dict from a string of a supported resource type attribute\"\"\"\n\n jsonify_key = RESOURCE_TYPES_JSONIFY[resource_type]\n if jsonify_key in obj:\n try:\n return cast(\"dict[str, Any]\", json.loads(obj[jsonify_key]))\n except json.JSONDecodeError:\n logging.debug(\n f\"Attribute {jsonify_key} of resource type {resource_type} is not json encoded {obj[jsonify_key]}\"\n )\n\n return None\n\n\ndef _prepare_resource_block(\n resource: dict[str, Any], conf: dict[str, Any] | None, resource_changes: dict[str, dict[str, Any]]\n) -> tuple[dict[str, dict[str, Any]], str, bool]:\n \"\"\"hclify resource if pre-conditions met.\n\n :param resource: tf planned_values resource block\n :param conf: tf configuration resource block\n :param resource_changes: tf resource_changes block\n :returns:\n - resource_block: a list of strings representing the header columns\n - prepared: whether conditions met to prepare data\n \"\"\"\n\n resource_block: Dict[str, Dict[str, Any]] = {}\n resource_type = resource[\"type\"]\n resource_block[resource_type] = {}\n prepared = False\n mode = \"\"\n block_type = \"\"\n if \"mode\" in resource:\n mode = resource[\"mode\"]\n block_type = \"data\" if mode == \"data\" else \"resource\"\n\n # 
Rare cases where data block appears in resources with same name as resource block and only partial values\n # and where *_module resources don't have values field\n if mode in (\"managed\", \"data\"):\n expressions = conf.get(\"expressions\") if conf else None\n\n resource_conf = _hclify(\n obj=resource.get(\"values\", {\"start_line\": 0, \"end_line\": 0}),\n conf=expressions,\n resource_type=resource_type,\n )\n resource_address: str | None = resource.get(\"address\")\n resource_conf[TF_PLAN_RESOURCE_ADDRESS] = resource_address # type:ignore[assignment] # special field\n\n changes = resource_changes.get(resource_address) # type:ignore[arg-type] # becaus eit can be None\n if changes:\n resource_conf[TF_PLAN_RESOURCE_CHANGE_ACTIONS] = changes.get(\"change\", {}).get(\"actions\") or []\n resource_conf[TF_PLAN_RESOURCE_CHANGE_KEYS] = changes.get(TF_PLAN_RESOURCE_CHANGE_KEYS) or []\n\n resource_block[resource_type][resource.get(\"name\", \"default\")] = resource_conf\n prepared = True\n return resource_block, block_type, prepared\n\n\ndef _find_child_modules(\n child_modules: ListNode, resource_changes: dict[str, dict[str, Any]], root_module_conf: dict[str, Any]\n) -> dict[str, list[dict[str, dict[str, Any]]]]:\n \"\"\" Find all child modules if any. Including any amount of nested child modules.\n\n :param child_modules: list of terraform child_module objects\n :param resource_changes: a resource address to resource changes dict\n :param root_module_conf: configuration block of the root module\n :returns:\n list of terraform resource blocks\n \"\"\"\n\n blocks: dict[str, list[dict[str, dict[str, Any]]]] = {\"resource\": [], \"data\": []}\n for child_module in child_modules:\n nested_child_modules = child_module.get(\"child_modules\", [])\n if nested_child_modules:\n nested_blocks = _find_child_modules(\n child_modules=nested_child_modules,\n resource_changes=resource_changes,\n root_module_conf=root_module_conf,\n )\n for block_type, resource_blocks in nested_blocks.items():\n blocks[block_type].extend(resource_blocks)\n\n module_address = child_module.get(\"address\", \"\")\n module_call_resources = _get_module_call_resources(\n module_address=module_address,\n root_module_conf=root_module_conf,\n )\n\n for resource in child_module.get(\"resources\", []):\n module_call_conf = None\n if module_address and module_call_resources:\n module_call_conf = next(\n (\n module_call_resource\n for module_call_resource in module_call_resources\n if f\"{module_address}.{module_call_resource['address']}\" == resource[\"address\"]\n ),\n None\n )\n\n resource_block, block_type, prepared = _prepare_resource_block(\n resource=resource,\n conf=module_call_conf,\n resource_changes=resource_changes,\n )\n if prepared is True:\n if block_type == \"resource\":\n blocks[\"resource\"].append(resource_block)\n elif block_type == \"data\":\n blocks[\"data\"].append(resource_block)\n return blocks\n\n\ndef _get_module_call_resources(module_address: str, root_module_conf: dict[str, Any]) -> list[dict[str, Any]]:\n \"\"\"Extracts the resources from the 'module_calls' block under 'configuration'\"\"\"\n\n for module_name in module_address.split(\".\"):\n if module_name == \"module\":\n # module names are always prefixed with 'module.', therefore skip it\n continue\n root_module_conf = root_module_conf.get(\"module_calls\", {}).get(module_name, {}).get(\"module\", {})\n\n return root_module_conf.get(\"resources\", [])\n\n\ndef _get_resource_changes(template: dict[str, Any]) -> dict[str, dict[str, Any]]:\n \"\"\"Returns a 
resource address to resource changes dict\"\"\"\n\n resource_changes_map = {}\n resource_changes = template.get(\"resource_changes\")\n\n if resource_changes and isinstance(resource_changes, list):\n for resource in resource_changes:\n resource_changes_map[resource[\"address\"]] = resource\n changes = []\n\n # before + after are None when resources are created/destroyed, so make them safe\n change_before = resource[\"change\"][\"before\"] or {}\n change_after = resource[\"change\"][\"after\"] or {}\n\n for field, value in change_before.items():\n if field in LINE_FIELD_NAMES:\n continue # don't care about line #s\n if value != change_after.get(field):\n changes.append(field)\n\n resource_changes_map[resource[\"address\"]][TF_PLAN_RESOURCE_CHANGE_KEYS] = changes\n\n return resource_changes_map\n\n\ndef _add_references(obj: dict[str, Any], conf: dict[str, Any], return_resource: dict[str, Any]) -> None:\n \"\"\"Adds references to the resources in the TF plan definition\"\"\"\n\n for conf_key, conf_value in conf.items():\n if not isinstance(conf_value, dict) or \"references\" not in conf_value:\n # only interested in dict with a \"references\" key\n continue\n\n ref = next((x for x in conf_value[\"references\"] or [] if not x.startswith((\"var.\", \"local.\"))), None)\n if ref:\n if conf_key not in obj:\n return_resource[conf_key] = [ref]\n elif obj[conf_key] is None:\n return_resource[conf_key] = [ref]\n elif isinstance(obj[conf_key], list) and any(obj_value is None for obj_value in obj[conf_key]):\n return_resource[conf_key] = [[obj_value for obj_value in obj[conf_key] if obj_value is not None] + [ref]]\n\n return_resource.setdefault(CustomAttributes.REFERENCES, []).append(conf_value[\"references\"])\n\n\ndef parse_tf_plan(tf_plan_file: str, out_parsing_errors: Dict[str, str]) -> Tuple[Optional[Dict[str, Any]], Optional[List[Tuple[int, str]]]]:\n \"\"\"\n :type tf_plan_file: str - path to plan file\n :rtype: tf_definition dictionary and template_lines of the plan file\n \"\"\"\n tf_definition: Dict[str, Any] = {\"resource\": [], \"data\": []}\n template, template_lines = parse(tf_plan_file, out_parsing_errors)\n if not template:\n return None, None\n\n resource_changes = _get_resource_changes(template=template)\n\n for resource in template.get(\"planned_values\", {}).get(\"root_module\", {}).get(\"resources\", []):\n conf = next(\n (\n x\n for x in template.get(\"configuration\", {}).get(\"root_module\", {}).get(\"resources\", [])\n if x[\"type\"] == resource[\"type\"] and x[\"name\"] == resource[\"name\"]\n ),\n None,\n )\n resource_block, block_type, prepared = _prepare_resource_block(\n resource=resource,\n conf=conf,\n resource_changes=resource_changes,\n )\n if prepared is True:\n if block_type == \"resource\":\n tf_definition[\"resource\"].append(resource_block)\n elif block_type == \"data\":\n tf_definition[\"data\"].append(resource_block)\n child_modules = template.get(\"planned_values\", {}).get(\"root_module\", {}).get(\"child_modules\", [])\n root_module_conf = template.get(\"configuration\", {}).get(\"root_module\", {})\n # Terraform supports modules within modules so we need to search\n # in nested modules to find all resource blocks\n module_blocks = _find_child_modules(\n child_modules=child_modules,\n resource_changes=resource_changes,\n root_module_conf=root_module_conf,\n )\n for block_type, resource_blocks in module_blocks.items():\n tf_definition[block_type].extend(resource_blocks)\n return tf_definition, template_lines\n\n\ndef _clean_simple_type_list(value_list: 
List[Any]) -> List[Any]:\n \"\"\"\n Given a list of simple types return a cleaned list of simple types.\n Converts booleans that are input as strings back to booleans to maintain consistent expectations for later evaluation.\n Sometimes Terraform Plan will output Map values as strings regardless of boolean input.\n \"\"\"\n for i in range(len(value_list)):\n if isinstance(value_list[i], str):\n lower_case_value = value_list[i].lower()\n if lower_case_value == \"true\":\n value_list[i] = True\n if lower_case_value == \"false\":\n value_list[i] = False\n return value_list\n", "path": "checkov/terraform/plan_parser.py"}]}
| 4,085 | 686 |
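Editorial aside on the Terraform plan-parser row above (not part of the dataset row): the `_clean_simple_type_list` helper shown there converts `"true"`/`"false"` strings back to booleans because Terraform plan output sometimes renders map values as strings. A minimal, non-mutating sketch of the same normalization, written here purely for illustration:

```python
from typing import Any, List


def clean_simple_type_list(values: List[Any]) -> List[Any]:
    """Return a copy with 'true'/'false' strings converted back to booleans."""
    cleaned: List[Any] = []
    for item in values:
        if isinstance(item, str) and item.lower() == "true":
            cleaned.append(True)
        elif isinstance(item, str) and item.lower() == "false":
            cleaned.append(False)
        else:
            cleaned.append(item)  # leave non-boolean-looking items untouched
    return cleaned


print(clean_simple_type_list(["true", "False", 1, "yes"]))  # [True, False, 1, 'yes']
```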
gh_patches_debug_21318
|
rasdani/github-patches
|
git_diff
|
openai__gym-1400
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
why MultiDiscrete dtype=int8
MultiDiscrete dtype is hardcoded as int8.
Is this a bug? What happens when we need more than 256 categories?
```
space = MultiDiscrete([7, 1024])
space.sample()
```
https://github.com/openai/gym/blob/e944885e3b31a10fb6973093b39ff7682ef3aa3d/gym/spaces/multi_discrete.py#L10
--- END ISSUE ---
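Editorial illustration of the concern raised above (not part of the original issue): a narrow integer dtype cannot hold large category counts, while the same sampling pattern works once a wide dtype such as `int64` is used.

```python
import numpy as np

# int8 spans only -128..127, so a category count like 1024 cannot be represented in it.
assert np.iinfo(np.int8).max == 127

# The sampling pattern used by MultiDiscrete works fine with a wide dtype.
nvec = np.asarray([7, 1024], dtype=np.int64)
rng = np.random.RandomState(0)
sample = (rng.random_sample(nvec.shape) * nvec).astype(np.int64)
assert (0 <= sample).all() and (sample < nvec).all()
print(sample.dtype, sample)  # int64, two values within [0, nvec)
```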
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gym/spaces/multi_discrete.py`
Content:
```
1 import gym
2 import numpy as np
3 from .space import Space
4
5
6 class MultiDiscrete(Space):
7 """
8 - The multi-discrete action space consists of a series of discrete action spaces with different number of actions in eachs
9 - It is useful to represent game controllers or keyboards where each key can be represented as a discrete action space
10 - It is parametrized by passing an array of positive integers specifying number of actions for each discrete action space
11
12 Note: A value of 0 always need to represent the NOOP action.
13
14 e.g. Nintendo Game Controller
15 - Can be conceptualized as 3 discrete action spaces:
16
17 1) Arrow Keys: Discrete 5 - NOOP[0], UP[1], RIGHT[2], DOWN[3], LEFT[4] - params: min: 0, max: 4
18 2) Button A: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1
19 3) Button B: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1
20
21 - Can be initialized as
22
23 MultiDiscrete([ 5, 2, 2 ])
24
25 """
26 def __init__(self, nvec):
27
28 """
29 nvec: vector of counts of each categorical variable
30 """
31 assert (np.array(nvec) > 0).all(), 'nvec (counts) have to be positive'
32 self.nvec = np.asarray(nvec, dtype=np.uint32)
33
34 super(MultiDiscrete, self).__init__(self.nvec.shape, np.uint32)
35 self.np_random = np.random.RandomState()
36
37 def seed(self, seed):
38 self.np_random.seed(seed)
39
40 def sample(self):
41 return (self.np_random.random_sample(self.nvec.shape) * self.nvec).astype(self.dtype)
42
43 def contains(self, x):
44 # if nvec is uint32 and space dtype is uint32, then 0 <= x < self.nvec guarantees that x
45 # is within correct bounds for space dtype (even though x does not have to be unsigned)
46 return (0 <= x).all() and (x < self.nvec).all()
47
48 def to_jsonable(self, sample_n):
49 return [sample.tolist() for sample in sample_n]
50
51 def from_jsonable(self, sample_n):
52 return np.array(sample_n)
53
54 def __repr__(self):
55 return "MultiDiscrete({})".format(self.nvec)
56
57 def __eq__(self, other):
58 return np.all(self.nvec == other.nvec)
59
```
Path: `gym/spaces/space.py`
Content:
```
1 import numpy as np
2
3
4 class Space(object):
5 """Defines the observation and action spaces, so you can write generic
6 code that applies to any Env. For example, you can choose a random
7 action.
8 """
9 def __init__(self, shape=None, dtype=None):
10 import numpy as np # takes about 300-400ms to import, so we load lazily
11 self.shape = None if shape is None else tuple(shape)
12 self.dtype = None if dtype is None else np.dtype(dtype)
13
14 def sample(self):
15 """
16 Uniformly randomly sample a random element of this space
17 """
18 raise NotImplementedError
19
20 def seed(self, seed):
21 """Set the seed for this space's pseudo-random number generator. """
22 raise NotImplementedError
23
24 def contains(self, x):
25 """
26 Return boolean specifying if x is a valid
27 member of this space
28 """
29 raise NotImplementedError
30
31 def __contains__(self, x):
32 return self.contains(x)
33
34 def to_jsonable(self, sample_n):
35 """Convert a batch of samples from this space to a JSONable data type."""
36 # By default, assume identity is JSONable
37 return sample_n
38
39 def from_jsonable(self, sample_n):
40 """Convert a JSONable data type to a batch of samples from this space."""
41 # By default, assume identity is JSONable
42 return sample_n
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gym/spaces/multi_discrete.py b/gym/spaces/multi_discrete.py
--- a/gym/spaces/multi_discrete.py
+++ b/gym/spaces/multi_discrete.py
@@ -1,4 +1,3 @@
-import gym
import numpy as np
from .space import Space
@@ -29,9 +28,9 @@
nvec: vector of counts of each categorical variable
"""
assert (np.array(nvec) > 0).all(), 'nvec (counts) have to be positive'
- self.nvec = np.asarray(nvec, dtype=np.uint32)
+ self.nvec = np.asarray(nvec, dtype=np.int64)
- super(MultiDiscrete, self).__init__(self.nvec.shape, np.uint32)
+ super(MultiDiscrete, self).__init__(self.nvec.shape, np.int64)
self.np_random = np.random.RandomState()
def seed(self, seed):
diff --git a/gym/spaces/space.py b/gym/spaces/space.py
--- a/gym/spaces/space.py
+++ b/gym/spaces/space.py
@@ -1,6 +1,3 @@
-import numpy as np
-
-
class Space(object):
"""Defines the observation and action spaces, so you can write generic
code that applies to any Env. For example, you can choose a random
|
{"golden_diff": "diff --git a/gym/spaces/multi_discrete.py b/gym/spaces/multi_discrete.py\n--- a/gym/spaces/multi_discrete.py\n+++ b/gym/spaces/multi_discrete.py\n@@ -1,4 +1,3 @@\n-import gym\n import numpy as np\n from .space import Space\n \n@@ -29,9 +28,9 @@\n nvec: vector of counts of each categorical variable\n \"\"\"\n assert (np.array(nvec) > 0).all(), 'nvec (counts) have to be positive'\n- self.nvec = np.asarray(nvec, dtype=np.uint32)\n+ self.nvec = np.asarray(nvec, dtype=np.int64)\n \n- super(MultiDiscrete, self).__init__(self.nvec.shape, np.uint32)\n+ super(MultiDiscrete, self).__init__(self.nvec.shape, np.int64)\n self.np_random = np.random.RandomState()\n \n def seed(self, seed):\ndiff --git a/gym/spaces/space.py b/gym/spaces/space.py\n--- a/gym/spaces/space.py\n+++ b/gym/spaces/space.py\n@@ -1,6 +1,3 @@\n-import numpy as np\n-\n-\n class Space(object):\n \"\"\"Defines the observation and action spaces, so you can write generic\n code that applies to any Env. For example, you can choose a random\n", "issue": "why MultiDiscrete dtype=int8 \nMultiDiscrete dtype is hardcoded as int8. \r\nIs this a bug ? What happens when we need more than 256 category \r\n```\r\nspace = MultiDiscrete([7, 1024])\r\nspace.sample()\r\n```\r\n\r\nhttps://github.com/openai/gym/blob/e944885e3b31a10fb6973093b39ff7682ef3aa3d/gym/spaces/multi_discrete.py#L10\n", "before_files": [{"content": "import gym\nimport numpy as np\nfrom .space import Space\n\n\nclass MultiDiscrete(Space):\n \"\"\"\n - The multi-discrete action space consists of a series of discrete action spaces with different number of actions in eachs\n - It is useful to represent game controllers or keyboards where each key can be represented as a discrete action space\n - It is parametrized by passing an array of positive integers specifying number of actions for each discrete action space\n\n Note: A value of 0 always need to represent the NOOP action.\n\n e.g. 
Nintendo Game Controller\n - Can be conceptualized as 3 discrete action spaces:\n\n 1) Arrow Keys: Discrete 5 - NOOP[0], UP[1], RIGHT[2], DOWN[3], LEFT[4] - params: min: 0, max: 4\n 2) Button A: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1\n 3) Button B: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1\n\n - Can be initialized as\n\n MultiDiscrete([ 5, 2, 2 ])\n\n \"\"\"\n def __init__(self, nvec):\n \n \"\"\"\n nvec: vector of counts of each categorical variable\n \"\"\"\n assert (np.array(nvec) > 0).all(), 'nvec (counts) have to be positive'\n self.nvec = np.asarray(nvec, dtype=np.uint32)\n\n super(MultiDiscrete, self).__init__(self.nvec.shape, np.uint32)\n self.np_random = np.random.RandomState()\n\n def seed(self, seed):\n self.np_random.seed(seed)\n\n def sample(self):\n return (self.np_random.random_sample(self.nvec.shape) * self.nvec).astype(self.dtype)\n\n def contains(self, x):\n # if nvec is uint32 and space dtype is uint32, then 0 <= x < self.nvec guarantees that x\n # is within correct bounds for space dtype (even though x does not have to be unsigned)\n return (0 <= x).all() and (x < self.nvec).all()\n\n def to_jsonable(self, sample_n):\n return [sample.tolist() for sample in sample_n]\n\n def from_jsonable(self, sample_n):\n return np.array(sample_n)\n\n def __repr__(self):\n return \"MultiDiscrete({})\".format(self.nvec)\n\n def __eq__(self, other):\n return np.all(self.nvec == other.nvec)\n", "path": "gym/spaces/multi_discrete.py"}, {"content": "import numpy as np\n\n\nclass Space(object):\n \"\"\"Defines the observation and action spaces, so you can write generic\n code that applies to any Env. For example, you can choose a random\n action.\n \"\"\"\n def __init__(self, shape=None, dtype=None):\n import numpy as np # takes about 300-400ms to import, so we load lazily\n self.shape = None if shape is None else tuple(shape)\n self.dtype = None if dtype is None else np.dtype(dtype)\n\n def sample(self):\n \"\"\"\n Uniformly randomly sample a random element of this space\n \"\"\"\n raise NotImplementedError\n\n def seed(self, seed):\n \"\"\"Set the seed for this space's pseudo-random number generator. \"\"\"\n raise NotImplementedError\n\n def contains(self, x):\n \"\"\"\n Return boolean specifying if x is a valid\n member of this space\n \"\"\"\n raise NotImplementedError\n\n def __contains__(self, x):\n return self.contains(x)\n\n def to_jsonable(self, sample_n):\n \"\"\"Convert a batch of samples from this space to a JSONable data type.\"\"\"\n # By default, assume identity is JSONable\n return sample_n\n\n def from_jsonable(self, sample_n):\n \"\"\"Convert a JSONable data type to a batch of samples from this space.\"\"\"\n # By default, assume identity is JSONable\n return sample_n\n", "path": "gym/spaces/space.py"}], "after_files": [{"content": "import numpy as np\nfrom .space import Space\n\n\nclass MultiDiscrete(Space):\n \"\"\"\n - The multi-discrete action space consists of a series of discrete action spaces with different number of actions in eachs\n - It is useful to represent game controllers or keyboards where each key can be represented as a discrete action space\n - It is parametrized by passing an array of positive integers specifying number of actions for each discrete action space\n\n Note: A value of 0 always need to represent the NOOP action.\n\n e.g. 
Nintendo Game Controller\n - Can be conceptualized as 3 discrete action spaces:\n\n 1) Arrow Keys: Discrete 5 - NOOP[0], UP[1], RIGHT[2], DOWN[3], LEFT[4] - params: min: 0, max: 4\n 2) Button A: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1\n 3) Button B: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1\n\n - Can be initialized as\n\n MultiDiscrete([ 5, 2, 2 ])\n\n \"\"\"\n def __init__(self, nvec):\n \n \"\"\"\n nvec: vector of counts of each categorical variable\n \"\"\"\n assert (np.array(nvec) > 0).all(), 'nvec (counts) have to be positive'\n self.nvec = np.asarray(nvec, dtype=np.int64)\n\n super(MultiDiscrete, self).__init__(self.nvec.shape, np.int64)\n self.np_random = np.random.RandomState()\n\n def seed(self, seed):\n self.np_random.seed(seed)\n\n def sample(self):\n return (self.np_random.random_sample(self.nvec.shape) * self.nvec).astype(self.dtype)\n\n def contains(self, x):\n # if nvec is uint32 and space dtype is uint32, then 0 <= x < self.nvec guarantees that x\n # is within correct bounds for space dtype (even though x does not have to be unsigned)\n return (0 <= x).all() and (x < self.nvec).all()\n\n def to_jsonable(self, sample_n):\n return [sample.tolist() for sample in sample_n]\n\n def from_jsonable(self, sample_n):\n return np.array(sample_n)\n\n def __repr__(self):\n return \"MultiDiscrete({})\".format(self.nvec)\n\n def __eq__(self, other):\n return np.all(self.nvec == other.nvec)\n", "path": "gym/spaces/multi_discrete.py"}, {"content": "class Space(object):\n \"\"\"Defines the observation and action spaces, so you can write generic\n code that applies to any Env. For example, you can choose a random\n action.\n \"\"\"\n def __init__(self, shape=None, dtype=None):\n import numpy as np # takes about 300-400ms to import, so we load lazily\n self.shape = None if shape is None else tuple(shape)\n self.dtype = None if dtype is None else np.dtype(dtype)\n\n def sample(self):\n \"\"\"\n Uniformly randomly sample a random element of this space\n \"\"\"\n raise NotImplementedError\n\n def seed(self, seed):\n \"\"\"Set the seed for this space's pseudo-random number generator. \"\"\"\n raise NotImplementedError\n\n def contains(self, x):\n \"\"\"\n Return boolean specifying if x is a valid\n member of this space\n \"\"\"\n raise NotImplementedError\n\n def __contains__(self, x):\n return self.contains(x)\n\n def to_jsonable(self, sample_n):\n \"\"\"Convert a batch of samples from this space to a JSONable data type.\"\"\"\n # By default, assume identity is JSONable\n return sample_n\n\n def from_jsonable(self, sample_n):\n \"\"\"Convert a JSONable data type to a batch of samples from this space.\"\"\"\n # By default, assume identity is JSONable\n return sample_n\n", "path": "gym/spaces/space.py"}]}
| 1,484 | 314 |
gh_patches_debug_56609
|
rasdani/github-patches
|
git_diff
|
spacetelescope__jwql-678
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Upgrade Django to 3.0
Django 3.0 is out, and since it is a major release, we should consider upgrading to this.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import numpy as np
2 from setuptools import setup
3 from setuptools import find_packages
4
5 VERSION = '0.24.0'
6
7 AUTHORS = 'Matthew Bourque, Lauren Chambers, Misty Cracraft, Mike Engesser, Mees Fix, Joe Filippazzo, Bryan Hilbert, '
8 AUTHORS += 'Graham Kanarek, Teagan King, Catherine Martlin, Maria Pena-Guerrero, Johannes Sahlmann, Ben Sunnquist'
9
10 DESCRIPTION = 'The James Webb Space Telescope Quicklook Project'
11
12 DEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/jwst_reffiles#egg=jwst_reffiles']
13
14 REQUIRES = [
15 'asdf>=2.3.3',
16 'astropy>=3.2.1',
17 'astroquery>=0.3.9',
18 'authlib',
19 'bokeh>=1.0,<1.4',
20 'codecov',
21 'crds',
22 'cryptography',
23 'django>=2.0,<3.0',
24 'flake8',
25 'inflection',
26 'ipython',
27 'jinja2',
28 'jsonschema',
29 'jwedb>=0.0.3',
30 'jwst',
31 'matplotlib',
32 'nodejs',
33 'numpy',
34 'numpydoc',
35 'pandas',
36 'psycopg2',
37 'pysiaf',
38 'pytest',
39 'pytest-cov',
40 'scipy',
41 'sphinx',
42 'sqlalchemy',
43 'stsci_rtd_theme',
44 'twine',
45 'wtforms'
46 ]
47
48 setup(
49 name='jwql',
50 version=VERSION,
51 description=DESCRIPTION,
52 url='https://github.com/spacetelescope/jwql.git',
53 author=AUTHORS,
54 author_email='[email protected]',
55 license='BSD',
56 keywords=['astronomy', 'python'],
57 classifiers=['Programming Language :: Python'],
58 packages=find_packages(),
59 install_requires=REQUIRES,
60 dependency_links=DEPENDENCY_LINKS,
61 include_package_data=True,
62 include_dirs=[np.get_include()],
63 )
64
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -20,7 +20,7 @@
'codecov',
'crds',
'cryptography',
- 'django>=2.0,<3.0',
+ 'django',
'flake8',
'inflection',
'ipython',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -20,7 +20,7 @@\n 'codecov',\n 'crds',\n 'cryptography',\n- 'django>=2.0,<3.0',\n+ 'django',\n 'flake8',\n 'inflection',\n 'ipython',\n", "issue": "Upgrade Django to 3.0\nDjango 3.0 is out, and since it is a major release, we should consider upgrading to this.\n", "before_files": [{"content": "import numpy as np\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nVERSION = '0.24.0'\n\nAUTHORS = 'Matthew Bourque, Lauren Chambers, Misty Cracraft, Mike Engesser, Mees Fix, Joe Filippazzo, Bryan Hilbert, '\nAUTHORS += 'Graham Kanarek, Teagan King, Catherine Martlin, Maria Pena-Guerrero, Johannes Sahlmann, Ben Sunnquist'\n\nDESCRIPTION = 'The James Webb Space Telescope Quicklook Project'\n\nDEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/jwst_reffiles#egg=jwst_reffiles']\n\nREQUIRES = [\n 'asdf>=2.3.3',\n 'astropy>=3.2.1',\n 'astroquery>=0.3.9',\n 'authlib',\n 'bokeh>=1.0,<1.4',\n 'codecov',\n 'crds',\n 'cryptography',\n 'django>=2.0,<3.0',\n 'flake8',\n 'inflection',\n 'ipython',\n 'jinja2',\n 'jsonschema',\n 'jwedb>=0.0.3',\n 'jwst',\n 'matplotlib',\n 'nodejs',\n 'numpy',\n 'numpydoc',\n 'pandas',\n 'psycopg2',\n 'pysiaf',\n 'pytest',\n 'pytest-cov',\n 'scipy',\n 'sphinx',\n 'sqlalchemy',\n 'stsci_rtd_theme',\n 'twine',\n 'wtforms'\n]\n\nsetup(\n name='jwql',\n version=VERSION,\n description=DESCRIPTION,\n url='https://github.com/spacetelescope/jwql.git',\n author=AUTHORS,\n author_email='[email protected]',\n license='BSD',\n keywords=['astronomy', 'python'],\n classifiers=['Programming Language :: Python'],\n packages=find_packages(),\n install_requires=REQUIRES,\n dependency_links=DEPENDENCY_LINKS,\n include_package_data=True,\n include_dirs=[np.get_include()],\n)\n", "path": "setup.py"}], "after_files": [{"content": "import numpy as np\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nVERSION = '0.24.0'\n\nAUTHORS = 'Matthew Bourque, Lauren Chambers, Misty Cracraft, Mike Engesser, Mees Fix, Joe Filippazzo, Bryan Hilbert, '\nAUTHORS += 'Graham Kanarek, Teagan King, Catherine Martlin, Maria Pena-Guerrero, Johannes Sahlmann, Ben Sunnquist'\n\nDESCRIPTION = 'The James Webb Space Telescope Quicklook Project'\n\nDEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/jwst_reffiles#egg=jwst_reffiles']\n\nREQUIRES = [\n 'asdf>=2.3.3',\n 'astropy>=3.2.1',\n 'astroquery>=0.3.9',\n 'authlib',\n 'bokeh>=1.0,<1.4',\n 'codecov',\n 'crds',\n 'cryptography',\n 'django',\n 'flake8',\n 'inflection',\n 'ipython',\n 'jinja2',\n 'jsonschema',\n 'jwedb>=0.0.3',\n 'jwst',\n 'matplotlib',\n 'nodejs',\n 'numpy',\n 'numpydoc',\n 'pandas',\n 'psycopg2',\n 'pysiaf',\n 'pytest',\n 'pytest-cov',\n 'scipy',\n 'sphinx',\n 'sqlalchemy',\n 'stsci_rtd_theme',\n 'twine',\n 'wtforms'\n]\n\nsetup(\n name='jwql',\n version=VERSION,\n description=DESCRIPTION,\n url='https://github.com/spacetelescope/jwql.git',\n author=AUTHORS,\n author_email='[email protected]',\n license='BSD',\n keywords=['astronomy', 'python'],\n classifiers=['Programming Language :: Python'],\n packages=find_packages(),\n install_requires=REQUIRES,\n dependency_links=DEPENDENCY_LINKS,\n include_package_data=True,\n include_dirs=[np.get_include()],\n)\n", "path": "setup.py"}]}
| 868 | 80 |
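Editorial aside on the setup.py change in the row above (not taken from the repository): the patch drops the upper bound entirely, which accepts any future Django major; a more conservative alternative would move the cap instead. Both lines below are illustrative only.

```python
# Hypothetical alternatives for the REQUIRES list -- neither is quoted from the project.
REQUIRES_UNPINNED = ['django']            # what the patch does: any Django version is accepted
REQUIRES_CAPPED = ['django>=2.2,<3.1']    # upgrades to 3.0 while still guarding against 3.1+
```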
gh_patches_debug_47926
|
rasdani/github-patches
|
git_diff
|
uccser__cs-unplugged-717
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Workaround Crowdin bug where integer yaml keys are not preserved
When downloading in-context localisation files, integer keys in YAML files are not preserved. This is only an issue in the file `topics/content/en/programming-challenges-structure-difficulties.yaml`, which uses the difficulty number as the key.
As a workaround, we can use the string value of the integer as the key, i.e. `"0"` instead of `0`.
--- END ISSUE ---
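An illustrative sketch of the workaround described above (not part of the original issue; the difficulty names are made up for demonstration): keep the YAML keys as strings so Crowdin preserves them, then cast back to `int` wherever a numeric level is needed.

```python
import yaml  # PyYAML assumed available

document = """
"0": beginner
"1": intermediate
"2": advanced
"""

difficulties = yaml.safe_load(document)   # keys survive as strings: "0", "1", "2"
levels = {int(key): name for key, name in difficulties.items()}
print(levels)  # {0: 'beginner', 1: 'intermediate', 2: 'advanced'}
```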
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py`
Content:
```
1 """Custom loader for loading structure of programming challenges."""
2
3 import os
4 from django.db import transaction
5 from utils.errors.MissingRequiredFieldError import MissingRequiredFieldError
6 from utils.TranslatableModelLoader import TranslatableModelLoader
7 from topics.models import ProgrammingChallengeLanguage, ProgrammingChallengeDifficulty
8
9
10 class ProgrammingChallengesStructureLoader(TranslatableModelLoader):
11 """Custom loader for loading structure of programming challenges."""
12
13 @transaction.atomic
14 def load(self):
15 """Load the content for structure of programming challenges.
16
17 Raises:
18 MissingRequiredFieldError: when no object can be found with the matching
19 attribute.
20 """
21 structure = self.load_yaml_file(self.structure_file_path)
22
23 prog_languages = structure.get("languages", None)
24 difficulty_levels = structure.get("difficulties", None)
25 if None in [prog_languages, difficulty_levels]:
26 raise MissingRequiredFieldError(
27 self.structure_file_path,
28 ["lanugages", "difficulties"],
29 "Programming Challenge Structure"
30 )
31
32 # Add "-languages" to the structure filename
33 prog_languages_translation_filename = "{}-languages.yaml".format(
34 os.path.splitext(self.structure_filename)[0]
35 )
36 prog_languages_translations = self.get_yaml_translations(
37 prog_languages_translation_filename,
38 required_slugs=prog_languages.keys(),
39 required_fields=["name"]
40 )
41
42 for (prog_language, prog_language_data) in prog_languages.items():
43
44 if prog_language_data is None:
45 raise MissingRequiredFieldError(
46 self.structure_file_path,
47 ["number"],
48 "Programming Challenge Language"
49 )
50
51 # Check for required fields
52 prog_language_number = prog_language_data.get("number", None)
53 if prog_language_number is None:
54 raise MissingRequiredFieldError(
55 self.structure_file_path,
56 ["number"],
57 "Programming Challenge Language"
58 )
59
60 # Check if icon is given
61 if "icon" in prog_language_data:
62 prog_language_icon = prog_language_data["icon"]
63 else:
64 prog_language_icon = None
65
66 new_prog_language = ProgrammingChallengeLanguage(
67 slug=prog_language,
68 number=prog_language_number,
69 icon=prog_language_icon
70 )
71
72 translations = prog_languages_translations.get(prog_language, dict())
73 self.populate_translations(new_prog_language, translations)
74 self.mark_translation_availability(new_prog_language, required_fields=["name"])
75 new_prog_language.save()
76
77 self.log("Added programming language: {}".format(new_prog_language.__str__()))
78
79 # Add "-languages" to the structure filename
80 difficulties_translation_filename = "{}-difficulties.yaml".format(
81 os.path.splitext(self.structure_filename)[0]
82 )
83 difficulties_translations = self.get_yaml_translations(
84 difficulties_translation_filename,
85 required_slugs=difficulty_levels,
86 required_fields=["name"],
87 )
88
89 for difficulty in difficulty_levels:
90
91 new_difficulty = ProgrammingChallengeDifficulty(
92 level=difficulty,
93 )
94
95 translations = difficulties_translations.get(difficulty, dict())
96 self.populate_translations(new_difficulty, translations)
97 self.mark_translation_availability(new_difficulty, required_fields=["name"])
98 new_difficulty.save()
99
100 self.log("Added programming difficulty level: {}".format(new_difficulty.__str__()))
101
102 self.log("")
103
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py b/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py
--- a/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py
+++ b/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py
@@ -89,7 +89,7 @@
for difficulty in difficulty_levels:
new_difficulty = ProgrammingChallengeDifficulty(
- level=difficulty,
+ level=int(difficulty),
)
translations = difficulties_translations.get(difficulty, dict())
|
{"golden_diff": "diff --git a/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py b/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py\n--- a/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py\n+++ b/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py\n@@ -89,7 +89,7 @@\n for difficulty in difficulty_levels:\n \n new_difficulty = ProgrammingChallengeDifficulty(\n- level=difficulty,\n+ level=int(difficulty),\n )\n \n translations = difficulties_translations.get(difficulty, dict())\n", "issue": "Workaround Crowdin bug where integer yaml keys are not preserved\nWhen downloading in-context localisation files, integer keys in YAML files are not preserved. This is only an issue in the file `topics/content/en/programming-challenges-structure-difficulties.yaml`, which uses the difficulty number as the key.\r\n\r\nAs a work around, we can use the string value of the integer as the key, i.e. `\"0\"` instead of `0`\n", "before_files": [{"content": "\"\"\"Custom loader for loading structure of programming challenges.\"\"\"\n\nimport os\nfrom django.db import transaction\nfrom utils.errors.MissingRequiredFieldError import MissingRequiredFieldError\nfrom utils.TranslatableModelLoader import TranslatableModelLoader\nfrom topics.models import ProgrammingChallengeLanguage, ProgrammingChallengeDifficulty\n\n\nclass ProgrammingChallengesStructureLoader(TranslatableModelLoader):\n \"\"\"Custom loader for loading structure of programming challenges.\"\"\"\n\n @transaction.atomic\n def load(self):\n \"\"\"Load the content for structure of programming challenges.\n\n Raises:\n MissingRequiredFieldError: when no object can be found with the matching\n attribute.\n \"\"\"\n structure = self.load_yaml_file(self.structure_file_path)\n\n prog_languages = structure.get(\"languages\", None)\n difficulty_levels = structure.get(\"difficulties\", None)\n if None in [prog_languages, difficulty_levels]:\n raise MissingRequiredFieldError(\n self.structure_file_path,\n [\"lanugages\", \"difficulties\"],\n \"Programming Challenge Structure\"\n )\n\n # Add \"-languages\" to the structure filename\n prog_languages_translation_filename = \"{}-languages.yaml\".format(\n os.path.splitext(self.structure_filename)[0]\n )\n prog_languages_translations = self.get_yaml_translations(\n prog_languages_translation_filename,\n required_slugs=prog_languages.keys(),\n required_fields=[\"name\"]\n )\n\n for (prog_language, prog_language_data) in prog_languages.items():\n\n if prog_language_data is None:\n raise MissingRequiredFieldError(\n self.structure_file_path,\n [\"number\"],\n \"Programming Challenge Language\"\n )\n\n # Check for required fields\n prog_language_number = prog_language_data.get(\"number\", None)\n if prog_language_number is None:\n raise MissingRequiredFieldError(\n self.structure_file_path,\n [\"number\"],\n \"Programming Challenge Language\"\n )\n\n # Check if icon is given\n if \"icon\" in prog_language_data:\n prog_language_icon = prog_language_data[\"icon\"]\n else:\n prog_language_icon = None\n\n new_prog_language = ProgrammingChallengeLanguage(\n slug=prog_language,\n number=prog_language_number,\n icon=prog_language_icon\n )\n\n translations = prog_languages_translations.get(prog_language, dict())\n self.populate_translations(new_prog_language, translations)\n self.mark_translation_availability(new_prog_language, required_fields=[\"name\"])\n new_prog_language.save()\n\n self.log(\"Added programming 
language: {}\".format(new_prog_language.__str__()))\n\n # Add \"-languages\" to the structure filename\n difficulties_translation_filename = \"{}-difficulties.yaml\".format(\n os.path.splitext(self.structure_filename)[0]\n )\n difficulties_translations = self.get_yaml_translations(\n difficulties_translation_filename,\n required_slugs=difficulty_levels,\n required_fields=[\"name\"],\n )\n\n for difficulty in difficulty_levels:\n\n new_difficulty = ProgrammingChallengeDifficulty(\n level=difficulty,\n )\n\n translations = difficulties_translations.get(difficulty, dict())\n self.populate_translations(new_difficulty, translations)\n self.mark_translation_availability(new_difficulty, required_fields=[\"name\"])\n new_difficulty.save()\n\n self.log(\"Added programming difficulty level: {}\".format(new_difficulty.__str__()))\n\n self.log(\"\")\n", "path": "csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py"}], "after_files": [{"content": "\"\"\"Custom loader for loading structure of programming challenges.\"\"\"\n\nimport os\nfrom django.db import transaction\nfrom utils.errors.MissingRequiredFieldError import MissingRequiredFieldError\nfrom utils.TranslatableModelLoader import TranslatableModelLoader\nfrom topics.models import ProgrammingChallengeLanguage, ProgrammingChallengeDifficulty\n\n\nclass ProgrammingChallengesStructureLoader(TranslatableModelLoader):\n \"\"\"Custom loader for loading structure of programming challenges.\"\"\"\n\n @transaction.atomic\n def load(self):\n \"\"\"Load the content for structure of programming challenges.\n\n Raises:\n MissingRequiredFieldError: when no object can be found with the matching\n attribute.\n \"\"\"\n structure = self.load_yaml_file(self.structure_file_path)\n\n prog_languages = structure.get(\"languages\", None)\n difficulty_levels = structure.get(\"difficulties\", None)\n if None in [prog_languages, difficulty_levels]:\n raise MissingRequiredFieldError(\n self.structure_file_path,\n [\"lanugages\", \"difficulties\"],\n \"Programming Challenge Structure\"\n )\n\n # Add \"-languages\" to the structure filename\n prog_languages_translation_filename = \"{}-languages.yaml\".format(\n os.path.splitext(self.structure_filename)[0]\n )\n prog_languages_translations = self.get_yaml_translations(\n prog_languages_translation_filename,\n required_slugs=prog_languages.keys(),\n required_fields=[\"name\"]\n )\n\n for (prog_language, prog_language_data) in prog_languages.items():\n\n if prog_language_data is None:\n raise MissingRequiredFieldError(\n self.structure_file_path,\n [\"number\"],\n \"Programming Challenge Language\"\n )\n\n # Check for required fields\n prog_language_number = prog_language_data.get(\"number\", None)\n if prog_language_number is None:\n raise MissingRequiredFieldError(\n self.structure_file_path,\n [\"number\"],\n \"Programming Challenge Language\"\n )\n\n # Check if icon is given\n if \"icon\" in prog_language_data:\n prog_language_icon = prog_language_data[\"icon\"]\n else:\n prog_language_icon = None\n\n new_prog_language = ProgrammingChallengeLanguage(\n slug=prog_language,\n number=prog_language_number,\n icon=prog_language_icon\n )\n\n translations = prog_languages_translations.get(prog_language, dict())\n self.populate_translations(new_prog_language, translations)\n self.mark_translation_availability(new_prog_language, required_fields=[\"name\"])\n new_prog_language.save()\n\n self.log(\"Added programming language: {}\".format(new_prog_language.__str__()))\n\n # Add \"-languages\" to the 
structure filename\n difficulties_translation_filename = \"{}-difficulties.yaml\".format(\n os.path.splitext(self.structure_filename)[0]\n )\n difficulties_translations = self.get_yaml_translations(\n difficulties_translation_filename,\n required_slugs=difficulty_levels,\n required_fields=[\"name\"],\n )\n\n for difficulty in difficulty_levels:\n\n new_difficulty = ProgrammingChallengeDifficulty(\n level=int(difficulty),\n )\n\n translations = difficulties_translations.get(difficulty, dict())\n self.populate_translations(new_difficulty, translations)\n self.mark_translation_availability(new_difficulty, required_fields=[\"name\"])\n new_difficulty.save()\n\n self.log(\"Added programming difficulty level: {}\".format(new_difficulty.__str__()))\n\n self.log(\"\")\n", "path": "csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py"}]}
| 1,250 | 133 |
gh_patches_debug_21496
|
rasdani/github-patches
|
git_diff
|
getnikola__nikola-2682
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CATEGORY_ALLOW_HIERARCHIES does not work
### Environment (if reporting a bug)
**Python Version:**
3.5.2
**Nikola Version:**
7.8.3 and master
**Operating System:**
Ubuntu 16.04
### Description:
I installed nikola in virtualenv from pip.
Then I set CATEGORY_ALLOW_HIERARCHIES to True in conf.py.
I created a new post and set its category to ".. category: code/python".
The error when building the site:
```
TaskError - taskid:render_taxonomies:output/categories/cat_code/index.html
PythonAction Error
Traceback (most recent call last):
File "/home/denin/Workspace/nikola/nikola3/lib/python3.5/site-packages/doit/action.py", line 403, in execute
returned_value = self.py_callable(*self.args, **kwargs)
File "/home/denin/Workspace/nikola/nikola3/lib/python3.5/site-packages/nikola/nikola.py", line 1396, in render_template
template_name, None, local_context)
File "/home/denin/Workspace/nikola/nikola3/lib/python3.5/site-packages/nikola/plugins/template/mako.py", line 117, in render_template
data = template.render_unicode(**context)
File "/home/denin/Workspace/nikola/nikola3/lib/python3.5/site-packages/mako/template.py", line 471, in render_unicode
as_unicode=True)
File "/home/denin/Workspace/nikola/nikola3/lib/python3.5/site-packages/mako/runtime.py", line 838, in _render
**_kwargs_for_callable(callable_, data))
File "/home/denin/Workspace/nikola/nikola3/lib/python3.5/site-packages/mako/runtime.py", line 873, in _render_context
_exec_template(inherit, lclcontext, args=args, kwargs=kwargs)
File "/home/denin/Workspace/nikola/nikola3/lib/python3.5/site-packages/mako/runtime.py", line 899, in _exec_template
callable_(context, *args, **kwargs)
File "/home/denin/Workspace/nikola/nikola3/my_first_site/cache/.mako.tmp/base.tmpl.py", line 127, in render_body
context['self'].content(**pageargs)
File "/home/denin/Workspace/nikola/nikola3/my_first_site/cache/.mako.tmp/tag.tmpl.py", line 137, in render_content
for name, link in subcategories:
ValueError: too many values to unpack (expected 2)
```
--- END ISSUE ---
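Editorial reproduction of the traceback's root cause (not part of the original report; the example values are made up): the template iterates `for name, link in subcategories:`, so handing it 4-tuples raises exactly this `ValueError`, which is why the fix trims `subcats` back to `(name, link)` pairs.

```python
# Shape of the tuples built before the fix (illustrative values only)
subcategories = [("python", "/categories/cat_code-python/", "code/python", ["code", "python"])]

try:
    for name, link in subcategories:   # tag.tmpl expects (name, link) pairs
        print(name, link)
except ValueError as exc:
    print(exc)                         # too many values to unpack (expected 2)

# After the fix, subcategories holds 2-tuples and the same loop unpacks cleanly
for name, link in [("python", "/categories/cat_code-python/")]:
    print(name, link)
```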
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nikola/plugins/task/categories.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2012-2017 Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 """Render the category pages and feeds."""
28
29 from __future__ import unicode_literals
30
31 from nikola.plugin_categories import Taxonomy
32 from nikola import utils
33
34
35 class ClassifyCategories(Taxonomy):
36 """Classify the posts by categories."""
37
38 name = "classify_categories"
39
40 classification_name = "category"
41 overview_page_variable_name = "categories"
42 overview_page_items_variable_name = "cat_items"
43 overview_page_hierarchy_variable_name = "cat_hierarchy"
44 more_than_one_classifications_per_post = False
45 has_hierarchy = True
46 include_posts_from_subhierarchies = True
47 include_posts_into_hierarchy_root = False
48 show_list_as_subcategories_list = False
49 generate_atom_feeds_for_post_lists = True
50 template_for_classification_overview = "tags.tmpl"
51 always_disable_rss = False
52 apply_to_posts = True
53 apply_to_pages = False
54 minimum_post_count_per_classification_in_overview = 1
55 omit_empty_classifications = True
56 also_create_classifications_from_other_languages = True
57 path_handler_docstrings = {
58 'category_index': """A link to the category index.
59
60 Example:
61
62 link://category_index => /categories/index.html""",
63 'category': """A link to a category. Takes page number as optional keyword argument.
64
65 Example:
66
67 link://category/dogs => /categories/dogs.html""",
68 'category_atom': """A link to a category's Atom feed.
69
70 Example:
71
72 link://category_atom/dogs => /categories/dogs.atom""",
73 'category_rss': """A link to a category's RSS feed.
74
75 Example:
76
77 link://category_rss/dogs => /categories/dogs.xml""",
78 }
79
80 def set_site(self, site):
81 """Set site, which is a Nikola instance."""
82 super(ClassifyCategories, self).set_site(site)
83 self.show_list_as_index = self.site.config['CATEGORY_PAGES_ARE_INDEXES']
84 self.template_for_single_list = "tagindex.tmpl" if self.show_list_as_index else "tag.tmpl"
85
86 def is_enabled(self, lang=None):
87 """Return True if this taxonomy is enabled, or False otherwise."""
88 return True
89
90 def classify(self, post, lang):
91 """Classify the given post for the given language."""
92 cat = post.meta('category', lang=lang).strip()
93 return [cat] if cat else []
94
95 def get_classification_friendly_name(self, classification, lang, only_last_component=False):
96 """Extract a friendly name from the classification."""
97 classification = self.extract_hierarchy(classification)
98 return classification[-1] if classification else ''
99
100 def get_overview_path(self, lang, dest_type='page'):
101 """A path handler for the list of all classifications."""
102 if self.site.config['CATEGORIES_INDEX_PATH'](lang):
103 path = self.site.config['CATEGORIES_INDEX_PATH'](lang)
104 if path.endswith('/index'): # TODO: remove in v8
105 utils.LOGGER.warn("CATEGORIES_INDEX_PATH for language {0} is missing a .html extension. Please update your configuration!".format(lang))
106 path += '.html'
107 return [_f for _f in [path] if _f], 'never'
108 else:
109 return [_f for _f in [self.site.config['CATEGORY_PATH'](lang)] if _f], 'always'
110
111 def slugify_tag_name(self, name, lang):
112 """Slugify a tag name."""
113 if self.site.config['SLUG_TAG_PATH']:
114 name = utils.slugify(name, lang)
115 return name
116
117 def slugify_category_name(self, path, lang):
118 """Slugify a category name."""
119 if lang is None: # TODO: remove in v8
120 utils.LOGGER.warn("ClassifyCategories.slugify_category_name() called without language!")
121 lang = ''
122 if self.site.config['CATEGORY_OUTPUT_FLAT_HIERARCHY']:
123 path = path[-1:] # only the leaf
124 result = [self.slugify_tag_name(part, lang) for part in path]
125 result[0] = self.site.config['CATEGORY_PREFIX'] + result[0]
126 if not self.site.config['PRETTY_URLS']:
127 result = ['-'.join(result)]
128 return result
129
130 def get_path(self, classification, lang, dest_type='page'):
131 """A path handler for the given classification."""
132 return [_f for _f in [self.site.config['CATEGORY_PATH'](lang)] if _f] + self.slugify_category_name(classification, lang), 'auto'
133
134 def extract_hierarchy(self, classification):
135 """Given a classification, return a list of parts in the hierarchy."""
136 return utils.parse_escaped_hierarchical_category_name(classification)
137
138 def recombine_classification_from_hierarchy(self, hierarchy):
139 """Given a list of parts in the hierarchy, return the classification string."""
140 return utils.join_hierarchical_category_path(hierarchy)
141
142 def provide_overview_context_and_uptodate(self, lang):
143 """Provide data for the context and the uptodate list for the list of all classifiations."""
144 kw = {
145 'category_path': self.site.config['CATEGORY_PATH'],
146 'category_prefix': self.site.config['CATEGORY_PREFIX'],
147 "category_pages_are_indexes": self.site.config['CATEGORY_PAGES_ARE_INDEXES'],
148 "tzinfo": self.site.tzinfo,
149 "category_pages_descriptions": self.site.config['CATEGORY_PAGES_DESCRIPTIONS'],
150 "category_pages_titles": self.site.config['CATEGORY_PAGES_TITLES'],
151 }
152 context = {
153 "title": self.site.MESSAGES[lang]["Categories"],
154 "description": self.site.MESSAGES[lang]["Categories"],
155 "pagekind": ["list", "tags_page"],
156 }
157 kw.update(context)
158 return context, kw
159
160 def provide_context_and_uptodate(self, cat, lang, node=None):
161 """Provide data for the context and the uptodate list for the list of the given classifiation."""
162 cat_path = self.extract_hierarchy(cat)
163 kw = {
164 'category_path': self.site.config['CATEGORY_PATH'],
165 'category_prefix': self.site.config['CATEGORY_PREFIX'],
166 "category_pages_are_indexes": self.site.config['CATEGORY_PAGES_ARE_INDEXES'],
167 "tzinfo": self.site.tzinfo,
168 "category_pages_descriptions": self.site.config['CATEGORY_PAGES_DESCRIPTIONS'],
169 "category_pages_titles": self.site.config['CATEGORY_PAGES_TITLES'],
170 }
171 posts = self.site.posts_per_classification[self.classification_name][lang]
172 children = [child for child in node.children if len([post for post in posts.get(child.classification_name, []) if self.site.config['SHOW_UNTRANSLATED_POSTS'] or post.is_translation_available(lang)]) > 0]
173 subcats = [(child.name, self.site.link(self.classification_name, child.classification_name, lang), child.classification_name, child.classification_path) for child in children]
174 friendly_name = self.get_classification_friendly_name(cat, lang)
175 context = {
176 "title": self.site.config['CATEGORY_PAGES_TITLES'].get(lang, {}).get(cat, self.site.MESSAGES[lang]["Posts about %s"] % friendly_name),
177 "description": self.site.config['CATEGORY_PAGES_DESCRIPTIONS'].get(lang, {}).get(cat),
178 "kind": "category",
179 "pagekind": ["tag_page", "index" if self.show_list_as_index else "list"],
180 "tag": friendly_name,
181 "category": cat,
182 "category_path": cat_path,
183 "subcategories": subcats,
184 }
185 if self.show_list_as_index:
186 context["rss_link"] = """<link rel="alternate" type="application/rss+xml" type="application/rss+xml" title="RSS for tag {0} ({1})" href="{2}">""".format(friendly_name, lang, self.site.link("category_rss", cat, lang))
187 kw.update(context)
188 return context, kw
189
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nikola/plugins/task/categories.py b/nikola/plugins/task/categories.py
--- a/nikola/plugins/task/categories.py
+++ b/nikola/plugins/task/categories.py
@@ -170,7 +170,7 @@
}
posts = self.site.posts_per_classification[self.classification_name][lang]
children = [child for child in node.children if len([post for post in posts.get(child.classification_name, []) if self.site.config['SHOW_UNTRANSLATED_POSTS'] or post.is_translation_available(lang)]) > 0]
- subcats = [(child.name, self.site.link(self.classification_name, child.classification_name, lang), child.classification_name, child.classification_path) for child in children]
+ subcats = [(child.name, self.site.link(self.classification_name, child.classification_name, lang)) for child in children]
friendly_name = self.get_classification_friendly_name(cat, lang)
context = {
"title": self.site.config['CATEGORY_PAGES_TITLES'].get(lang, {}).get(cat, self.site.MESSAGES[lang]["Posts about %s"] % friendly_name),
|
{"golden_diff": "diff --git a/nikola/plugins/task/categories.py b/nikola/plugins/task/categories.py\n--- a/nikola/plugins/task/categories.py\n+++ b/nikola/plugins/task/categories.py\n@@ -170,7 +170,7 @@\n }\n posts = self.site.posts_per_classification[self.classification_name][lang]\n children = [child for child in node.children if len([post for post in posts.get(child.classification_name, []) if self.site.config['SHOW_UNTRANSLATED_POSTS'] or post.is_translation_available(lang)]) > 0]\n- subcats = [(child.name, self.site.link(self.classification_name, child.classification_name, lang), child.classification_name, child.classification_path) for child in children]\n+ subcats = [(child.name, self.site.link(self.classification_name, child.classification_name, lang)) for child in children]\n friendly_name = self.get_classification_friendly_name(cat, lang)\n context = {\n \"title\": self.site.config['CATEGORY_PAGES_TITLES'].get(lang, {}).get(cat, self.site.MESSAGES[lang][\"Posts about %s\"] % friendly_name),\n", "issue": "CATEGORY_ALLOW_HIERARCHIES does not work\n\r\n\r\n### Environment (if reporting a bug)\r\n\r\n**Python Version:**\r\n3.5.2\r\n\r\n**Nikola Version:**\r\n7.8.3 and master\r\n\r\n**Operating System:**\r\nUbuntu 16.04\r\n\r\n### Description:\r\n\r\nI installed nikola in virtualenv from pip.\r\nThen I set CATEGORY_ALLOW_HIERARCHIES to True in conf.py.\r\nI created new post and edit category to be \".. category: code/python\"\r\nThe error when building site:\r\n```\r\nTaskError - taskid:render_taxonomies:output/categories/cat_code/index.html\r\nPythonAction Error\r\nTraceback (most recent call last):\r\n File \"/home/denin/Workspace/nikola/nikola3/lib/python3.5/site-packages/doit/action.py\", line 403, in execute\r\n returned_value = self.py_callable(*self.args, **kwargs)\r\n File \"/home/denin/Workspace/nikola/nikola3/lib/python3.5/site-packages/nikola/nikola.py\", line 1396, in render_template\r\n template_name, None, local_context)\r\n File \"/home/denin/Workspace/nikola/nikola3/lib/python3.5/site-packages/nikola/plugins/template/mako.py\", line 117, in render_template\r\n data = template.render_unicode(**context)\r\n File \"/home/denin/Workspace/nikola/nikola3/lib/python3.5/site-packages/mako/template.py\", line 471, in render_unicode\r\n as_unicode=True)\r\n File \"/home/denin/Workspace/nikola/nikola3/lib/python3.5/site-packages/mako/runtime.py\", line 838, in _render\r\n **_kwargs_for_callable(callable_, data))\r\n File \"/home/denin/Workspace/nikola/nikola3/lib/python3.5/site-packages/mako/runtime.py\", line 873, in _render_context\r\n _exec_template(inherit, lclcontext, args=args, kwargs=kwargs)\r\n File \"/home/denin/Workspace/nikola/nikola3/lib/python3.5/site-packages/mako/runtime.py\", line 899, in _exec_template\r\n callable_(context, *args, **kwargs)\r\n File \"/home/denin/Workspace/nikola/nikola3/my_first_site/cache/.mako.tmp/base.tmpl.py\", line 127, in render_body\r\n context['self'].content(**pageargs)\r\n File \"/home/denin/Workspace/nikola/nikola3/my_first_site/cache/.mako.tmp/tag.tmpl.py\", line 137, in render_content\r\n for name, link in subcategories:\r\nValueError: too many values to unpack (expected 2)\r\n```\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2017 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without 
limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Render the category pages and feeds.\"\"\"\n\nfrom __future__ import unicode_literals\n\nfrom nikola.plugin_categories import Taxonomy\nfrom nikola import utils\n\n\nclass ClassifyCategories(Taxonomy):\n \"\"\"Classify the posts by categories.\"\"\"\n\n name = \"classify_categories\"\n\n classification_name = \"category\"\n overview_page_variable_name = \"categories\"\n overview_page_items_variable_name = \"cat_items\"\n overview_page_hierarchy_variable_name = \"cat_hierarchy\"\n more_than_one_classifications_per_post = False\n has_hierarchy = True\n include_posts_from_subhierarchies = True\n include_posts_into_hierarchy_root = False\n show_list_as_subcategories_list = False\n generate_atom_feeds_for_post_lists = True\n template_for_classification_overview = \"tags.tmpl\"\n always_disable_rss = False\n apply_to_posts = True\n apply_to_pages = False\n minimum_post_count_per_classification_in_overview = 1\n omit_empty_classifications = True\n also_create_classifications_from_other_languages = True\n path_handler_docstrings = {\n 'category_index': \"\"\"A link to the category index.\n\nExample:\n\nlink://category_index => /categories/index.html\"\"\",\n 'category': \"\"\"A link to a category. 
Takes page number as optional keyword argument.\n\nExample:\n\nlink://category/dogs => /categories/dogs.html\"\"\",\n 'category_atom': \"\"\"A link to a category's Atom feed.\n\nExample:\n\nlink://category_atom/dogs => /categories/dogs.atom\"\"\",\n 'category_rss': \"\"\"A link to a category's RSS feed.\n\nExample:\n\nlink://category_rss/dogs => /categories/dogs.xml\"\"\",\n }\n\n def set_site(self, site):\n \"\"\"Set site, which is a Nikola instance.\"\"\"\n super(ClassifyCategories, self).set_site(site)\n self.show_list_as_index = self.site.config['CATEGORY_PAGES_ARE_INDEXES']\n self.template_for_single_list = \"tagindex.tmpl\" if self.show_list_as_index else \"tag.tmpl\"\n\n def is_enabled(self, lang=None):\n \"\"\"Return True if this taxonomy is enabled, or False otherwise.\"\"\"\n return True\n\n def classify(self, post, lang):\n \"\"\"Classify the given post for the given language.\"\"\"\n cat = post.meta('category', lang=lang).strip()\n return [cat] if cat else []\n\n def get_classification_friendly_name(self, classification, lang, only_last_component=False):\n \"\"\"Extract a friendly name from the classification.\"\"\"\n classification = self.extract_hierarchy(classification)\n return classification[-1] if classification else ''\n\n def get_overview_path(self, lang, dest_type='page'):\n \"\"\"A path handler for the list of all classifications.\"\"\"\n if self.site.config['CATEGORIES_INDEX_PATH'](lang):\n path = self.site.config['CATEGORIES_INDEX_PATH'](lang)\n if path.endswith('/index'): # TODO: remove in v8\n utils.LOGGER.warn(\"CATEGORIES_INDEX_PATH for language {0} is missing a .html extension. Please update your configuration!\".format(lang))\n path += '.html'\n return [_f for _f in [path] if _f], 'never'\n else:\n return [_f for _f in [self.site.config['CATEGORY_PATH'](lang)] if _f], 'always'\n\n def slugify_tag_name(self, name, lang):\n \"\"\"Slugify a tag name.\"\"\"\n if self.site.config['SLUG_TAG_PATH']:\n name = utils.slugify(name, lang)\n return name\n\n def slugify_category_name(self, path, lang):\n \"\"\"Slugify a category name.\"\"\"\n if lang is None: # TODO: remove in v8\n utils.LOGGER.warn(\"ClassifyCategories.slugify_category_name() called without language!\")\n lang = ''\n if self.site.config['CATEGORY_OUTPUT_FLAT_HIERARCHY']:\n path = path[-1:] # only the leaf\n result = [self.slugify_tag_name(part, lang) for part in path]\n result[0] = self.site.config['CATEGORY_PREFIX'] + result[0]\n if not self.site.config['PRETTY_URLS']:\n result = ['-'.join(result)]\n return result\n\n def get_path(self, classification, lang, dest_type='page'):\n \"\"\"A path handler for the given classification.\"\"\"\n return [_f for _f in [self.site.config['CATEGORY_PATH'](lang)] if _f] + self.slugify_category_name(classification, lang), 'auto'\n\n def extract_hierarchy(self, classification):\n \"\"\"Given a classification, return a list of parts in the hierarchy.\"\"\"\n return utils.parse_escaped_hierarchical_category_name(classification)\n\n def recombine_classification_from_hierarchy(self, hierarchy):\n \"\"\"Given a list of parts in the hierarchy, return the classification string.\"\"\"\n return utils.join_hierarchical_category_path(hierarchy)\n\n def provide_overview_context_and_uptodate(self, lang):\n \"\"\"Provide data for the context and the uptodate list for the list of all classifiations.\"\"\"\n kw = {\n 'category_path': self.site.config['CATEGORY_PATH'],\n 'category_prefix': self.site.config['CATEGORY_PREFIX'],\n \"category_pages_are_indexes\": 
self.site.config['CATEGORY_PAGES_ARE_INDEXES'],\n \"tzinfo\": self.site.tzinfo,\n \"category_pages_descriptions\": self.site.config['CATEGORY_PAGES_DESCRIPTIONS'],\n \"category_pages_titles\": self.site.config['CATEGORY_PAGES_TITLES'],\n }\n context = {\n \"title\": self.site.MESSAGES[lang][\"Categories\"],\n \"description\": self.site.MESSAGES[lang][\"Categories\"],\n \"pagekind\": [\"list\", \"tags_page\"],\n }\n kw.update(context)\n return context, kw\n\n def provide_context_and_uptodate(self, cat, lang, node=None):\n \"\"\"Provide data for the context and the uptodate list for the list of the given classifiation.\"\"\"\n cat_path = self.extract_hierarchy(cat)\n kw = {\n 'category_path': self.site.config['CATEGORY_PATH'],\n 'category_prefix': self.site.config['CATEGORY_PREFIX'],\n \"category_pages_are_indexes\": self.site.config['CATEGORY_PAGES_ARE_INDEXES'],\n \"tzinfo\": self.site.tzinfo,\n \"category_pages_descriptions\": self.site.config['CATEGORY_PAGES_DESCRIPTIONS'],\n \"category_pages_titles\": self.site.config['CATEGORY_PAGES_TITLES'],\n }\n posts = self.site.posts_per_classification[self.classification_name][lang]\n children = [child for child in node.children if len([post for post in posts.get(child.classification_name, []) if self.site.config['SHOW_UNTRANSLATED_POSTS'] or post.is_translation_available(lang)]) > 0]\n subcats = [(child.name, self.site.link(self.classification_name, child.classification_name, lang), child.classification_name, child.classification_path) for child in children]\n friendly_name = self.get_classification_friendly_name(cat, lang)\n context = {\n \"title\": self.site.config['CATEGORY_PAGES_TITLES'].get(lang, {}).get(cat, self.site.MESSAGES[lang][\"Posts about %s\"] % friendly_name),\n \"description\": self.site.config['CATEGORY_PAGES_DESCRIPTIONS'].get(lang, {}).get(cat),\n \"kind\": \"category\",\n \"pagekind\": [\"tag_page\", \"index\" if self.show_list_as_index else \"list\"],\n \"tag\": friendly_name,\n \"category\": cat,\n \"category_path\": cat_path,\n \"subcategories\": subcats,\n }\n if self.show_list_as_index:\n context[\"rss_link\"] = \"\"\"<link rel=\"alternate\" type=\"application/rss+xml\" type=\"application/rss+xml\" title=\"RSS for tag {0} ({1})\" href=\"{2}\">\"\"\".format(friendly_name, lang, self.site.link(\"category_rss\", cat, lang))\n kw.update(context)\n return context, kw\n", "path": "nikola/plugins/task/categories.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2017 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Render the category pages and feeds.\"\"\"\n\nfrom __future__ import unicode_literals\n\nfrom nikola.plugin_categories import Taxonomy\nfrom nikola import utils\n\n\nclass ClassifyCategories(Taxonomy):\n \"\"\"Classify the posts by categories.\"\"\"\n\n name = \"classify_categories\"\n\n classification_name = \"category\"\n overview_page_variable_name = \"categories\"\n overview_page_items_variable_name = \"cat_items\"\n overview_page_hierarchy_variable_name = \"cat_hierarchy\"\n more_than_one_classifications_per_post = False\n has_hierarchy = True\n include_posts_from_subhierarchies = True\n include_posts_into_hierarchy_root = False\n show_list_as_subcategories_list = False\n generate_atom_feeds_for_post_lists = True\n template_for_classification_overview = \"tags.tmpl\"\n always_disable_rss = False\n apply_to_posts = True\n apply_to_pages = False\n minimum_post_count_per_classification_in_overview = 1\n omit_empty_classifications = True\n also_create_classifications_from_other_languages = True\n path_handler_docstrings = {\n 'category_index': \"\"\"A link to the category index.\n\nExample:\n\nlink://category_index => /categories/index.html\"\"\",\n 'category': \"\"\"A link to a category. Takes page number as optional keyword argument.\n\nExample:\n\nlink://category/dogs => /categories/dogs.html\"\"\",\n 'category_atom': \"\"\"A link to a category's Atom feed.\n\nExample:\n\nlink://category_atom/dogs => /categories/dogs.atom\"\"\",\n 'category_rss': \"\"\"A link to a category's RSS feed.\n\nExample:\n\nlink://category_rss/dogs => /categories/dogs.xml\"\"\",\n }\n\n def set_site(self, site):\n \"\"\"Set site, which is a Nikola instance.\"\"\"\n super(ClassifyCategories, self).set_site(site)\n self.show_list_as_index = self.site.config['CATEGORY_PAGES_ARE_INDEXES']\n self.template_for_single_list = \"tagindex.tmpl\" if self.show_list_as_index else \"tag.tmpl\"\n\n def is_enabled(self, lang=None):\n \"\"\"Return True if this taxonomy is enabled, or False otherwise.\"\"\"\n return True\n\n def classify(self, post, lang):\n \"\"\"Classify the given post for the given language.\"\"\"\n cat = post.meta('category', lang=lang).strip()\n return [cat] if cat else []\n\n def get_classification_friendly_name(self, classification, lang, only_last_component=False):\n \"\"\"Extract a friendly name from the classification.\"\"\"\n classification = self.extract_hierarchy(classification)\n return classification[-1] if classification else ''\n\n def get_overview_path(self, lang, dest_type='page'):\n \"\"\"A path handler for the list of all classifications.\"\"\"\n if self.site.config['CATEGORIES_INDEX_PATH'](lang):\n path = self.site.config['CATEGORIES_INDEX_PATH'](lang)\n if path.endswith('/index'): # TODO: remove in v8\n utils.LOGGER.warn(\"CATEGORIES_INDEX_PATH for language {0} is missing a .html extension. 
Please update your configuration!\".format(lang))\n path += '.html'\n return [_f for _f in [path] if _f], 'never'\n else:\n return [_f for _f in [self.site.config['CATEGORY_PATH'](lang)] if _f], 'always'\n\n def slugify_tag_name(self, name, lang):\n \"\"\"Slugify a tag name.\"\"\"\n if self.site.config['SLUG_TAG_PATH']:\n name = utils.slugify(name, lang)\n return name\n\n def slugify_category_name(self, path, lang):\n \"\"\"Slugify a category name.\"\"\"\n if lang is None: # TODO: remove in v8\n utils.LOGGER.warn(\"ClassifyCategories.slugify_category_name() called without language!\")\n lang = ''\n if self.site.config['CATEGORY_OUTPUT_FLAT_HIERARCHY']:\n path = path[-1:] # only the leaf\n result = [self.slugify_tag_name(part, lang) for part in path]\n result[0] = self.site.config['CATEGORY_PREFIX'] + result[0]\n if not self.site.config['PRETTY_URLS']:\n result = ['-'.join(result)]\n return result\n\n def get_path(self, classification, lang, dest_type='page'):\n \"\"\"A path handler for the given classification.\"\"\"\n return [_f for _f in [self.site.config['CATEGORY_PATH'](lang)] if _f] + self.slugify_category_name(classification, lang), 'auto'\n\n def extract_hierarchy(self, classification):\n \"\"\"Given a classification, return a list of parts in the hierarchy.\"\"\"\n return utils.parse_escaped_hierarchical_category_name(classification)\n\n def recombine_classification_from_hierarchy(self, hierarchy):\n \"\"\"Given a list of parts in the hierarchy, return the classification string.\"\"\"\n return utils.join_hierarchical_category_path(hierarchy)\n\n def provide_overview_context_and_uptodate(self, lang):\n \"\"\"Provide data for the context and the uptodate list for the list of all classifiations.\"\"\"\n kw = {\n 'category_path': self.site.config['CATEGORY_PATH'],\n 'category_prefix': self.site.config['CATEGORY_PREFIX'],\n \"category_pages_are_indexes\": self.site.config['CATEGORY_PAGES_ARE_INDEXES'],\n \"tzinfo\": self.site.tzinfo,\n \"category_pages_descriptions\": self.site.config['CATEGORY_PAGES_DESCRIPTIONS'],\n \"category_pages_titles\": self.site.config['CATEGORY_PAGES_TITLES'],\n }\n context = {\n \"title\": self.site.MESSAGES[lang][\"Categories\"],\n \"description\": self.site.MESSAGES[lang][\"Categories\"],\n \"pagekind\": [\"list\", \"tags_page\"],\n }\n kw.update(context)\n return context, kw\n\n def provide_context_and_uptodate(self, cat, lang, node=None):\n \"\"\"Provide data for the context and the uptodate list for the list of the given classifiation.\"\"\"\n cat_path = self.extract_hierarchy(cat)\n kw = {\n 'category_path': self.site.config['CATEGORY_PATH'],\n 'category_prefix': self.site.config['CATEGORY_PREFIX'],\n \"category_pages_are_indexes\": self.site.config['CATEGORY_PAGES_ARE_INDEXES'],\n \"tzinfo\": self.site.tzinfo,\n \"category_pages_descriptions\": self.site.config['CATEGORY_PAGES_DESCRIPTIONS'],\n \"category_pages_titles\": self.site.config['CATEGORY_PAGES_TITLES'],\n }\n posts = self.site.posts_per_classification[self.classification_name][lang]\n children = [child for child in node.children if len([post for post in posts.get(child.classification_name, []) if self.site.config['SHOW_UNTRANSLATED_POSTS'] or post.is_translation_available(lang)]) > 0]\n subcats = [(child.name, self.site.link(self.classification_name, child.classification_name, lang)) for child in children]\n friendly_name = self.get_classification_friendly_name(cat, lang)\n context = {\n \"title\": self.site.config['CATEGORY_PAGES_TITLES'].get(lang, {}).get(cat, 
self.site.MESSAGES[lang][\"Posts about %s\"] % friendly_name),\n \"description\": self.site.config['CATEGORY_PAGES_DESCRIPTIONS'].get(lang, {}).get(cat),\n \"kind\": \"category\",\n \"pagekind\": [\"tag_page\", \"index\" if self.show_list_as_index else \"list\"],\n \"tag\": friendly_name,\n \"category\": cat,\n \"category_path\": cat_path,\n \"subcategories\": subcats,\n }\n if self.show_list_as_index:\n context[\"rss_link\"] = \"\"\"<link rel=\"alternate\" type=\"application/rss+xml\" type=\"application/rss+xml\" title=\"RSS for tag {0} ({1})\" href=\"{2}\">\"\"\".format(friendly_name, lang, self.site.link(\"category_rss\", cat, lang))\n kw.update(context)\n return context, kw\n", "path": "nikola/plugins/task/categories.py"}]}
| 3,260 | 243 |
gh_patches_debug_13056
|
rasdani/github-patches
|
git_diff
|
pypa__pip-392
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
"pip freeze -r" returns ValueError() if requirements file contains --find-links
It seems that pip freeze should not pass --find-links or --index-url to distribute/setuptools when inlined in a requirements file. Here is an easy way to repro the problem:
1) echo "--find-links http://foo.bar/" > /tmp/req.txt
2) pip freeze -r /tmp/req.txt
"""
Exception:
Traceback (most recent call last):
File "/Users/hpfennig/work/pip/pip/basecommand.py", line 95, in main
self.run(options, args)
File "/Users/hpfennig/work/pip/pip/commands/freeze.py", line 92, in run
line_req = InstallRequirement.from_line(line)
File "/Users/hpfennig/work/pip/pip/req.py", line 105, in from_line
return cls(req, comes_from, url=url)
File "/Users/hpfennig/work/pip/pip/req.py", line 39, in **init**
req = pkg_resources.Requirement.parse(req)
File "/Users/hpfennig/.virtualenvs/pip-bug-virt-env/lib/python2.7/site-packages/setuptools-0.6c11-py2.7.egg/pkg_resources.py", line 2510, in parse
reqs = list(parse_requirements(s))
File "/Users/hpfennig/.virtualenvs/pip-bug-virt-env/lib/python2.7/site-packages/setuptools-0.6c11-py2.7.egg/pkg_resources.py", line 2436, in parse_requirements
line, p, specs = scan_list(VERSION,LINE_END,line,p,(1,2),"version spec")
File "/Users/hpfennig/.virtualenvs/pip-bug-virt-env/lib/python2.7/site-packages/setuptools-0.6c11-py2.7.egg/pkg_resources.py", line 2404, in scan_list
raise ValueError("Expected "+item_name+" in",line,"at",line[p:])
ValueError: ('Expected version spec in', '--find-links http://foo.bar/', 'at', ' http://foo.bar/')
Storing complete log in /Users/hpfennig/.pip/pip.log
"""
I have a fix, but I am not sure this is the correct way to go about it. Would be happy to do an official pull request if needed.
diff --git a/pip/commands/freeze.py b/pip/commands/freeze.py
index 01b5df9..03ac80f 100644
--- a/pip/commands/freeze.py
+++ b/pip/commands/freeze.py
@@ -85,7 +85,9 @@ class FreezeCommand(Command):
elif (line.startswith('-r') or line.startswith('--requirement')
or line.startswith('-Z') or line.startswith('--always-unzip')
or line.startswith('-f') or line.startswith('-i')
- or line.startswith('--extra-index-url')):
- or line.startswith('--extra-index-url')
- or line.startswith('--find-links')
- or line.startswith('--index-url')):
f.write(line)
continue
else:
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pip/commands/freeze.py`
Content:
```
1 import re
2 import sys
3 import pkg_resources
4 import pip
5 from pip.req import InstallRequirement
6 from pip.log import logger
7 from pip.basecommand import Command
8 from pip.util import get_installed_distributions
9
10
11 class FreezeCommand(Command):
12 name = 'freeze'
13 usage = '%prog [OPTIONS]'
14 summary = 'Output all currently installed packages (exact versions) to stdout'
15
16 def __init__(self):
17 super(FreezeCommand, self).__init__()
18 self.parser.add_option(
19 '-r', '--requirement',
20 dest='requirement',
21 action='store',
22 default=None,
23 metavar='FILENAME',
24 help='Use the given requirements file as a hint about how to generate the new frozen requirements')
25 self.parser.add_option(
26 '-f', '--find-links',
27 dest='find_links',
28 action='append',
29 default=[],
30 metavar='URL',
31 help='URL for finding packages, which will be added to the frozen requirements file')
32 self.parser.add_option(
33 '-l', '--local',
34 dest='local',
35 action='store_true',
36 default=False,
37 help='If in a virtualenv, do not report globally-installed packages')
38
39 def setup_logging(self):
40 logger.move_stdout_to_stderr()
41
42 def run(self, options, args):
43 requirement = options.requirement
44 find_links = options.find_links or []
45 local_only = options.local
46 ## FIXME: Obviously this should be settable:
47 find_tags = False
48 skip_match = None
49
50 skip_regex = options.skip_requirements_regex
51 if skip_regex:
52 skip_match = re.compile(skip_regex)
53
54 dependency_links = []
55
56 f = sys.stdout
57
58 for dist in pkg_resources.working_set:
59 if dist.has_metadata('dependency_links.txt'):
60 dependency_links.extend(dist.get_metadata_lines('dependency_links.txt'))
61 for link in find_links:
62 if '#egg=' in link:
63 dependency_links.append(link)
64 for link in find_links:
65 f.write('-f %s\n' % link)
66 installations = {}
67 for dist in get_installed_distributions(local_only=local_only):
68 req = pip.FrozenRequirement.from_dist(dist, dependency_links, find_tags=find_tags)
69 installations[req.name] = req
70 if requirement:
71 req_f = open(requirement)
72 for line in req_f:
73 if not line.strip() or line.strip().startswith('#'):
74 f.write(line)
75 continue
76 if skip_match and skip_match.search(line):
77 f.write(line)
78 continue
79 elif line.startswith('-e') or line.startswith('--editable'):
80 if line.startswith('-e'):
81 line = line[2:].strip()
82 else:
83 line = line[len('--editable'):].strip().lstrip('=')
84 line_req = InstallRequirement.from_editable(line, default_vcs=options.default_vcs)
85 elif (line.startswith('-r') or line.startswith('--requirement')
86 or line.startswith('-Z') or line.startswith('--always-unzip')
87 or line.startswith('-f') or line.startswith('-i')
88 or line.startswith('--extra-index-url')):
89 f.write(line)
90 continue
91 else:
92 line_req = InstallRequirement.from_line(line)
93 if not line_req.name:
94 logger.notify("Skipping line because it's not clear what it would install: %s"
95 % line.strip())
96 logger.notify(" (add #egg=PackageName to the URL to avoid this warning)")
97 continue
98 if line_req.name not in installations:
99 logger.warn("Requirement file contains %s, but that package is not installed"
100 % line.strip())
101 continue
102 f.write(str(installations[line_req.name]))
103 del installations[line_req.name]
104 f.write('## The following requirements were added by pip --freeze:\n')
105 for installation in sorted(installations.values(), key=lambda x: x.name):
106 f.write(str(installation))
107
108
109 FreezeCommand()
110
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pip/commands/freeze.py b/pip/commands/freeze.py
--- a/pip/commands/freeze.py
+++ b/pip/commands/freeze.py
@@ -85,7 +85,9 @@
elif (line.startswith('-r') or line.startswith('--requirement')
or line.startswith('-Z') or line.startswith('--always-unzip')
or line.startswith('-f') or line.startswith('-i')
- or line.startswith('--extra-index-url')):
+ or line.startswith('--extra-index-url')
+ or line.startswith('--find-links')
+ or line.startswith('--index-url')):
f.write(line)
continue
else:
|
{"golden_diff": "diff --git a/pip/commands/freeze.py b/pip/commands/freeze.py\n--- a/pip/commands/freeze.py\n+++ b/pip/commands/freeze.py\n@@ -85,7 +85,9 @@\n elif (line.startswith('-r') or line.startswith('--requirement')\n or line.startswith('-Z') or line.startswith('--always-unzip')\n or line.startswith('-f') or line.startswith('-i')\n- or line.startswith('--extra-index-url')):\n+ or line.startswith('--extra-index-url')\n+ or line.startswith('--find-links')\n+ or line.startswith('--index-url')):\n f.write(line)\n continue\n else:\n", "issue": "\"pip freeze -r\" returns ValueError() if requirements file contains --find-links\nIt seems that pip freeze should not pass --find-links or --index-url to distribute/setuptools when inlined in a requirements file. Here is an easy way to repro the problem:\n\n1) echo \"--find-links http://foo.bar/\" > /tmp/req.txt\n2) pip freeze -r /tmp/req.txt\n\n\"\"\"\nException:\nTraceback (most recent call last):\n File \"/Users/hpfennig/work/pip/pip/basecommand.py\", line 95, in main\n self.run(options, args)\n File \"/Users/hpfennig/work/pip/pip/commands/freeze.py\", line 92, in run\n line_req = InstallRequirement.from_line(line)\n File \"/Users/hpfennig/work/pip/pip/req.py\", line 105, in from_line\n return cls(req, comes_from, url=url)\n File \"/Users/hpfennig/work/pip/pip/req.py\", line 39, in **init**\n req = pkg_resources.Requirement.parse(req)\n File \"/Users/hpfennig/.virtualenvs/pip-bug-virt-env/lib/python2.7/site-packages/setuptools-0.6c11-py2.7.egg/pkg_resources.py\", line 2510, in parse\n reqs = list(parse_requirements(s))\n File \"/Users/hpfennig/.virtualenvs/pip-bug-virt-env/lib/python2.7/site-packages/setuptools-0.6c11-py2.7.egg/pkg_resources.py\", line 2436, in parse_requirements\n line, p, specs = scan_list(VERSION,LINE_END,line,p,(1,2),\"version spec\")\n File \"/Users/hpfennig/.virtualenvs/pip-bug-virt-env/lib/python2.7/site-packages/setuptools-0.6c11-py2.7.egg/pkg_resources.py\", line 2404, in scan_list\n raise ValueError(\"Expected \"+item_name+\" in\",line,\"at\",line[p:])\nValueError: ('Expected version spec in', '--find-links http://foo.bar/', 'at', ' http://foo.bar/')\n\nStoring complete log in /Users/hpfennig/.pip/pip.log\n\"\"\"\n\nI have a fix, but I am not sure this is the correct way to go about it. 
Would be happy to do an official pull request if needed.\n\ndiff --git a/pip/commands/freeze.py b/pip/commands/freeze.py\nindex 01b5df9..03ac80f 100644\n--- a/pip/commands/freeze.py\n+++ b/pip/commands/freeze.py\n@@ -85,7 +85,9 @@ class FreezeCommand(Command):\n elif (line.startswith('-r') or line.startswith('--requirement')\n or line.startswith('-Z') or line.startswith('--always-unzip')\n or line.startswith('-f') or line.startswith('-i')\n- or line.startswith('--extra-index-url')):\n- or line.startswith('--extra-index-url')\n- or line.startswith('--find-links')\n- or line.startswith('--index-url')):\n f.write(line)\n continue\n else:\n\n", "before_files": [{"content": "import re\nimport sys\nimport pkg_resources\nimport pip\nfrom pip.req import InstallRequirement\nfrom pip.log import logger\nfrom pip.basecommand import Command\nfrom pip.util import get_installed_distributions\n\n\nclass FreezeCommand(Command):\n name = 'freeze'\n usage = '%prog [OPTIONS]'\n summary = 'Output all currently installed packages (exact versions) to stdout'\n\n def __init__(self):\n super(FreezeCommand, self).__init__()\n self.parser.add_option(\n '-r', '--requirement',\n dest='requirement',\n action='store',\n default=None,\n metavar='FILENAME',\n help='Use the given requirements file as a hint about how to generate the new frozen requirements')\n self.parser.add_option(\n '-f', '--find-links',\n dest='find_links',\n action='append',\n default=[],\n metavar='URL',\n help='URL for finding packages, which will be added to the frozen requirements file')\n self.parser.add_option(\n '-l', '--local',\n dest='local',\n action='store_true',\n default=False,\n help='If in a virtualenv, do not report globally-installed packages')\n\n def setup_logging(self):\n logger.move_stdout_to_stderr()\n\n def run(self, options, args):\n requirement = options.requirement\n find_links = options.find_links or []\n local_only = options.local\n ## FIXME: Obviously this should be settable:\n find_tags = False\n skip_match = None\n\n skip_regex = options.skip_requirements_regex\n if skip_regex:\n skip_match = re.compile(skip_regex)\n\n dependency_links = []\n\n f = sys.stdout\n\n for dist in pkg_resources.working_set:\n if dist.has_metadata('dependency_links.txt'):\n dependency_links.extend(dist.get_metadata_lines('dependency_links.txt'))\n for link in find_links:\n if '#egg=' in link:\n dependency_links.append(link)\n for link in find_links:\n f.write('-f %s\\n' % link)\n installations = {}\n for dist in get_installed_distributions(local_only=local_only):\n req = pip.FrozenRequirement.from_dist(dist, dependency_links, find_tags=find_tags)\n installations[req.name] = req\n if requirement:\n req_f = open(requirement)\n for line in req_f:\n if not line.strip() or line.strip().startswith('#'):\n f.write(line)\n continue\n if skip_match and skip_match.search(line):\n f.write(line)\n continue\n elif line.startswith('-e') or line.startswith('--editable'):\n if line.startswith('-e'):\n line = line[2:].strip()\n else:\n line = line[len('--editable'):].strip().lstrip('=')\n line_req = InstallRequirement.from_editable(line, default_vcs=options.default_vcs)\n elif (line.startswith('-r') or line.startswith('--requirement')\n or line.startswith('-Z') or line.startswith('--always-unzip')\n or line.startswith('-f') or line.startswith('-i')\n or line.startswith('--extra-index-url')):\n f.write(line)\n continue\n else:\n line_req = InstallRequirement.from_line(line)\n if not line_req.name:\n logger.notify(\"Skipping line because it's not clear what it 
would install: %s\"\n % line.strip())\n logger.notify(\" (add #egg=PackageName to the URL to avoid this warning)\")\n continue\n if line_req.name not in installations:\n logger.warn(\"Requirement file contains %s, but that package is not installed\"\n % line.strip())\n continue\n f.write(str(installations[line_req.name]))\n del installations[line_req.name]\n f.write('## The following requirements were added by pip --freeze:\\n')\n for installation in sorted(installations.values(), key=lambda x: x.name):\n f.write(str(installation))\n\n\nFreezeCommand()\n", "path": "pip/commands/freeze.py"}], "after_files": [{"content": "import re\nimport sys\nimport pkg_resources\nimport pip\nfrom pip.req import InstallRequirement\nfrom pip.log import logger\nfrom pip.basecommand import Command\nfrom pip.util import get_installed_distributions\n\n\nclass FreezeCommand(Command):\n name = 'freeze'\n usage = '%prog [OPTIONS]'\n summary = 'Output all currently installed packages (exact versions) to stdout'\n\n def __init__(self):\n super(FreezeCommand, self).__init__()\n self.parser.add_option(\n '-r', '--requirement',\n dest='requirement',\n action='store',\n default=None,\n metavar='FILENAME',\n help='Use the given requirements file as a hint about how to generate the new frozen requirements')\n self.parser.add_option(\n '-f', '--find-links',\n dest='find_links',\n action='append',\n default=[],\n metavar='URL',\n help='URL for finding packages, which will be added to the frozen requirements file')\n self.parser.add_option(\n '-l', '--local',\n dest='local',\n action='store_true',\n default=False,\n help='If in a virtualenv, do not report globally-installed packages')\n\n def setup_logging(self):\n logger.move_stdout_to_stderr()\n\n def run(self, options, args):\n requirement = options.requirement\n find_links = options.find_links or []\n local_only = options.local\n ## FIXME: Obviously this should be settable:\n find_tags = False\n skip_match = None\n\n skip_regex = options.skip_requirements_regex\n if skip_regex:\n skip_match = re.compile(skip_regex)\n\n dependency_links = []\n\n f = sys.stdout\n\n for dist in pkg_resources.working_set:\n if dist.has_metadata('dependency_links.txt'):\n dependency_links.extend(dist.get_metadata_lines('dependency_links.txt'))\n for link in find_links:\n if '#egg=' in link:\n dependency_links.append(link)\n for link in find_links:\n f.write('-f %s\\n' % link)\n installations = {}\n for dist in get_installed_distributions(local_only=local_only):\n req = pip.FrozenRequirement.from_dist(dist, dependency_links, find_tags=find_tags)\n installations[req.name] = req\n if requirement:\n req_f = open(requirement)\n for line in req_f:\n if not line.strip() or line.strip().startswith('#'):\n f.write(line)\n continue\n if skip_match and skip_match.search(line):\n f.write(line)\n continue\n elif line.startswith('-e') or line.startswith('--editable'):\n if line.startswith('-e'):\n line = line[2:].strip()\n else:\n line = line[len('--editable'):].strip().lstrip('=')\n line_req = InstallRequirement.from_editable(line, default_vcs=options.default_vcs)\n elif (line.startswith('-r') or line.startswith('--requirement')\n or line.startswith('-Z') or line.startswith('--always-unzip')\n or line.startswith('-f') or line.startswith('-i')\n or line.startswith('--extra-index-url')\n or line.startswith('--find-links')\n or line.startswith('--index-url')):\n f.write(line)\n continue\n else:\n line_req = InstallRequirement.from_line(line)\n if not line_req.name:\n logger.notify(\"Skipping line because 
it's not clear what it would install: %s\"\n % line.strip())\n logger.notify(\" (add #egg=PackageName to the URL to avoid this warning)\")\n continue\n if line_req.name not in installations:\n logger.warn(\"Requirement file contains %s, but that package is not installed\"\n % line.strip())\n continue\n f.write(str(installations[line_req.name]))\n del installations[line_req.name]\n f.write('## The following requirements were added by pip --freeze:\\n')\n for installation in sorted(installations.values(), key=lambda x: x.name):\n f.write(str(installation))\n\n\nFreezeCommand()\n", "path": "pip/commands/freeze.py"}]}
| 2,014 | 144 |
gh_patches_debug_11571
|
rasdani/github-patches
|
git_diff
|
microsoft__hi-ml-78
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make the package tag-line on PyPi more concrete
Javier pointed out that our tagline, _Microsoft Health Intelligence AzureML helpers_, on https://pypi.org/manage/project/hi-ml/releases/ is too generic.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # ------------------------------------------------------------------------------------------
2 # Copyright (c) Microsoft Corporation. All rights reserved.
3 # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 # ------------------------------------------------------------------------------------------
5
6 """A setuptools based setup module.
7
8 See:
9 https://packaging.python.org/guides/distributing-packages-using-setuptools/
10 """
11
12 import os
13 from math import floor
14 import pathlib
15 from random import random
16 from setuptools import setup, find_packages # type: ignore
17
18
19 here = pathlib.Path(__file__).parent.resolve()
20
21 # Get the long description from the README file
22 long_description = (here / 'README.md').read_text(encoding='utf-8')
23
24 version = ''
25
26 # If running from a GitHub Action then a standard set of environment variables will be
27 # populated (https://docs.github.com/en/actions/reference/environment-variables#default-environment-variables).
28 # In particular, GITHUB_REF is the branch or tag ref that triggered the workflow.
29 # If this was triggered by a tagged commit then GITHUB_REF will be: 'ref/tags/new_tag'.
30 # Extract this tag and use it as a version string
31 # See also:
32 # https://packaging.python.org/guides/publishing-package-distribution-releases-using-github-actions-ci-cd-workflows/
33 # https://github.com/pypa/gh-action-pypi-publish
34 GITHUB_REF_TAG_COMMIT = 'refs/tags/'
35
36 github_ref = os.getenv('GITHUB_REF')
37 if github_ref and github_ref.startswith(GITHUB_REF_TAG_COMMIT):
38 version = github_ref[len(GITHUB_REF_TAG_COMMIT):]
39
40 # Otherwise, if running from a GitHub Action, but not a tagged commit then GITHUB_RUN_NUMBER will be populated.
41 # Use this as a post release number. For example if GITHUB_RUN_NUMBER = 124 then the version string will be
42 # '0.1.2.post124'. Although this is discouraged, see:
43 # https://www.python.org/dev/peps/pep-0440/#post-releases
44 # it is necessary here to avoid duplicate packages in Test.PyPI.
45 if not version:
46 # TODO: Replace this with more principled package version management for the package wheels built during local test
47 # runs, one which circumvents AzureML's apparent package caching:
48 build_number = os.getenv('GITHUB_RUN_NUMBER')
49 if build_number:
50 version = '0.1.0.post' + build_number
51 else:
52 default_random_version_number = floor(random() * 10_000_000_000)
53 version = f'0.1.0.post{str(default_random_version_number)}'
54
55 (here / 'latest_version.txt').write_text(version)
56
57 # Read run_requirements.txt to get install_requires
58 install_requires = (here / 'run_requirements.txt').read_text().split("\n")
59 # Remove any whitespace and blank lines
60 install_requires = [line.strip() for line in install_requires if line.strip()]
61
62 setup(
63 name='hi-ml',
64 version=version,
65 description='Microsoft Health Intelligence AzureML helpers',
66 long_description=long_description,
67 long_description_content_type='text/markdown',
68 url='https://github.com/microsoft/hi-ml',
69 author="Microsoft Research Cambridge InnerEye Team ",
70 author_email="[email protected]",
71 classifiers=[
72 'Development Status :: 3 - Alpha',
73 'Intended Audience :: Science/Research',
74 "Topic :: Scientific/Engineering :: Medical Science Apps.",
75 'License :: OSI Approved :: MIT License',
76 'Programming Language :: Python :: 3.7'
77 ],
78 keywords='InnerEye, HealthIntelligence, AzureML',
79 license='MIT License',
80 packages=find_packages(where="src"),
81 package_dir={"": "src"},
82 include_package_data=True,
83 install_requires=install_requires,
84 scripts=['src/health/azure/run_tensorboard.py']
85 )
86
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -59,10 +59,12 @@
# Remove any whitespace and blank lines
install_requires = [line.strip() for line in install_requires if line.strip()]
+description = 'Microsoft Health Intelligence package to elevate and monitor scripts to an AzureML workspace'
+
setup(
name='hi-ml',
version=version,
- description='Microsoft Health Intelligence AzureML helpers',
+ description=description,
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/microsoft/hi-ml',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -59,10 +59,12 @@\n # Remove any whitespace and blank lines\n install_requires = [line.strip() for line in install_requires if line.strip()]\n \n+description = 'Microsoft Health Intelligence package to elevate and monitor scripts to an AzureML workspace'\n+\n setup(\n name='hi-ml',\n version=version,\n- description='Microsoft Health Intelligence AzureML helpers',\n+ description=description,\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/microsoft/hi-ml',\n", "issue": "Make the package tag-line on PyPi more concrete\nJavier pointed out that our tagline, _Microsoft Health Intelligence AzureML helpers_, on https://pypi.org/manage/project/hi-ml/releases/ is too generic.\n", "before_files": [{"content": "# ------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.\n# ------------------------------------------------------------------------------------------\n\n\"\"\"A setuptools based setup module.\n\nSee:\nhttps://packaging.python.org/guides/distributing-packages-using-setuptools/\n\"\"\"\n\nimport os\nfrom math import floor\nimport pathlib\nfrom random import random\nfrom setuptools import setup, find_packages # type: ignore\n\n\nhere = pathlib.Path(__file__).parent.resolve()\n\n# Get the long description from the README file\nlong_description = (here / 'README.md').read_text(encoding='utf-8')\n\nversion = ''\n\n# If running from a GitHub Action then a standard set of environment variables will be\n# populated (https://docs.github.com/en/actions/reference/environment-variables#default-environment-variables).\n# In particular, GITHUB_REF is the branch or tag ref that triggered the workflow.\n# If this was triggered by a tagged commit then GITHUB_REF will be: 'ref/tags/new_tag'.\n# Extract this tag and use it as a version string\n# See also:\n# https://packaging.python.org/guides/publishing-package-distribution-releases-using-github-actions-ci-cd-workflows/\n# https://github.com/pypa/gh-action-pypi-publish\nGITHUB_REF_TAG_COMMIT = 'refs/tags/'\n\ngithub_ref = os.getenv('GITHUB_REF')\nif github_ref and github_ref.startswith(GITHUB_REF_TAG_COMMIT):\n version = github_ref[len(GITHUB_REF_TAG_COMMIT):]\n\n# Otherwise, if running from a GitHub Action, but not a tagged commit then GITHUB_RUN_NUMBER will be populated.\n# Use this as a post release number. For example if GITHUB_RUN_NUMBER = 124 then the version string will be\n# '0.1.2.post124'. 
Although this is discouraged, see:\n# https://www.python.org/dev/peps/pep-0440/#post-releases\n# it is necessary here to avoid duplicate packages in Test.PyPI.\nif not version:\n # TODO: Replace this with more principled package version management for the package wheels built during local test\n # runs, one which circumvents AzureML's apparent package caching:\n build_number = os.getenv('GITHUB_RUN_NUMBER')\n if build_number:\n version = '0.1.0.post' + build_number\n else:\n default_random_version_number = floor(random() * 10_000_000_000)\n version = f'0.1.0.post{str(default_random_version_number)}'\n\n(here / 'latest_version.txt').write_text(version)\n\n# Read run_requirements.txt to get install_requires\ninstall_requires = (here / 'run_requirements.txt').read_text().split(\"\\n\")\n# Remove any whitespace and blank lines\ninstall_requires = [line.strip() for line in install_requires if line.strip()]\n\nsetup(\n name='hi-ml',\n version=version,\n description='Microsoft Health Intelligence AzureML helpers',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/microsoft/hi-ml',\n author=\"Microsoft Research Cambridge InnerEye Team \",\n author_email=\"[email protected]\",\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n \"Topic :: Scientific/Engineering :: Medical Science Apps.\",\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.7'\n ],\n keywords='InnerEye, HealthIntelligence, AzureML',\n license='MIT License',\n packages=find_packages(where=\"src\"),\n package_dir={\"\": \"src\"},\n include_package_data=True,\n install_requires=install_requires,\n scripts=['src/health/azure/run_tensorboard.py']\n)\n", "path": "setup.py"}], "after_files": [{"content": "# ------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License (MIT). 
See LICENSE in the repo root for license information.\n# ------------------------------------------------------------------------------------------\n\n\"\"\"A setuptools based setup module.\n\nSee:\nhttps://packaging.python.org/guides/distributing-packages-using-setuptools/\n\"\"\"\n\nimport os\nfrom math import floor\nimport pathlib\nfrom random import random\nfrom setuptools import setup, find_packages # type: ignore\n\n\nhere = pathlib.Path(__file__).parent.resolve()\n\n# Get the long description from the README file\nlong_description = (here / 'README.md').read_text(encoding='utf-8')\n\nversion = ''\n\n# If running from a GitHub Action then a standard set of environment variables will be\n# populated (https://docs.github.com/en/actions/reference/environment-variables#default-environment-variables).\n# In particular, GITHUB_REF is the branch or tag ref that triggered the workflow.\n# If this was triggered by a tagged commit then GITHUB_REF will be: 'ref/tags/new_tag'.\n# Extract this tag and use it as a version string\n# See also:\n# https://packaging.python.org/guides/publishing-package-distribution-releases-using-github-actions-ci-cd-workflows/\n# https://github.com/pypa/gh-action-pypi-publish\nGITHUB_REF_TAG_COMMIT = 'refs/tags/'\n\ngithub_ref = os.getenv('GITHUB_REF')\nif github_ref and github_ref.startswith(GITHUB_REF_TAG_COMMIT):\n version = github_ref[len(GITHUB_REF_TAG_COMMIT):]\n\n# Otherwise, if running from a GitHub Action, but not a tagged commit then GITHUB_RUN_NUMBER will be populated.\n# Use this as a post release number. For example if GITHUB_RUN_NUMBER = 124 then the version string will be\n# '0.1.2.post124'. Although this is discouraged, see:\n# https://www.python.org/dev/peps/pep-0440/#post-releases\n# it is necessary here to avoid duplicate packages in Test.PyPI.\nif not version:\n # TODO: Replace this with more principled package version management for the package wheels built during local test\n # runs, one which circumvents AzureML's apparent package caching:\n build_number = os.getenv('GITHUB_RUN_NUMBER')\n if build_number:\n version = '0.1.0.post' + build_number\n else:\n default_random_version_number = floor(random() * 10_000_000_000)\n version = f'0.1.0.post{str(default_random_version_number)}'\n\n(here / 'latest_version.txt').write_text(version)\n\n# Read run_requirements.txt to get install_requires\ninstall_requires = (here / 'run_requirements.txt').read_text().split(\"\\n\")\n# Remove any whitespace and blank lines\ninstall_requires = [line.strip() for line in install_requires if line.strip()]\n\ndescription = 'Microsoft Health Intelligence package to elevate and monitor scripts to an AzureML workspace'\n\nsetup(\n name='hi-ml',\n version=version,\n description=description,\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/microsoft/hi-ml',\n author=\"Microsoft Research Cambridge InnerEye Team \",\n author_email=\"[email protected]\",\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n \"Topic :: Scientific/Engineering :: Medical Science Apps.\",\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.7'\n ],\n keywords='InnerEye, HealthIntelligence, AzureML',\n license='MIT License',\n packages=find_packages(where=\"src\"),\n package_dir={\"\": \"src\"},\n include_package_data=True,\n install_requires=install_requires,\n scripts=['src/health/azure/run_tensorboard.py']\n)\n", "path": "setup.py"}]}
| 1,292 | 141 |
gh_patches_debug_904
|
rasdani/github-patches
|
git_diff
|
aws-cloudformation__cfn-lint-1081
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error running cfn-lint with pipe (|)
cfn-lint version: *v0.23.0*
Hello we have a problem running cfn-lint with find command. Only this version is affected as far as we know.
We are keeping couple of template is a folder and linting them like that:
```
find ./templates -type f | xargs cfn-lint -f parseable -c I -t
```
It worked flawlessly before but with the new update we are getting this error:
> 2019-08-02 15:37:01,818 - cfnlint.decode - ERROR - Template file not found: None
None:1:1:1:2:E0000:Template file not found: None
Splitting the files in separated lines with `xargs -L 1` doesn't help.
If you run the cfn-lint command on it's own it works as expected.
This example **doesn't** work:
```
find ./templates -type f | xargs -t cfn-lint -f parseable -c I -t
cfn-lint -f parseable -c I -t ./templates/t1.yml ./templates/t2.yml ./templates/t3.yml
2019-08-02 15:50:20,891 - cfnlint.decode - ERROR - Template file not found: None
None:1:1:1:2:E0000:Template file not found: None
```
This example works:
```
cfn-lint -f parseable -c I -t ./templates/t1.yml ./templates/t2.yml ./templates/t3.yml
```
Regards TT
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/core.py`
Content:
```
1 """
2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 import logging
18 import os
19 import sys
20 from jsonschema.exceptions import ValidationError
21 from cfnlint import RulesCollection
22 import cfnlint.config
23 import cfnlint.formatters
24 import cfnlint.decode
25 import cfnlint.maintenance
26 from cfnlint.helpers import REGIONS
27
28
29 LOGGER = logging.getLogger('cfnlint')
30 DEFAULT_RULESDIR = os.path.join(os.path.dirname(__file__), 'rules')
31
32
33 class CfnLintExitException(Exception):
34 """Generic exception used when the cli should exit"""
35 def __init__(self, msg=None, exit_code=1):
36 if msg is None:
37 msg = 'process failed with exit code %s' % exit_code
38 super(CfnLintExitException, self).__init__(msg)
39 self.exit_code = exit_code
40
41
42 class InvalidRegionException(CfnLintExitException):
43 """When an unsupported/invalid region is supplied"""
44
45
46 class UnexpectedRuleException(CfnLintExitException):
47 """When processing a rule fails in an unexpected way"""
48
49
50 def run_cli(filename, template, rules, regions, override_spec):
51 """Process args and run"""
52
53 if override_spec:
54 cfnlint.helpers.override_specs(override_spec)
55
56 return run_checks(filename, template, rules, regions)
57
58
59 def get_exit_code(matches):
60 """ Determine exit code """
61 exit_code = 0
62 for match in matches:
63 if match.rule.id[0] == 'I':
64 exit_code = exit_code | 8
65 elif match.rule.id[0] == 'W':
66 exit_code = exit_code | 4
67 elif match.rule.id[0] == 'E':
68 exit_code = exit_code | 2
69
70 return exit_code
71
72
73 def get_formatter(fmt):
74 """ Get Formatter"""
75 formatter = {}
76 if fmt:
77 if fmt == 'quiet':
78 formatter = cfnlint.formatters.QuietFormatter()
79 elif fmt == 'parseable':
80 # pylint: disable=bad-option-value
81 formatter = cfnlint.formatters.ParseableFormatter()
82 elif fmt == 'json':
83 formatter = cfnlint.formatters.JsonFormatter()
84 else:
85 formatter = cfnlint.formatters.Formatter()
86
87 return formatter
88
89
90 def get_rules(rulesdir, ignore_rules, include_rules, configure_rules=None, include_experimental=False):
91 """Get rules"""
92 rules = RulesCollection(ignore_rules, include_rules, configure_rules, include_experimental)
93 rules_dirs = [DEFAULT_RULESDIR] + rulesdir
94 try:
95 for rules_dir in rules_dirs:
96 rules.create_from_directory(rules_dir)
97 except OSError as e:
98 raise UnexpectedRuleException('Tried to append rules but got an error: %s' % str(e), 1)
99 return rules
100
101
102 def configure_logging(debug_logging):
103 """ Backwards compatibility for integrators """
104 LOGGER.info('Update your integrations to use "cfnlint.config.configure_logging" instead')
105 cfnlint.config.configure_logging(debug_logging, False)
106
107
108 def get_args_filenames(cli_args):
109 """ Get Template Configuration items and set them as default values"""
110 try:
111 config = cfnlint.config.ConfigMixIn(cli_args)
112 except ValidationError as e:
113 LOGGER.error('Error parsing config file: %s', str(e))
114 exit(1)
115
116 fmt = config.format
117 formatter = get_formatter(fmt)
118
119 if config.update_specs:
120 cfnlint.maintenance.update_resource_specs()
121 exit(0)
122
123 if config.update_documentation:
124 # Get ALL rules (ignore the CLI settings))
125 documentation_rules = cfnlint.core.get_rules([], [], ['I', 'E', 'W'], {}, True)
126 cfnlint.maintenance.update_documentation(documentation_rules)
127 exit(0)
128
129 if config.update_iam_policies:
130 cfnlint.maintenance.update_iam_policies()
131 exit(0)
132
133 if config.listrules:
134 rules = cfnlint.core.get_rules(
135 config.append_rules,
136 config.ignore_checks,
137 config.include_checks,
138 config.configure_rules
139 )
140 print(rules)
141 exit(0)
142
143 if not sys.stdin.isatty():
144 return(config, [None], formatter)
145
146 if not config.templates:
147 # Not specified, print the help
148 config.parser.print_help()
149 exit(1)
150
151 return(config, config.templates, formatter)
152
153
154 def get_template_rules(filename, args):
155 """ Get Template Configuration items and set them as default values"""
156
157 (template, matches) = cfnlint.decode.decode(filename, args.ignore_bad_template)
158
159 if matches:
160 return(template, [], matches)
161
162 args.template_args = template
163
164 rules = cfnlint.core.get_rules(
165 args.append_rules,
166 args.ignore_checks,
167 args.include_checks,
168 args.configure_rules,
169 args.include_experimental,
170 )
171
172 return(template, rules, [])
173
174
175 def run_checks(filename, template, rules, regions):
176 """Run Checks against the template"""
177 if regions:
178 if not set(regions).issubset(set(REGIONS)):
179 unsupported_regions = list(set(regions).difference(set(REGIONS)))
180 msg = 'Regions %s are unsupported. Supported regions are %s' % (unsupported_regions, REGIONS)
181 raise InvalidRegionException(msg, 32)
182
183 matches = []
184
185 runner = cfnlint.Runner(rules, filename, template, regions)
186 matches.extend(runner.transform())
187 # Only do rule analysis if Transform was successful
188 if not matches:
189 try:
190 matches.extend(runner.run())
191 except Exception as err: # pylint: disable=W0703
192 msg = 'Tried to process rules on file %s but got an error: %s' % (filename, str(err))
193 UnexpectedRuleException(msg, 1)
194 matches.sort(key=lambda x: (x.filename, x.linenumber, x.rule.id))
195
196 return(matches)
197
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/cfnlint/core.py b/src/cfnlint/core.py
--- a/src/cfnlint/core.py
+++ b/src/cfnlint/core.py
@@ -140,7 +140,7 @@
print(rules)
exit(0)
- if not sys.stdin.isatty():
+ if not sys.stdin.isatty() and not config.templates:
return(config, [None], formatter)
if not config.templates:
|
{"golden_diff": "diff --git a/src/cfnlint/core.py b/src/cfnlint/core.py\n--- a/src/cfnlint/core.py\n+++ b/src/cfnlint/core.py\n@@ -140,7 +140,7 @@\n print(rules)\n exit(0)\n \n- if not sys.stdin.isatty():\n+ if not sys.stdin.isatty() and not config.templates:\n return(config, [None], formatter)\n \n if not config.templates:\n", "issue": "Error running cfn-lint with pipe (|)\ncfn-lint version: *v0.23.0*\r\n\r\nHello we have a problem running cfn-lint with find command. Only this version is affected as far as we know.\r\nWe are keeping couple of template is a folder and linting them like that:\r\n```\r\nfind ./templates -type f | xargs cfn-lint -f parseable -c I -t\r\n```\r\n\r\nIt worked flawlessly before but with the new update we are getting this error:\r\n\r\n> 2019-08-02 15:37:01,818 - cfnlint.decode - ERROR - Template file not found: None\r\nNone:1:1:1:2:E0000:Template file not found: None\r\n\r\nSplitting the files in separated lines with `xargs -L 1` doesn't help.\r\n\r\nIf you run the cfn-lint command on it's own it works as expected.\r\n\r\nThis example **doesn't** work:\r\n```\r\nfind ./templates -type f | xargs -t cfn-lint -f parseable -c I -t\r\ncfn-lint -f parseable -c I -t ./templates/t1.yml ./templates/t2.yml ./templates/t3.yml \r\n2019-08-02 15:50:20,891 - cfnlint.decode - ERROR - Template file not found: None\r\nNone:1:1:1:2:E0000:Template file not found: None\r\n```\r\nThis example works:\r\n```\r\ncfn-lint -f parseable -c I -t ./templates/t1.yml ./templates/t2.yml ./templates/t3.yml\r\n ```\r\n\r\nRegards TT\n", "before_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport logging\nimport os\nimport sys\nfrom jsonschema.exceptions import ValidationError\nfrom cfnlint import RulesCollection\nimport cfnlint.config\nimport cfnlint.formatters\nimport cfnlint.decode\nimport cfnlint.maintenance\nfrom cfnlint.helpers import REGIONS\n\n\nLOGGER = logging.getLogger('cfnlint')\nDEFAULT_RULESDIR = os.path.join(os.path.dirname(__file__), 'rules')\n\n\nclass CfnLintExitException(Exception):\n \"\"\"Generic exception used when the cli should exit\"\"\"\n def __init__(self, msg=None, exit_code=1):\n if msg is None:\n msg = 'process failed with exit code %s' % exit_code\n super(CfnLintExitException, self).__init__(msg)\n self.exit_code = exit_code\n\n\nclass InvalidRegionException(CfnLintExitException):\n \"\"\"When an unsupported/invalid region is supplied\"\"\"\n\n\nclass UnexpectedRuleException(CfnLintExitException):\n \"\"\"When processing a rule fails in an unexpected way\"\"\"\n\n\ndef run_cli(filename, template, rules, regions, override_spec):\n \"\"\"Process args and run\"\"\"\n\n if override_spec:\n cfnlint.helpers.override_specs(override_spec)\n\n return run_checks(filename, template, rules, regions)\n\n\ndef get_exit_code(matches):\n \"\"\" Determine exit code \"\"\"\n exit_code = 0\n for match in matches:\n if match.rule.id[0] == 'I':\n exit_code = exit_code | 8\n elif match.rule.id[0] == 'W':\n exit_code = exit_code | 4\n elif match.rule.id[0] == 'E':\n exit_code = exit_code | 2\n\n return exit_code\n\n\ndef get_formatter(fmt):\n \"\"\" Get Formatter\"\"\"\n formatter = {}\n if fmt:\n if fmt == 'quiet':\n formatter = cfnlint.formatters.QuietFormatter()\n elif fmt == 'parseable':\n # pylint: disable=bad-option-value\n formatter = cfnlint.formatters.ParseableFormatter()\n elif fmt == 'json':\n formatter = cfnlint.formatters.JsonFormatter()\n else:\n formatter = cfnlint.formatters.Formatter()\n\n return formatter\n\n\ndef get_rules(rulesdir, ignore_rules, include_rules, configure_rules=None, include_experimental=False):\n \"\"\"Get rules\"\"\"\n rules = RulesCollection(ignore_rules, include_rules, configure_rules, include_experimental)\n rules_dirs = [DEFAULT_RULESDIR] + rulesdir\n try:\n for rules_dir in rules_dirs:\n rules.create_from_directory(rules_dir)\n except OSError as e:\n raise UnexpectedRuleException('Tried to append rules but got an error: %s' % str(e), 1)\n return rules\n\n\ndef configure_logging(debug_logging):\n \"\"\" Backwards compatibility for integrators \"\"\"\n LOGGER.info('Update your integrations to use \"cfnlint.config.configure_logging\" instead')\n cfnlint.config.configure_logging(debug_logging, False)\n\n\ndef get_args_filenames(cli_args):\n \"\"\" Get Template Configuration items and set them as default values\"\"\"\n try:\n config = cfnlint.config.ConfigMixIn(cli_args)\n except ValidationError as e:\n LOGGER.error('Error parsing config file: %s', str(e))\n exit(1)\n\n fmt = config.format\n formatter = get_formatter(fmt)\n\n if config.update_specs:\n cfnlint.maintenance.update_resource_specs()\n exit(0)\n\n if config.update_documentation:\n # Get ALL rules (ignore the CLI settings))\n documentation_rules = cfnlint.core.get_rules([], [], ['I', 'E', 'W'], {}, True)\n cfnlint.maintenance.update_documentation(documentation_rules)\n exit(0)\n\n if 
config.update_iam_policies:\n cfnlint.maintenance.update_iam_policies()\n exit(0)\n\n if config.listrules:\n rules = cfnlint.core.get_rules(\n config.append_rules,\n config.ignore_checks,\n config.include_checks,\n config.configure_rules\n )\n print(rules)\n exit(0)\n\n if not sys.stdin.isatty():\n return(config, [None], formatter)\n\n if not config.templates:\n # Not specified, print the help\n config.parser.print_help()\n exit(1)\n\n return(config, config.templates, formatter)\n\n\ndef get_template_rules(filename, args):\n \"\"\" Get Template Configuration items and set them as default values\"\"\"\n\n (template, matches) = cfnlint.decode.decode(filename, args.ignore_bad_template)\n\n if matches:\n return(template, [], matches)\n\n args.template_args = template\n\n rules = cfnlint.core.get_rules(\n args.append_rules,\n args.ignore_checks,\n args.include_checks,\n args.configure_rules,\n args.include_experimental,\n )\n\n return(template, rules, [])\n\n\ndef run_checks(filename, template, rules, regions):\n \"\"\"Run Checks against the template\"\"\"\n if regions:\n if not set(regions).issubset(set(REGIONS)):\n unsupported_regions = list(set(regions).difference(set(REGIONS)))\n msg = 'Regions %s are unsupported. Supported regions are %s' % (unsupported_regions, REGIONS)\n raise InvalidRegionException(msg, 32)\n\n matches = []\n\n runner = cfnlint.Runner(rules, filename, template, regions)\n matches.extend(runner.transform())\n # Only do rule analysis if Transform was successful\n if not matches:\n try:\n matches.extend(runner.run())\n except Exception as err: # pylint: disable=W0703\n msg = 'Tried to process rules on file %s but got an error: %s' % (filename, str(err))\n UnexpectedRuleException(msg, 1)\n matches.sort(key=lambda x: (x.filename, x.linenumber, x.rule.id))\n\n return(matches)\n", "path": "src/cfnlint/core.py"}], "after_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport logging\nimport os\nimport sys\nfrom jsonschema.exceptions import ValidationError\nfrom cfnlint import RulesCollection\nimport cfnlint.config\nimport cfnlint.formatters\nimport cfnlint.decode\nimport cfnlint.maintenance\nfrom cfnlint.helpers import REGIONS\n\n\nLOGGER = logging.getLogger('cfnlint')\nDEFAULT_RULESDIR = os.path.join(os.path.dirname(__file__), 'rules')\n\n\nclass CfnLintExitException(Exception):\n \"\"\"Generic exception used when the cli should exit\"\"\"\n def __init__(self, msg=None, exit_code=1):\n if msg is None:\n msg = 'process failed with exit code %s' % exit_code\n super(CfnLintExitException, self).__init__(msg)\n self.exit_code = exit_code\n\n\nclass InvalidRegionException(CfnLintExitException):\n \"\"\"When an unsupported/invalid region is supplied\"\"\"\n\n\nclass UnexpectedRuleException(CfnLintExitException):\n \"\"\"When processing a rule fails in an unexpected way\"\"\"\n\n\ndef run_cli(filename, template, rules, regions, override_spec):\n \"\"\"Process args and run\"\"\"\n\n if override_spec:\n cfnlint.helpers.override_specs(override_spec)\n\n return run_checks(filename, template, rules, regions)\n\n\ndef get_exit_code(matches):\n \"\"\" Determine exit code \"\"\"\n exit_code = 0\n for match in matches:\n if match.rule.id[0] == 'I':\n exit_code = exit_code | 8\n elif match.rule.id[0] == 'W':\n exit_code = exit_code | 4\n elif match.rule.id[0] == 'E':\n exit_code = exit_code | 2\n\n return exit_code\n\n\ndef get_formatter(fmt):\n \"\"\" Get Formatter\"\"\"\n formatter = {}\n if fmt:\n if fmt == 'quiet':\n formatter = cfnlint.formatters.QuietFormatter()\n elif fmt == 'parseable':\n # pylint: disable=bad-option-value\n formatter = cfnlint.formatters.ParseableFormatter()\n elif fmt == 'json':\n formatter = cfnlint.formatters.JsonFormatter()\n else:\n formatter = cfnlint.formatters.Formatter()\n\n return formatter\n\n\ndef get_rules(rulesdir, ignore_rules, include_rules, configure_rules=None, include_experimental=False):\n \"\"\"Get rules\"\"\"\n rules = RulesCollection(ignore_rules, include_rules, configure_rules, include_experimental)\n rules_dirs = [DEFAULT_RULESDIR] + rulesdir\n try:\n for rules_dir in rules_dirs:\n rules.create_from_directory(rules_dir)\n except OSError as e:\n raise UnexpectedRuleException('Tried to append rules but got an error: %s' % str(e), 1)\n return rules\n\n\ndef configure_logging(debug_logging):\n \"\"\" Backwards compatibility for integrators \"\"\"\n LOGGER.info('Update your integrations to use \"cfnlint.config.configure_logging\" instead')\n cfnlint.config.configure_logging(debug_logging, False)\n\n\ndef get_args_filenames(cli_args):\n \"\"\" Get Template Configuration items and set them as default values\"\"\"\n try:\n config = cfnlint.config.ConfigMixIn(cli_args)\n except ValidationError as e:\n LOGGER.error('Error parsing config file: %s', str(e))\n exit(1)\n\n fmt = config.format\n formatter = get_formatter(fmt)\n\n if config.update_specs:\n cfnlint.maintenance.update_resource_specs()\n exit(0)\n\n if config.update_documentation:\n # Get ALL rules (ignore the CLI settings))\n documentation_rules = cfnlint.core.get_rules([], [], ['I', 'E', 'W'], {}, True)\n cfnlint.maintenance.update_documentation(documentation_rules)\n exit(0)\n\n if 
config.update_iam_policies:\n cfnlint.maintenance.update_iam_policies()\n exit(0)\n\n if config.listrules:\n rules = cfnlint.core.get_rules(\n config.append_rules,\n config.ignore_checks,\n config.include_checks,\n config.configure_rules\n )\n print(rules)\n exit(0)\n\n if not sys.stdin.isatty() and not config.templates:\n return(config, [None], formatter)\n\n if not config.templates:\n # Not specified, print the help\n config.parser.print_help()\n exit(1)\n\n return(config, config.templates, formatter)\n\n\ndef get_template_rules(filename, args):\n \"\"\" Get Template Configuration items and set them as default values\"\"\"\n\n (template, matches) = cfnlint.decode.decode(filename, args.ignore_bad_template)\n\n if matches:\n return(template, [], matches)\n\n args.template_args = template\n\n rules = cfnlint.core.get_rules(\n args.append_rules,\n args.ignore_checks,\n args.include_checks,\n args.configure_rules,\n args.include_experimental,\n )\n\n return(template, rules, [])\n\n\ndef run_checks(filename, template, rules, regions):\n \"\"\"Run Checks against the template\"\"\"\n if regions:\n if not set(regions).issubset(set(REGIONS)):\n unsupported_regions = list(set(regions).difference(set(REGIONS)))\n msg = 'Regions %s are unsupported. Supported regions are %s' % (unsupported_regions, REGIONS)\n raise InvalidRegionException(msg, 32)\n\n matches = []\n\n runner = cfnlint.Runner(rules, filename, template, regions)\n matches.extend(runner.transform())\n # Only do rule analysis if Transform was successful\n if not matches:\n try:\n matches.extend(runner.run())\n except Exception as err: # pylint: disable=W0703\n msg = 'Tried to process rules on file %s but got an error: %s' % (filename, str(err))\n UnexpectedRuleException(msg, 1)\n matches.sort(key=lambda x: (x.filename, x.linenumber, x.rule.id))\n\n return(matches)\n", "path": "src/cfnlint/core.py"}]}
| 2,586 | 99 |
gh_patches_debug_4369
|
rasdani/github-patches
|
git_diff
|
openvinotoolkit__training_extensions-2672
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
POT -> PTQ transition in docs
Steps to be done:
- Rename POT -> PTQ
- Replace POT docs links with NNCF.PTQ docs
- Mention that the output model would be quantized to INT8
Related pages:
https://openvinotoolkit.github.io/training_extensions/stable/guide/explanation/additional_features/models_optimization.html
https://openvinotoolkit.github.io/training_extensions/stable/guide/tutorials/base/how_to_train/classification.html#optimization and other tasks
--- END ISSUE ---
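(Context, not part of the original issue: "PTQ" above refers to NNCF's post-training quantization. Below is a minimal, hedged sketch of what that looks like with NNCF's `nncf.quantize`/`nncf.Dataset` API; the model and calibration data are placeholders chosen for illustration and are not OTX code. The point the docs should carry is that the optimized output is an INT8-quantized model.)

```python
import torch
import torchvision
import nncf  # NNCF provides the post-training quantization (PTQ) API

# Placeholder FP32 model and synthetic calibration data, for illustration only.
model = torchvision.models.resnet18(weights=None)
calibration_images = [torch.randn(1, 3, 224, 224) for _ in range(10)]

# nncf.Dataset wraps any iterable; the transform returns what the model expects.
calibration_dataset = nncf.Dataset(calibration_images, lambda item: item)

# PTQ collects activation statistics on the calibration set and quantizes the
# model's weights and activations to INT8 without any fine-tuning.
quantized_model = nncf.quantize(model, calibration_dataset)
```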
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/otx/cli/tools/eval.py`
Content:
```
1 """Model quality evaluation tool."""
2
3 # Copyright (C) 2021 Intel Corporation
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing,
12 # software distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions
15 # and limitations under the License.
16
17 import json
18 from pathlib import Path
19
20 # Update environment variables for CLI use
21 import otx.cli # noqa: F401
22 from otx.api.entities.inference_parameters import InferenceParameters
23 from otx.api.entities.model_template import TaskType
24 from otx.api.entities.resultset import ResultSetEntity
25 from otx.api.entities.subset import Subset
26 from otx.api.entities.task_environment import TaskEnvironment
27 from otx.cli.manager import ConfigManager
28 from otx.cli.utils.importing import get_impl_class
29 from otx.cli.utils.io import read_model
30 from otx.cli.utils.nncf import is_checkpoint_nncf
31 from otx.cli.utils.parser import (
32 add_hyper_parameters_sub_parser,
33 get_override_param,
34 get_parser_and_hprams_data,
35 )
36 from otx.core.data.adapter import get_dataset_adapter
37 from otx.utils.logger import config_logger
38
39 # pylint: disable=too-many-locals
40
41
42 def get_args():
43 """Parses command line arguments."""
44 parser, hyper_parameters, params = get_parser_and_hprams_data()
45
46 parser.add_argument(
47 "--test-data-roots",
48 help="Comma-separated paths to test data folders.",
49 )
50 parser.add_argument(
51 "--load-weights",
52 help="Load model weights from previously saved checkpoint."
53 "It could be a trained/optimized model (POT only) or exported model.",
54 )
55 parser.add_argument(
56 "-o",
57 "--output",
58 help="Location where the intermediate output of the task will be stored.",
59 )
60 parser.add_argument(
61 "--workspace",
62 help="Path to the workspace where the command will run.",
63 default=None,
64 )
65 parser.add_argument(
66 "--data",
67 type=str,
68 default=None,
69 help="The data.yaml path want to use in train task.",
70 )
71
72 add_hyper_parameters_sub_parser(parser, hyper_parameters, modes=("INFERENCE",))
73 override_param = get_override_param(params)
74
75 return parser.parse_args(), override_param
76
77
78 def check_label_schemas(label_schema_a, label_schema_b):
79 """Checks that both passed label schemas have labels with the same names.
80
81 If it is False that it raises RuntimeError.
82 """
83
84 for model_label, snapshot_label in zip(label_schema_a.get_labels(False), label_schema_b.get_labels(False)):
85 if model_label.name != snapshot_label.name:
86 raise RuntimeError(
87 "Labels schemas from model and dataset are different: " f"\n{label_schema_a} \n\tvs\n{label_schema_b}"
88 )
89
90
91 def main():
92 """Main function that is used for model evaluation."""
93
94 # Dynamically create an argument parser based on override parameters.
95 args, override_param = get_args()
96
97 config_manager = ConfigManager(args, workspace_root=args.workspace, mode="eval")
98 config_logger(config_manager.output_path / "otx.log", "INFO")
99 # Auto-Configuration for model template
100 config_manager.configure_template()
101
102 if not args.load_weights and config_manager.check_workspace():
103 latest_model_path = (
104 config_manager.workspace_root / "outputs" / "latest_trained_model" / "models" / "weights.pth"
105 )
106 args.load_weights = str(latest_model_path)
107
108 # Update Hyper Parameter Configs
109 hyper_parameters = config_manager.get_hyparams_config(override_param)
110
111 # Get classes for Task, ConfigurableParameters and Dataset.
112 template = config_manager.template
113 if any(args.load_weights.endswith(x) for x in (".bin", ".xml", ".zip")):
114 task_class = get_impl_class(template.entrypoints.openvino)
115 elif args.load_weights.endswith(".pth"):
116 if is_checkpoint_nncf(args.load_weights):
117 task_class = get_impl_class(template.entrypoints.nncf)
118 else:
119 task_class = get_impl_class(template.entrypoints.base)
120 else:
121 raise ValueError(f"Unsupported file: {args.load_weights}")
122
123 # Auto-Configuration for Dataset configuration
124 config_manager.configure_data_config(update_data_yaml=config_manager.check_workspace())
125 dataset_config = config_manager.get_dataset_config(subsets=["test"])
126 dataset_adapter = get_dataset_adapter(**dataset_config)
127 dataset, label_schema = dataset_adapter.get_otx_dataset(), dataset_adapter.get_label_schema()
128
129 environment = TaskEnvironment(
130 model=None,
131 hyper_parameters=hyper_parameters,
132 label_schema=label_schema,
133 model_template=template,
134 )
135
136 environment.model = read_model(environment.get_model_configuration(), args.load_weights, None)
137
138 task = task_class(task_environment=environment)
139
140 validation_dataset = dataset.get_subset(Subset.TESTING)
141 predicted_validation_dataset = task.infer(
142 # temp (sungchul): remain annotation for visual prompting
143 validation_dataset
144 if getattr(task, "task_type", None) == TaskType.VISUAL_PROMPTING
145 else validation_dataset.with_empty_annotations(),
146 InferenceParameters(is_evaluation=False),
147 )
148
149 resultset = ResultSetEntity(
150 model=environment.model,
151 ground_truth_dataset=validation_dataset,
152 prediction_dataset=predicted_validation_dataset,
153 )
154 task.evaluate(resultset)
155 assert resultset.performance is not None
156 print(resultset.performance)
157
158 output_path = Path(args.output) if args.output else config_manager.output_path
159 performance = {resultset.performance.score.name: resultset.performance.score.value}
160 if hasattr(task, "avg_time_per_image"):
161 performance["avg_time_per_image"] = task.avg_time_per_image
162 with open(output_path / "performance.json", "w", encoding="UTF-8") as write_file:
163 json.dump(performance, write_file)
164
165 return dict(retcode=0, template=template.name)
166
167
168 if __name__ == "__main__":
169 main()
170
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/otx/cli/tools/eval.py b/src/otx/cli/tools/eval.py
--- a/src/otx/cli/tools/eval.py
+++ b/src/otx/cli/tools/eval.py
@@ -49,8 +49,8 @@
)
parser.add_argument(
"--load-weights",
- help="Load model weights from previously saved checkpoint."
- "It could be a trained/optimized model (POT only) or exported model.",
+ help="Load model weights from previously saved checkpoint. "
+ "It could be a trained/optimized model (with PTQ only) or exported model.",
)
parser.add_argument(
"-o",
|
{"golden_diff": "diff --git a/src/otx/cli/tools/eval.py b/src/otx/cli/tools/eval.py\n--- a/src/otx/cli/tools/eval.py\n+++ b/src/otx/cli/tools/eval.py\n@@ -49,8 +49,8 @@\n )\n parser.add_argument(\n \"--load-weights\",\n- help=\"Load model weights from previously saved checkpoint.\"\n- \"It could be a trained/optimized model (POT only) or exported model.\",\n+ help=\"Load model weights from previously saved checkpoint. \"\n+ \"It could be a trained/optimized model (with PTQ only) or exported model.\",\n )\n parser.add_argument(\n \"-o\",\n", "issue": "POT -> PTQ transition in docs\nSteps to be done:\r\n- Rename POT -> PTQ\r\n- Replace POT docs links with NNCF.PTQ docs\r\n- Mention that the output model would be quantized to INT8\r\n\r\nRelated pages: \r\nhttps://openvinotoolkit.github.io/training_extensions/stable/guide/explanation/additional_features/models_optimization.html\r\nhttps://openvinotoolkit.github.io/training_extensions/stable/guide/tutorials/base/how_to_train/classification.html#optimization and other tasks\n", "before_files": [{"content": "\"\"\"Model quality evaluation tool.\"\"\"\n\n# Copyright (C) 2021 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions\n# and limitations under the License.\n\nimport json\nfrom pathlib import Path\n\n# Update environment variables for CLI use\nimport otx.cli # noqa: F401\nfrom otx.api.entities.inference_parameters import InferenceParameters\nfrom otx.api.entities.model_template import TaskType\nfrom otx.api.entities.resultset import ResultSetEntity\nfrom otx.api.entities.subset import Subset\nfrom otx.api.entities.task_environment import TaskEnvironment\nfrom otx.cli.manager import ConfigManager\nfrom otx.cli.utils.importing import get_impl_class\nfrom otx.cli.utils.io import read_model\nfrom otx.cli.utils.nncf import is_checkpoint_nncf\nfrom otx.cli.utils.parser import (\n add_hyper_parameters_sub_parser,\n get_override_param,\n get_parser_and_hprams_data,\n)\nfrom otx.core.data.adapter import get_dataset_adapter\nfrom otx.utils.logger import config_logger\n\n# pylint: disable=too-many-locals\n\n\ndef get_args():\n \"\"\"Parses command line arguments.\"\"\"\n parser, hyper_parameters, params = get_parser_and_hprams_data()\n\n parser.add_argument(\n \"--test-data-roots\",\n help=\"Comma-separated paths to test data folders.\",\n )\n parser.add_argument(\n \"--load-weights\",\n help=\"Load model weights from previously saved checkpoint.\"\n \"It could be a trained/optimized model (POT only) or exported model.\",\n )\n parser.add_argument(\n \"-o\",\n \"--output\",\n help=\"Location where the intermediate output of the task will be stored.\",\n )\n parser.add_argument(\n \"--workspace\",\n help=\"Path to the workspace where the command will run.\",\n default=None,\n )\n parser.add_argument(\n \"--data\",\n type=str,\n default=None,\n help=\"The data.yaml path want to use in train task.\",\n )\n\n add_hyper_parameters_sub_parser(parser, hyper_parameters, modes=(\"INFERENCE\",))\n override_param = get_override_param(params)\n\n return parser.parse_args(), 
override_param\n\n\ndef check_label_schemas(label_schema_a, label_schema_b):\n \"\"\"Checks that both passed label schemas have labels with the same names.\n\n If it is False that it raises RuntimeError.\n \"\"\"\n\n for model_label, snapshot_label in zip(label_schema_a.get_labels(False), label_schema_b.get_labels(False)):\n if model_label.name != snapshot_label.name:\n raise RuntimeError(\n \"Labels schemas from model and dataset are different: \" f\"\\n{label_schema_a} \\n\\tvs\\n{label_schema_b}\"\n )\n\n\ndef main():\n \"\"\"Main function that is used for model evaluation.\"\"\"\n\n # Dynamically create an argument parser based on override parameters.\n args, override_param = get_args()\n\n config_manager = ConfigManager(args, workspace_root=args.workspace, mode=\"eval\")\n config_logger(config_manager.output_path / \"otx.log\", \"INFO\")\n # Auto-Configuration for model template\n config_manager.configure_template()\n\n if not args.load_weights and config_manager.check_workspace():\n latest_model_path = (\n config_manager.workspace_root / \"outputs\" / \"latest_trained_model\" / \"models\" / \"weights.pth\"\n )\n args.load_weights = str(latest_model_path)\n\n # Update Hyper Parameter Configs\n hyper_parameters = config_manager.get_hyparams_config(override_param)\n\n # Get classes for Task, ConfigurableParameters and Dataset.\n template = config_manager.template\n if any(args.load_weights.endswith(x) for x in (\".bin\", \".xml\", \".zip\")):\n task_class = get_impl_class(template.entrypoints.openvino)\n elif args.load_weights.endswith(\".pth\"):\n if is_checkpoint_nncf(args.load_weights):\n task_class = get_impl_class(template.entrypoints.nncf)\n else:\n task_class = get_impl_class(template.entrypoints.base)\n else:\n raise ValueError(f\"Unsupported file: {args.load_weights}\")\n\n # Auto-Configuration for Dataset configuration\n config_manager.configure_data_config(update_data_yaml=config_manager.check_workspace())\n dataset_config = config_manager.get_dataset_config(subsets=[\"test\"])\n dataset_adapter = get_dataset_adapter(**dataset_config)\n dataset, label_schema = dataset_adapter.get_otx_dataset(), dataset_adapter.get_label_schema()\n\n environment = TaskEnvironment(\n model=None,\n hyper_parameters=hyper_parameters,\n label_schema=label_schema,\n model_template=template,\n )\n\n environment.model = read_model(environment.get_model_configuration(), args.load_weights, None)\n\n task = task_class(task_environment=environment)\n\n validation_dataset = dataset.get_subset(Subset.TESTING)\n predicted_validation_dataset = task.infer(\n # temp (sungchul): remain annotation for visual prompting\n validation_dataset\n if getattr(task, \"task_type\", None) == TaskType.VISUAL_PROMPTING\n else validation_dataset.with_empty_annotations(),\n InferenceParameters(is_evaluation=False),\n )\n\n resultset = ResultSetEntity(\n model=environment.model,\n ground_truth_dataset=validation_dataset,\n prediction_dataset=predicted_validation_dataset,\n )\n task.evaluate(resultset)\n assert resultset.performance is not None\n print(resultset.performance)\n\n output_path = Path(args.output) if args.output else config_manager.output_path\n performance = {resultset.performance.score.name: resultset.performance.score.value}\n if hasattr(task, \"avg_time_per_image\"):\n performance[\"avg_time_per_image\"] = task.avg_time_per_image\n with open(output_path / \"performance.json\", \"w\", encoding=\"UTF-8\") as write_file:\n json.dump(performance, write_file)\n\n return dict(retcode=0, template=template.name)\n\n\nif 
__name__ == \"__main__\":\n main()\n", "path": "src/otx/cli/tools/eval.py"}], "after_files": [{"content": "\"\"\"Model quality evaluation tool.\"\"\"\n\n# Copyright (C) 2021 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions\n# and limitations under the License.\n\nimport json\nfrom pathlib import Path\n\n# Update environment variables for CLI use\nimport otx.cli # noqa: F401\nfrom otx.api.entities.inference_parameters import InferenceParameters\nfrom otx.api.entities.model_template import TaskType\nfrom otx.api.entities.resultset import ResultSetEntity\nfrom otx.api.entities.subset import Subset\nfrom otx.api.entities.task_environment import TaskEnvironment\nfrom otx.cli.manager import ConfigManager\nfrom otx.cli.utils.importing import get_impl_class\nfrom otx.cli.utils.io import read_model\nfrom otx.cli.utils.nncf import is_checkpoint_nncf\nfrom otx.cli.utils.parser import (\n add_hyper_parameters_sub_parser,\n get_override_param,\n get_parser_and_hprams_data,\n)\nfrom otx.core.data.adapter import get_dataset_adapter\nfrom otx.utils.logger import config_logger\n\n# pylint: disable=too-many-locals\n\n\ndef get_args():\n \"\"\"Parses command line arguments.\"\"\"\n parser, hyper_parameters, params = get_parser_and_hprams_data()\n\n parser.add_argument(\n \"--test-data-roots\",\n help=\"Comma-separated paths to test data folders.\",\n )\n parser.add_argument(\n \"--load-weights\",\n help=\"Load model weights from previously saved checkpoint. 
\"\n \"It could be a trained/optimized model (with PTQ only) or exported model.\",\n )\n parser.add_argument(\n \"-o\",\n \"--output\",\n help=\"Location where the intermediate output of the task will be stored.\",\n )\n parser.add_argument(\n \"--workspace\",\n help=\"Path to the workspace where the command will run.\",\n default=None,\n )\n parser.add_argument(\n \"--data\",\n type=str,\n default=None,\n help=\"The data.yaml path want to use in train task.\",\n )\n\n add_hyper_parameters_sub_parser(parser, hyper_parameters, modes=(\"INFERENCE\",))\n override_param = get_override_param(params)\n\n return parser.parse_args(), override_param\n\n\ndef check_label_schemas(label_schema_a, label_schema_b):\n \"\"\"Checks that both passed label schemas have labels with the same names.\n\n If it is False that it raises RuntimeError.\n \"\"\"\n\n for model_label, snapshot_label in zip(label_schema_a.get_labels(False), label_schema_b.get_labels(False)):\n if model_label.name != snapshot_label.name:\n raise RuntimeError(\n \"Labels schemas from model and dataset are different: \" f\"\\n{label_schema_a} \\n\\tvs\\n{label_schema_b}\"\n )\n\n\ndef main():\n \"\"\"Main function that is used for model evaluation.\"\"\"\n\n # Dynamically create an argument parser based on override parameters.\n args, override_param = get_args()\n\n config_manager = ConfigManager(args, workspace_root=args.workspace, mode=\"eval\")\n config_logger(config_manager.output_path / \"otx.log\", \"INFO\")\n # Auto-Configuration for model template\n config_manager.configure_template()\n\n if not args.load_weights and config_manager.check_workspace():\n latest_model_path = (\n config_manager.workspace_root / \"outputs\" / \"latest_trained_model\" / \"models\" / \"weights.pth\"\n )\n args.load_weights = str(latest_model_path)\n\n # Update Hyper Parameter Configs\n hyper_parameters = config_manager.get_hyparams_config(override_param)\n\n # Get classes for Task, ConfigurableParameters and Dataset.\n template = config_manager.template\n if any(args.load_weights.endswith(x) for x in (\".bin\", \".xml\", \".zip\")):\n task_class = get_impl_class(template.entrypoints.openvino)\n elif args.load_weights.endswith(\".pth\"):\n if is_checkpoint_nncf(args.load_weights):\n task_class = get_impl_class(template.entrypoints.nncf)\n else:\n task_class = get_impl_class(template.entrypoints.base)\n else:\n raise ValueError(f\"Unsupported file: {args.load_weights}\")\n\n # Auto-Configuration for Dataset configuration\n config_manager.configure_data_config(update_data_yaml=config_manager.check_workspace())\n dataset_config = config_manager.get_dataset_config(subsets=[\"test\"])\n dataset_adapter = get_dataset_adapter(**dataset_config)\n dataset, label_schema = dataset_adapter.get_otx_dataset(), dataset_adapter.get_label_schema()\n\n environment = TaskEnvironment(\n model=None,\n hyper_parameters=hyper_parameters,\n label_schema=label_schema,\n model_template=template,\n )\n\n environment.model = read_model(environment.get_model_configuration(), args.load_weights, None)\n\n task = task_class(task_environment=environment)\n\n validation_dataset = dataset.get_subset(Subset.TESTING)\n predicted_validation_dataset = task.infer(\n # temp (sungchul): remain annotation for visual prompting\n validation_dataset\n if getattr(task, \"task_type\", None) == TaskType.VISUAL_PROMPTING\n else validation_dataset.with_empty_annotations(),\n InferenceParameters(is_evaluation=False),\n )\n\n resultset = ResultSetEntity(\n model=environment.model,\n 
ground_truth_dataset=validation_dataset,\n prediction_dataset=predicted_validation_dataset,\n )\n task.evaluate(resultset)\n assert resultset.performance is not None\n print(resultset.performance)\n\n output_path = Path(args.output) if args.output else config_manager.output_path\n performance = {resultset.performance.score.name: resultset.performance.score.value}\n if hasattr(task, \"avg_time_per_image\"):\n performance[\"avg_time_per_image\"] = task.avg_time_per_image\n with open(output_path / \"performance.json\", \"w\", encoding=\"UTF-8\") as write_file:\n json.dump(performance, write_file)\n\n return dict(retcode=0, template=template.name)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "src/otx/cli/tools/eval.py"}]}
| 2,123 | 150 |
gh_patches_debug_5409
|
rasdani/github-patches
|
git_diff
|
mirumee__ariadne-183
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Move documentation to separate repo and host it on gh-pages
Sphinx has served us well, but we feel it's too limiting for what we have planned for Ariadne.
We've decided to migrate the site to [Docusaurus](https://docusaurus.io) and keep it in a separate repo.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Configuration file for the Sphinx documentation builder.
4 #
5 # This file does only contain a selection of the most common options. For a
6 # full list see the documentation:
7 # http://www.sphinx-doc.org/en/master/config
8
9 # -- Path setup --------------------------------------------------------------
10
11 # If extensions (or modules to document with autodoc) are in another directory,
12 # add these directories to sys.path here. If the directory is relative to the
13 # documentation root, use os.path.abspath to make it absolute, like shown here.
14 #
15 # import os
16 # import sys
17 # sys.path.insert(0, os.path.abspath('.'))
18
19 from datetime import date
20
21 year = date.today().year
22
23
24 # -- Project information -----------------------------------------------------
25
26 project = "Ariadne"
27 copyright = "%s, Mirumee Software" % year
28 author = "Mirumee Software"
29
30 # The short X.Y version
31 version = "4"
32 # The full version, including alpha/beta/rc tags
33 release = "0.4"
34
35
36 # -- General configuration ---------------------------------------------------
37
38 # If your documentation needs a minimal Sphinx version, state it here.
39 #
40 # needs_sphinx = '1.0'
41
42 # Add any Sphinx extension module names here, as strings. They can be
43 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
44 # ones.
45 extensions = ["sphinxcontrib.asyncio"]
46
47 # Add any paths that contain templates here, relative to this directory.
48 templates_path = ["_templates"]
49
50 # The suffix(es) of source filenames.
51 # You can specify multiple suffix as a list of string:
52 #
53 # source_suffix = ['.rst', '.md']
54 source_suffix = [".rst", ".md"]
55
56 # The master toctree document.
57 master_doc = "index"
58
59 # The language for content autogenerated by Sphinx. Refer to documentation
60 # for a list of supported languages.
61 #
62 # This is also used if you do content translation via gettext catalogs.
63 # Usually you set "language" from the command line for these cases.
64 language = None
65
66 # List of patterns, relative to source directory, that match files and
67 # directories to ignore when looking for source files.
68 # This pattern also affects html_static_path and html_extra_path.
69 exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
70
71 # The name of the Pygments (syntax highlighting) style to use.
72 pygments_style = None
73
74
75 # -- Options for HTML output -------------------------------------------------
76
77 # The theme to use for HTML and HTML Help pages. See the documentation for
78 # a list of builtin themes.
79 #
80 html_theme = "alabaster"
81
82 # Theme options are theme-specific and customize the look and feel of a theme
83 # further. For a list of options available for each theme, see the
84 # documentation.
85 #
86 html_theme_options = {
87 "logo": "logo-vertical.png",
88 "github_user": "mirumee",
89 "github_repo": "ariadne",
90 }
91
92 # Add any paths that contain custom static files (such as style sheets) here,
93 # relative to this directory. They are copied after the builtin static files,
94 # so a file named "default.css" will overwrite the builtin "default.css".
95 html_static_path = ["_static"]
96
97 # Custom sidebar templates, must be a dictionary that maps document names
98 # to template names.
99 #
100 # The default sidebars (for documents that don't match any pattern) are
101 # defined by theme itself. Builtin themes are using these templates by
102 # default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
103 # 'searchbox.html']``.
104 #
105 # html_sidebars = {}
106
107
108 # -- Options for HTMLHelp output ---------------------------------------------
109
110 # Output file base name for HTML help builder.
111 htmlhelp_basename = "Ariadnedoc"
112
113
114 # -- Options for LaTeX output ------------------------------------------------
115
116 latex_elements = {
117 # The paper size ('letterpaper' or 'a4paper').
118 #
119 # 'papersize': 'letterpaper',
120 # The font size ('10pt', '11pt' or '12pt').
121 #
122 # 'pointsize': '10pt',
123 # Additional stuff for the LaTeX preamble.
124 #
125 # 'preamble': '',
126 # Latex figure (float) alignment
127 #
128 # 'figure_align': 'htbp',
129 }
130
131 # Grouping the document tree into LaTeX files. List of tuples
132 # (source start file, target name, title,
133 # author, documentclass [howto, manual, or own class]).
134 latex_documents = [
135 (master_doc, "Ariadne.tex", "Ariadne Documentation", "Mirumee Software", "manual")
136 ]
137
138
139 # -- Options for manual page output ------------------------------------------
140
141 # One entry per manual page. List of tuples
142 # (source start file, name, description, authors, manual section).
143 man_pages = [(master_doc, "ariadne", "Ariadne Documentation", [author], 1)]
144
145
146 # -- Options for Texinfo output ----------------------------------------------
147
148 # Grouping the document tree into Texinfo files. List of tuples
149 # (source start file, target name, title, author,
150 # dir menu entry, description, category)
151 texinfo_documents = [
152 (
153 master_doc,
154 "Ariadne",
155 "Ariadne",
156 author,
157 "Ariadne",
158 "Ariadne is a Python library for implementing GraphQL servers, inspired by Apollo Server and built with GraphQL-core-next.",
159 "Miscellaneous",
160 )
161 ]
162
163
164 # -- Options for Epub output -------------------------------------------------
165
166 # Bibliographic Dublin Core info.
167 epub_title = project
168
169 # The unique identifier of the text. This can be a ISBN number
170 # or the project homepage.
171 #
172 # epub_identifier = ''
173
174 # A unique identification for the text.
175 #
176 # epub_uid = ''
177
178 # A list of files that should not be packed into the epub file.
179 epub_exclude_files = ["search.html"]
180
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -42,7 +42,7 @@
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
-extensions = ["sphinxcontrib.asyncio"]
+extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
|
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -42,7 +42,7 @@\n # Add any Sphinx extension module names here, as strings. They can be\n # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n # ones.\n-extensions = [\"sphinxcontrib.asyncio\"]\n+extensions = []\n \n # Add any paths that contain templates here, relative to this directory.\n templates_path = [\"_templates\"]\n", "issue": "Move documentation to separate repo and host it on gh-pages\nSphinx has served us well, but we fell its too limiting for what we have planned for Ariadne.\r\n\r\nWe've decided to migrate the site to the [Docusaurus](https://docusaurus.io) and keep it on separate repo.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\nfrom datetime import date\n\nyear = date.today().year\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Ariadne\"\ncopyright = \"%s, Mirumee Software\" % year\nauthor = \"Mirumee Software\"\n\n# The short X.Y version\nversion = \"4\"\n# The full version, including alpha/beta/rc tags\nrelease = \"0.4\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\"sphinxcontrib.asyncio\"]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = [\".rst\", \".md\"]\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = None\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"alabaster\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. 
For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\n \"logo\": \"logo-vertical.png\",\n \"github_user\": \"mirumee\",\n \"github_repo\": \"ariadne\",\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\n# html_sidebars = {}\n\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"Ariadnedoc\"\n\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, \"Ariadne.tex\", \"Ariadne Documentation\", \"Mirumee Software\", \"manual\")\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"ariadne\", \"Ariadne Documentation\", [author], 1)]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"Ariadne\",\n \"Ariadne\",\n author,\n \"Ariadne\",\n \"Ariadne is a Python library for implementing GraphQL servers, inspired by Apollo Server and built with GraphQL-core-next.\",\n \"Miscellaneous\",\n )\n]\n\n\n# -- Options for Epub output -------------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = project\n\n# The unique identifier of the text. This can be a ISBN number\n# or the project homepage.\n#\n# epub_identifier = ''\n\n# A unique identification for the text.\n#\n# epub_uid = ''\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = [\"search.html\"]\n", "path": "docs/conf.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\nfrom datetime import date\n\nyear = date.today().year\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Ariadne\"\ncopyright = \"%s, Mirumee Software\" % year\nauthor = \"Mirumee Software\"\n\n# The short X.Y version\nversion = \"4\"\n# The full version, including alpha/beta/rc tags\nrelease = \"0.4\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = []\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = [\".rst\", \".md\"]\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = None\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"alabaster\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\n \"logo\": \"logo-vertical.png\",\n \"github_user\": \"mirumee\",\n \"github_repo\": \"ariadne\",\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. 
Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\n# html_sidebars = {}\n\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"Ariadnedoc\"\n\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, \"Ariadne.tex\", \"Ariadne Documentation\", \"Mirumee Software\", \"manual\")\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"ariadne\", \"Ariadne Documentation\", [author], 1)]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"Ariadne\",\n \"Ariadne\",\n author,\n \"Ariadne\",\n \"Ariadne is a Python library for implementing GraphQL servers, inspired by Apollo Server and built with GraphQL-core-next.\",\n \"Miscellaneous\",\n )\n]\n\n\n# -- Options for Epub output -------------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = project\n\n# The unique identifier of the text. This can be a ISBN number\n# or the project homepage.\n#\n# epub_identifier = ''\n\n# A unique identification for the text.\n#\n# epub_uid = ''\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = [\"search.html\"]\n", "path": "docs/conf.py"}]}
| 2,019 | 108 |
gh_patches_debug_27258
|
rasdani/github-patches
|
git_diff
|
Kinto__kinto-1674
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
prompt=none not carried for silent authentication
The [documentation](https://auth0.com/docs/api-auth/tutorials/silent-authentication) is more complex than it needs to be.
To authenticate, you visit `/v1/openid/auth0/login?callback=http...&scopes=...`; it redirects to the OIDC provider (e.g. `https://peterbecom.auth0.com/authorize?client_id=...&response_type=code&scope=openid+email+profile&redirect_uri=...&state=...`), which then redirects you back to the `callback` (if all goes well with the password check) with a code that kinto trades in for an access token, which it then appends to the `callback` URL.
Now, if that access token expires (or is *about to* expire!), we need to authenticate again. The default expiration time for the access token (at least with Auth0) is 1 day. To authenticate again silently, you can simply add `&prompt=none` to the URL, e.g. `https://peterbecom.auth0.com/authorize?client_id=...&response_type=code&scope=openid+email+profile&redirect_uri=...&state=...&prompt=none`. What the OIDC provider does is...
> "Silent authentication lets you perform an authentication flow where Auth0 will only reply with redirects, and never with a login page."
It's basically the same URL as authorization but with `&prompt=none` added.
--- END ISSUE ---
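(Context, not part of the original issue: a hedged sketch of one way the login endpoint could accept an optional `prompt` querystring value and forward it to the Identity Provider's authorization endpoint. This is an illustration written against the code below, not the project's actual patch; the `OneOf(['none'])` restriction and the `build_authorize_url` helper are assumptions made here for clarity.)

```python
import urllib.parse

import colander

from kinto.core.schema import URL


class LoginQuerystringSchema(colander.MappingSchema):
    """Querystring schema for the login endpoint, extended with ``prompt``."""
    callback = URL()
    scope = colander.SchemaNode(colander.String())
    # Optional; restricting the accepted value keeps the endpoint from
    # proxying arbitrary strings to the Identity Provider.
    prompt = colander.SchemaNode(colander.String(),
                                 validator=colander.OneOf(['none']),
                                 missing=colander.drop)


def build_authorize_url(auth_endpoint, client_id, scope, redirect_uri, state,
                        prompt=None):
    """Build the authorization redirect, adding prompt=none only when asked."""
    params = dict(client_id=client_id, response_type='code', scope=scope,
                  redirect_uri=redirect_uri, state=state)
    if prompt:
        params['prompt'] = prompt
    return '{}?{}'.format(auth_endpoint, urllib.parse.urlencode(params))
```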
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/plugins/openid/views.py`
Content:
```
1 import urllib.parse
2
3 import colander
4 import requests
5 from pyramid import httpexceptions
6
7 from cornice.validators import colander_validator
8 from kinto.core import Service
9 from kinto.core.errors import raise_invalid, ERRORS
10 from kinto.core.utils import random_bytes_hex
11 from kinto.core.resource.schema import ErrorResponseSchema
12 from kinto.core.schema import URL
13
14 from .utils import fetch_openid_config
15
16
17 DEFAULT_STATE_TTL_SECONDS = 3600
18 DEFAULT_STATE_LENGTH = 32
19
20
21 class RedirectHeadersSchema(colander.MappingSchema):
22 """Redirect response headers."""
23 location = colander.SchemaNode(colander.String(), name='Location')
24
25
26 class RedirectResponseSchema(colander.MappingSchema):
27 """Redirect response schema."""
28 headers = RedirectHeadersSchema()
29
30
31 response_schemas = {
32 '307': RedirectResponseSchema(description='Successful redirection.'),
33 '400': ErrorResponseSchema(description='The request is invalid.'),
34 }
35
36
37 def provider_validator(request, **kwargs):
38 """
39 This validator verifies that the validator in URL (eg. /openid/auth0/login)
40 is a configured OpenIDConnect policy.
41 """
42 provider = request.matchdict['provider']
43 used = request.registry.settings.get('multiauth.policy.%s.use' % provider, '')
44 if not used.endswith('OpenIDConnectPolicy'):
45 request.errors.add('path', 'provider', 'Unknow provider %r' % provider)
46
47
48 class LoginQuerystringSchema(colander.MappingSchema):
49 """
50 Querystring schema for the login endpoint.
51 """
52 callback = URL()
53 scope = colander.SchemaNode(colander.String())
54
55
56 class LoginSchema(colander.MappingSchema):
57 querystring = LoginQuerystringSchema()
58
59
60 login = Service(name='openid_login',
61 path='/openid/{provider}/login',
62 description='Initiate the OAuth2 login')
63
64
65 @login.get(schema=LoginSchema(),
66 validators=(colander_validator, provider_validator),
67 response_schemas=response_schemas)
68 def get_login(request):
69 """Initiates to login dance for the specified scopes and callback URI
70 using appropriate redirections."""
71
72 # Settings.
73 provider = request.matchdict['provider']
74 settings_prefix = 'multiauth.policy.%s.' % provider
75 issuer = request.registry.settings[settings_prefix + 'issuer']
76 client_id = request.registry.settings[settings_prefix + 'client_id']
77 userid_field = request.registry.settings.get(settings_prefix + 'userid_field')
78 state_ttl = int(request.registry.settings.get(settings_prefix + 'state_ttl_seconds',
79 DEFAULT_STATE_TTL_SECONDS))
80 state_length = int(request.registry.settings.get(settings_prefix + 'state_length',
81 DEFAULT_STATE_LENGTH))
82
83 # Read OpenID configuration (cached by issuer)
84 oid_config = fetch_openid_config(issuer)
85 auth_endpoint = oid_config['authorization_endpoint']
86
87 scope = request.GET['scope']
88 callback = request.GET['callback']
89
90 # Check that email scope is requested if userid field is configured as email.
91 if userid_field == 'email' and 'email' not in scope:
92 error_details = {
93 'name': 'scope',
94 'description': "Provider %s requires 'email' scope" % provider,
95 }
96 raise_invalid(request, **error_details)
97
98 # Generate a random string as state.
99 # And save it until code is traded.
100 state = random_bytes_hex(state_length)
101 request.registry.cache.set('openid:state:' + state, callback, ttl=state_ttl)
102
103 # Redirect the client to the Identity Provider that will eventually redirect
104 # to the OpenID token endpoint.
105 token_uri = request.route_url('openid_token', provider=provider) + '?'
106 params = dict(client_id=client_id, response_type='code', scope=scope,
107 redirect_uri=token_uri, state=state)
108 redirect = '{}?{}'.format(auth_endpoint, urllib.parse.urlencode(params))
109 raise httpexceptions.HTTPTemporaryRedirect(redirect)
110
111
112 class TokenQuerystringSchema(colander.MappingSchema):
113 """
114 Querystring schema for the token endpoint.
115 """
116 code = colander.SchemaNode(colander.String())
117 state = colander.SchemaNode(colander.String())
118
119
120 class TokenSchema(colander.MappingSchema):
121 querystring = TokenQuerystringSchema()
122
123
124 token = Service(name='openid_token',
125 path='/openid/{provider}/token',
126 description='')
127
128
129 @token.get(schema=TokenSchema(),
130 validators=(colander_validator, provider_validator))
131 def get_token(request):
132 """Trades the specified code and state against access and ID tokens.
133 The client is redirected to the original ``callback`` URI with the
134 result in querystring."""
135
136 # Settings.
137 provider = request.matchdict['provider']
138 settings_prefix = 'multiauth.policy.%s.' % provider
139 issuer = request.registry.settings[settings_prefix + 'issuer']
140 client_id = request.registry.settings[settings_prefix + 'client_id']
141 client_secret = request.registry.settings[settings_prefix + 'client_secret']
142
143 # Read OpenID configuration (cached by issuer)
144 oid_config = fetch_openid_config(issuer)
145 token_endpoint = oid_config['token_endpoint']
146
147 code = request.GET['code']
148 state = request.GET['state']
149
150 # State can be used only once.
151 callback = request.registry.cache.delete('openid:state:' + state)
152 if callback is None:
153 error_details = {
154 'name': 'state',
155 'description': 'Invalid state',
156 'errno': ERRORS.INVALID_AUTH_TOKEN.value,
157 }
158 raise_invalid(request, **error_details)
159
160 # Trade the code for tokens on the Identity Provider.
161 # Google Identity requires to specify again redirect_uri.
162 redirect_uri = request.route_url('openid_token', provider=provider) + '?'
163 data = {
164 'code': code,
165 'client_id': client_id,
166 'client_secret': client_secret,
167 'redirect_uri': redirect_uri,
168 'grant_type': 'authorization_code',
169 }
170 resp = requests.post(token_endpoint, data=data)
171
172 # The IdP response is forwarded to the client in the querystring/location hash.
173 # (eg. callback=`http://localhost:3000/#tokens=`)
174 redirect = callback + urllib.parse.quote(resp.text)
175 raise httpexceptions.HTTPTemporaryRedirect(redirect)
176
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kinto/plugins/openid/views.py b/kinto/plugins/openid/views.py
--- a/kinto/plugins/openid/views.py
+++ b/kinto/plugins/openid/views.py
@@ -51,6 +51,9 @@
"""
callback = URL()
scope = colander.SchemaNode(colander.String())
+ prompt = colander.SchemaNode(colander.String(),
+ validator=colander.Regex("none"),
+ missing=colander.drop)
class LoginSchema(colander.MappingSchema):
@@ -86,6 +89,7 @@
scope = request.GET['scope']
callback = request.GET['callback']
+ prompt = request.GET.get('prompt')
# Check that email scope is requested if userid field is configured as email.
if userid_field == 'email' and 'email' not in scope:
@@ -105,6 +109,9 @@
token_uri = request.route_url('openid_token', provider=provider) + '?'
params = dict(client_id=client_id, response_type='code', scope=scope,
redirect_uri=token_uri, state=state)
+ if prompt:
+ # The 'prompt' parameter is optional.
+ params['prompt'] = prompt
redirect = '{}?{}'.format(auth_endpoint, urllib.parse.urlencode(params))
raise httpexceptions.HTTPTemporaryRedirect(redirect)
|
{"golden_diff": "diff --git a/kinto/plugins/openid/views.py b/kinto/plugins/openid/views.py\n--- a/kinto/plugins/openid/views.py\n+++ b/kinto/plugins/openid/views.py\n@@ -51,6 +51,9 @@\n \"\"\"\n callback = URL()\n scope = colander.SchemaNode(colander.String())\n+ prompt = colander.SchemaNode(colander.String(),\n+ validator=colander.Regex(\"none\"),\n+ missing=colander.drop)\n \n \n class LoginSchema(colander.MappingSchema):\n@@ -86,6 +89,7 @@\n \n scope = request.GET['scope']\n callback = request.GET['callback']\n+ prompt = request.GET.get('prompt')\n \n # Check that email scope is requested if userid field is configured as email.\n if userid_field == 'email' and 'email' not in scope:\n@@ -105,6 +109,9 @@\n token_uri = request.route_url('openid_token', provider=provider) + '?'\n params = dict(client_id=client_id, response_type='code', scope=scope,\n redirect_uri=token_uri, state=state)\n+ if prompt:\n+ # The 'prompt' parameter is optional.\n+ params['prompt'] = prompt\n redirect = '{}?{}'.format(auth_endpoint, urllib.parse.urlencode(params))\n raise httpexceptions.HTTPTemporaryRedirect(redirect)\n", "issue": "prompt=none not carried for silent authentication\nThe [documentation](https://auth0.com/docs/api-auth/tutorials/silent-authentication) is more complex that it needs to be. \r\n\r\nTo authenticate you visit `/v1/openid/auth0/login?callback=http...&scopes=...` it redirects to the OIDC provider (e.g. `https://peterbecom.auth0.com/authorize?client_id=...&response_type=code&scope=openid+email+profile&redirect_uri=...&state=...`) which then redirects you back to the `callback` (if all goes well with the password check) with a code that kinto trades in for an access token which it then appends to the `callback` URL.\r\n\r\nNow, if that access token expires (or is *about to* expire!) we need to authenticate again. The default expiration time for the access token (at least with Auth0) is 1 day. To do that, you can simply add `&prompt=none` to the URL. E.g. `https://peterbecom.auth0.com/authorize?client_id=...&response_type=code&scope=openid+email+profile&redirect_uri=...&state=...&prompt=none`. What the OIDC provider does is...\r\n\r\n> \"Silent authentication lets you perform an authentication flow where Auth0 will only reply with redirects, and never with a login page.\"\r\n\r\nIt's basically the same URL as authorization but with `&prompt=none` added. \nprompt=none not carried for silent authentication\nThe [documentation](https://auth0.com/docs/api-auth/tutorials/silent-authentication) is more complex that it needs to be. \r\n\r\nTo authenticate you visit `/v1/openid/auth0/login?callback=http...&scopes=...` it redirects to the OIDC provider (e.g. `https://peterbecom.auth0.com/authorize?client_id=...&response_type=code&scope=openid+email+profile&redirect_uri=...&state=...`) which then redirects you back to the `callback` (if all goes well with the password check) with a code that kinto trades in for an access token which it then appends to the `callback` URL.\r\n\r\nNow, if that access token expires (or is *about to* expire!) we need to authenticate again. The default expiration time for the access token (at least with Auth0) is 1 day. To do that, you can simply add `&prompt=none` to the URL. E.g. `https://peterbecom.auth0.com/authorize?client_id=...&response_type=code&scope=openid+email+profile&redirect_uri=...&state=...&prompt=none`. 
What the OIDC provider does is...\r\n\r\n> \"Silent authentication lets you perform an authentication flow where Auth0 will only reply with redirects, and never with a login page.\"\r\n\r\nIt's basically the same URL as authorization but with `&prompt=none` added. \n", "before_files": [{"content": "import urllib.parse\n\nimport colander\nimport requests\nfrom pyramid import httpexceptions\n\nfrom cornice.validators import colander_validator\nfrom kinto.core import Service\nfrom kinto.core.errors import raise_invalid, ERRORS\nfrom kinto.core.utils import random_bytes_hex\nfrom kinto.core.resource.schema import ErrorResponseSchema\nfrom kinto.core.schema import URL\n\nfrom .utils import fetch_openid_config\n\n\nDEFAULT_STATE_TTL_SECONDS = 3600\nDEFAULT_STATE_LENGTH = 32\n\n\nclass RedirectHeadersSchema(colander.MappingSchema):\n \"\"\"Redirect response headers.\"\"\"\n location = colander.SchemaNode(colander.String(), name='Location')\n\n\nclass RedirectResponseSchema(colander.MappingSchema):\n \"\"\"Redirect response schema.\"\"\"\n headers = RedirectHeadersSchema()\n\n\nresponse_schemas = {\n '307': RedirectResponseSchema(description='Successful redirection.'),\n '400': ErrorResponseSchema(description='The request is invalid.'),\n}\n\n\ndef provider_validator(request, **kwargs):\n \"\"\"\n This validator verifies that the validator in URL (eg. /openid/auth0/login)\n is a configured OpenIDConnect policy.\n \"\"\"\n provider = request.matchdict['provider']\n used = request.registry.settings.get('multiauth.policy.%s.use' % provider, '')\n if not used.endswith('OpenIDConnectPolicy'):\n request.errors.add('path', 'provider', 'Unknow provider %r' % provider)\n\n\nclass LoginQuerystringSchema(colander.MappingSchema):\n \"\"\"\n Querystring schema for the login endpoint.\n \"\"\"\n callback = URL()\n scope = colander.SchemaNode(colander.String())\n\n\nclass LoginSchema(colander.MappingSchema):\n querystring = LoginQuerystringSchema()\n\n\nlogin = Service(name='openid_login',\n path='/openid/{provider}/login',\n description='Initiate the OAuth2 login')\n\n\[email protected](schema=LoginSchema(),\n validators=(colander_validator, provider_validator),\n response_schemas=response_schemas)\ndef get_login(request):\n \"\"\"Initiates to login dance for the specified scopes and callback URI\n using appropriate redirections.\"\"\"\n\n # Settings.\n provider = request.matchdict['provider']\n settings_prefix = 'multiauth.policy.%s.' 
% provider\n issuer = request.registry.settings[settings_prefix + 'issuer']\n client_id = request.registry.settings[settings_prefix + 'client_id']\n userid_field = request.registry.settings.get(settings_prefix + 'userid_field')\n state_ttl = int(request.registry.settings.get(settings_prefix + 'state_ttl_seconds',\n DEFAULT_STATE_TTL_SECONDS))\n state_length = int(request.registry.settings.get(settings_prefix + 'state_length',\n DEFAULT_STATE_LENGTH))\n\n # Read OpenID configuration (cached by issuer)\n oid_config = fetch_openid_config(issuer)\n auth_endpoint = oid_config['authorization_endpoint']\n\n scope = request.GET['scope']\n callback = request.GET['callback']\n\n # Check that email scope is requested if userid field is configured as email.\n if userid_field == 'email' and 'email' not in scope:\n error_details = {\n 'name': 'scope',\n 'description': \"Provider %s requires 'email' scope\" % provider,\n }\n raise_invalid(request, **error_details)\n\n # Generate a random string as state.\n # And save it until code is traded.\n state = random_bytes_hex(state_length)\n request.registry.cache.set('openid:state:' + state, callback, ttl=state_ttl)\n\n # Redirect the client to the Identity Provider that will eventually redirect\n # to the OpenID token endpoint.\n token_uri = request.route_url('openid_token', provider=provider) + '?'\n params = dict(client_id=client_id, response_type='code', scope=scope,\n redirect_uri=token_uri, state=state)\n redirect = '{}?{}'.format(auth_endpoint, urllib.parse.urlencode(params))\n raise httpexceptions.HTTPTemporaryRedirect(redirect)\n\n\nclass TokenQuerystringSchema(colander.MappingSchema):\n \"\"\"\n Querystring schema for the token endpoint.\n \"\"\"\n code = colander.SchemaNode(colander.String())\n state = colander.SchemaNode(colander.String())\n\n\nclass TokenSchema(colander.MappingSchema):\n querystring = TokenQuerystringSchema()\n\n\ntoken = Service(name='openid_token',\n path='/openid/{provider}/token',\n description='')\n\n\[email protected](schema=TokenSchema(),\n validators=(colander_validator, provider_validator))\ndef get_token(request):\n \"\"\"Trades the specified code and state against access and ID tokens.\n The client is redirected to the original ``callback`` URI with the\n result in querystring.\"\"\"\n\n # Settings.\n provider = request.matchdict['provider']\n settings_prefix = 'multiauth.policy.%s.' 
% provider\n issuer = request.registry.settings[settings_prefix + 'issuer']\n client_id = request.registry.settings[settings_prefix + 'client_id']\n client_secret = request.registry.settings[settings_prefix + 'client_secret']\n\n # Read OpenID configuration (cached by issuer)\n oid_config = fetch_openid_config(issuer)\n token_endpoint = oid_config['token_endpoint']\n\n code = request.GET['code']\n state = request.GET['state']\n\n # State can be used only once.\n callback = request.registry.cache.delete('openid:state:' + state)\n if callback is None:\n error_details = {\n 'name': 'state',\n 'description': 'Invalid state',\n 'errno': ERRORS.INVALID_AUTH_TOKEN.value,\n }\n raise_invalid(request, **error_details)\n\n # Trade the code for tokens on the Identity Provider.\n # Google Identity requires to specify again redirect_uri.\n redirect_uri = request.route_url('openid_token', provider=provider) + '?'\n data = {\n 'code': code,\n 'client_id': client_id,\n 'client_secret': client_secret,\n 'redirect_uri': redirect_uri,\n 'grant_type': 'authorization_code',\n }\n resp = requests.post(token_endpoint, data=data)\n\n # The IdP response is forwarded to the client in the querystring/location hash.\n # (eg. callback=`http://localhost:3000/#tokens=`)\n redirect = callback + urllib.parse.quote(resp.text)\n raise httpexceptions.HTTPTemporaryRedirect(redirect)\n", "path": "kinto/plugins/openid/views.py"}], "after_files": [{"content": "import urllib.parse\n\nimport colander\nimport requests\nfrom pyramid import httpexceptions\n\nfrom cornice.validators import colander_validator\nfrom kinto.core import Service\nfrom kinto.core.errors import raise_invalid, ERRORS\nfrom kinto.core.utils import random_bytes_hex\nfrom kinto.core.resource.schema import ErrorResponseSchema\nfrom kinto.core.schema import URL\n\nfrom .utils import fetch_openid_config\n\n\nDEFAULT_STATE_TTL_SECONDS = 3600\nDEFAULT_STATE_LENGTH = 32\n\n\nclass RedirectHeadersSchema(colander.MappingSchema):\n \"\"\"Redirect response headers.\"\"\"\n location = colander.SchemaNode(colander.String(), name='Location')\n\n\nclass RedirectResponseSchema(colander.MappingSchema):\n \"\"\"Redirect response schema.\"\"\"\n headers = RedirectHeadersSchema()\n\n\nresponse_schemas = {\n '307': RedirectResponseSchema(description='Successful redirection.'),\n '400': ErrorResponseSchema(description='The request is invalid.'),\n}\n\n\ndef provider_validator(request, **kwargs):\n \"\"\"\n This validator verifies that the validator in URL (eg. 
/openid/auth0/login)\n is a configured OpenIDConnect policy.\n \"\"\"\n provider = request.matchdict['provider']\n used = request.registry.settings.get('multiauth.policy.%s.use' % provider, '')\n if not used.endswith('OpenIDConnectPolicy'):\n request.errors.add('path', 'provider', 'Unknow provider %r' % provider)\n\n\nclass LoginQuerystringSchema(colander.MappingSchema):\n \"\"\"\n Querystring schema for the login endpoint.\n \"\"\"\n callback = URL()\n scope = colander.SchemaNode(colander.String())\n prompt = colander.SchemaNode(colander.String(),\n validator=colander.Regex(\"none\"),\n missing=colander.drop)\n\n\nclass LoginSchema(colander.MappingSchema):\n querystring = LoginQuerystringSchema()\n\n\nlogin = Service(name='openid_login',\n path='/openid/{provider}/login',\n description='Initiate the OAuth2 login')\n\n\[email protected](schema=LoginSchema(),\n validators=(colander_validator, provider_validator),\n response_schemas=response_schemas)\ndef get_login(request):\n \"\"\"Initiates to login dance for the specified scopes and callback URI\n using appropriate redirections.\"\"\"\n\n # Settings.\n provider = request.matchdict['provider']\n settings_prefix = 'multiauth.policy.%s.' % provider\n issuer = request.registry.settings[settings_prefix + 'issuer']\n client_id = request.registry.settings[settings_prefix + 'client_id']\n userid_field = request.registry.settings.get(settings_prefix + 'userid_field')\n state_ttl = int(request.registry.settings.get(settings_prefix + 'state_ttl_seconds',\n DEFAULT_STATE_TTL_SECONDS))\n state_length = int(request.registry.settings.get(settings_prefix + 'state_length',\n DEFAULT_STATE_LENGTH))\n\n # Read OpenID configuration (cached by issuer)\n oid_config = fetch_openid_config(issuer)\n auth_endpoint = oid_config['authorization_endpoint']\n\n scope = request.GET['scope']\n callback = request.GET['callback']\n prompt = request.GET.get('prompt')\n\n # Check that email scope is requested if userid field is configured as email.\n if userid_field == 'email' and 'email' not in scope:\n error_details = {\n 'name': 'scope',\n 'description': \"Provider %s requires 'email' scope\" % provider,\n }\n raise_invalid(request, **error_details)\n\n # Generate a random string as state.\n # And save it until code is traded.\n state = random_bytes_hex(state_length)\n request.registry.cache.set('openid:state:' + state, callback, ttl=state_ttl)\n\n # Redirect the client to the Identity Provider that will eventually redirect\n # to the OpenID token endpoint.\n token_uri = request.route_url('openid_token', provider=provider) + '?'\n params = dict(client_id=client_id, response_type='code', scope=scope,\n redirect_uri=token_uri, state=state)\n if prompt:\n # The 'prompt' parameter is optional.\n params['prompt'] = prompt\n redirect = '{}?{}'.format(auth_endpoint, urllib.parse.urlencode(params))\n raise httpexceptions.HTTPTemporaryRedirect(redirect)\n\n\nclass TokenQuerystringSchema(colander.MappingSchema):\n \"\"\"\n Querystring schema for the token endpoint.\n \"\"\"\n code = colander.SchemaNode(colander.String())\n state = colander.SchemaNode(colander.String())\n\n\nclass TokenSchema(colander.MappingSchema):\n querystring = TokenQuerystringSchema()\n\n\ntoken = Service(name='openid_token',\n path='/openid/{provider}/token',\n description='')\n\n\[email protected](schema=TokenSchema(),\n validators=(colander_validator, provider_validator))\ndef get_token(request):\n \"\"\"Trades the specified code and state against access and ID tokens.\n The client is redirected to the 
original ``callback`` URI with the\n result in querystring.\"\"\"\n\n # Settings.\n provider = request.matchdict['provider']\n settings_prefix = 'multiauth.policy.%s.' % provider\n issuer = request.registry.settings[settings_prefix + 'issuer']\n client_id = request.registry.settings[settings_prefix + 'client_id']\n client_secret = request.registry.settings[settings_prefix + 'client_secret']\n\n # Read OpenID configuration (cached by issuer)\n oid_config = fetch_openid_config(issuer)\n token_endpoint = oid_config['token_endpoint']\n\n code = request.GET['code']\n state = request.GET['state']\n\n # State can be used only once.\n callback = request.registry.cache.delete('openid:state:' + state)\n if callback is None:\n error_details = {\n 'name': 'state',\n 'description': 'Invalid state',\n 'errno': ERRORS.INVALID_AUTH_TOKEN.value,\n }\n raise_invalid(request, **error_details)\n\n # Trade the code for tokens on the Identity Provider.\n # Google Identity requires to specify again redirect_uri.\n redirect_uri = request.route_url('openid_token', provider=provider) + '?'\n data = {\n 'code': code,\n 'client_id': client_id,\n 'client_secret': client_secret,\n 'redirect_uri': redirect_uri,\n 'grant_type': 'authorization_code',\n }\n resp = requests.post(token_endpoint, data=data)\n\n # The IdP response is forwarded to the client in the querystring/location hash.\n # (eg. callback=`http://localhost:3000/#tokens=`)\n redirect = callback + urllib.parse.quote(resp.text)\n raise httpexceptions.HTTPTemporaryRedirect(redirect)\n", "path": "kinto/plugins/openid/views.py"}]}
| 2,653 | 298 |
gh_patches_debug_21805
|
rasdani/github-patches
|
git_diff
|
psychopy__psychopy-4622
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
404 errors on some of the pages in the HELP menus
Version used
- psychopy: 2021.2.3
https://www.psychopy.org/builder/builder.html
https://www.psychopy.org/api/api.html
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `psychopy/app/urls.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """A central location to store information about urls
5 """
6 urls = dict()
7
8 # links based on string names
9 urls['builder'] = "https://www.psychopy.org/builder/builder.html"
10 urls['builder.loops'] = "https://www.psychopy.org/builder/flow.html#loops"
11 # NB. builder components get their urls defined by the component code
12 # (so a custom component can have a url)
13
14 urls['downloads'] = "https://github.com/psychopy/psychopy/releases"
15 urls['changelog'] = "https://www.psychopy.org/changelog.html"
16
17 general = "https://www.psychopy.org/general/"
18 urls['prefs'] = general + "prefs.html"
19 urls['prefs.general'] = general + "prefs.html#general-settings"
20 urls['prefs.app'] = general + "prefs.html#application-settings"
21 urls['prefs.coder'] = general + "prefs.html#coder-settings"
22 urls['prefs.builder'] = general + "prefs.html#builder-settings"
23 urls['prefs.connections'] = general + "prefs.html#connection-settings"
24
25 # links keyed by wxIDs (e.g. menu item IDs)
26 urls['psychopyHome'] = "https://www.psychopy.org/"
27 urls['psychopyReference'] = "https://www.psychopy.org/api/api.html"
28 urls['coderTutorial'] = "https://www.psychopy.org/coder/tutorial1.html"
29 urls['builderHelp'] = urls['builder']
30 urls['builderDemos'] = "http://code.google.com/p/psychopy/downloads/list?can=2&q=demos"
31 urls['projsAbout'] = "https://www.psychopy.org/general/projects.html"
32
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/psychopy/app/urls.py b/psychopy/app/urls.py
--- a/psychopy/app/urls.py
+++ b/psychopy/app/urls.py
@@ -6,7 +6,7 @@
urls = dict()
# links based on string names
-urls['builder'] = "https://www.psychopy.org/builder/builder.html"
+urls['builder'] = "https://www.psychopy.org/builder"
urls['builder.loops'] = "https://www.psychopy.org/builder/flow.html#loops"
# NB. builder components get their urls defined by the component code
# (so a custom component can have a url)
@@ -24,7 +24,7 @@
# links keyed by wxIDs (e.g. menu item IDs)
urls['psychopyHome'] = "https://www.psychopy.org/"
-urls['psychopyReference'] = "https://www.psychopy.org/api/api.html"
+urls['psychopyReference'] = "https://www.psychopy.org/api"
urls['coderTutorial'] = "https://www.psychopy.org/coder/tutorial1.html"
urls['builderHelp'] = urls['builder']
urls['builderDemos'] = "http://code.google.com/p/psychopy/downloads/list?can=2&q=demos"
|
{"golden_diff": "diff --git a/psychopy/app/urls.py b/psychopy/app/urls.py\n--- a/psychopy/app/urls.py\n+++ b/psychopy/app/urls.py\n@@ -6,7 +6,7 @@\n urls = dict()\n \n # links based on string names\n-urls['builder'] = \"https://www.psychopy.org/builder/builder.html\"\n+urls['builder'] = \"https://www.psychopy.org/builder\"\n urls['builder.loops'] = \"https://www.psychopy.org/builder/flow.html#loops\"\n # NB. builder components get their urls defined by the component code\n # (so a custom component can have a url)\n@@ -24,7 +24,7 @@\n \n # links keyed by wxIDs (e.g. menu item IDs)\n urls['psychopyHome'] = \"https://www.psychopy.org/\"\n-urls['psychopyReference'] = \"https://www.psychopy.org/api/api.html\"\n+urls['psychopyReference'] = \"https://www.psychopy.org/api\"\n urls['coderTutorial'] = \"https://www.psychopy.org/coder/tutorial1.html\"\n urls['builderHelp'] = urls['builder']\n urls['builderDemos'] = \"http://code.google.com/p/psychopy/downloads/list?can=2&q=demos\"\n", "issue": "404 errors on some of the page in the HELP menus\nVersion used\r\n- psychopy: 2021.2.3\r\n\r\nhttps://www.psychopy.org/builder/builder.html\r\nhttps://www.psychopy.org/api/api.html\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"A central location to store information about urls\n\"\"\"\nurls = dict()\n\n# links based on string names\nurls['builder'] = \"https://www.psychopy.org/builder/builder.html\"\nurls['builder.loops'] = \"https://www.psychopy.org/builder/flow.html#loops\"\n# NB. builder components get their urls defined by the component code\n# (so a custom component can have a url)\n\nurls['downloads'] = \"https://github.com/psychopy/psychopy/releases\"\nurls['changelog'] = \"https://www.psychopy.org/changelog.html\"\n\ngeneral = \"https://www.psychopy.org/general/\"\nurls['prefs'] = general + \"prefs.html\"\nurls['prefs.general'] = general + \"prefs.html#general-settings\"\nurls['prefs.app'] = general + \"prefs.html#application-settings\"\nurls['prefs.coder'] = general + \"prefs.html#coder-settings\"\nurls['prefs.builder'] = general + \"prefs.html#builder-settings\"\nurls['prefs.connections'] = general + \"prefs.html#connection-settings\"\n\n# links keyed by wxIDs (e.g. menu item IDs)\nurls['psychopyHome'] = \"https://www.psychopy.org/\"\nurls['psychopyReference'] = \"https://www.psychopy.org/api/api.html\"\nurls['coderTutorial'] = \"https://www.psychopy.org/coder/tutorial1.html\"\nurls['builderHelp'] = urls['builder']\nurls['builderDemos'] = \"http://code.google.com/p/psychopy/downloads/list?can=2&q=demos\"\nurls['projsAbout'] = \"https://www.psychopy.org/general/projects.html\"\n", "path": "psychopy/app/urls.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"A central location to store information about urls\n\"\"\"\nurls = dict()\n\n# links based on string names\nurls['builder'] = \"https://www.psychopy.org/builder\"\nurls['builder.loops'] = \"https://www.psychopy.org/builder/flow.html#loops\"\n# NB. 
builder components get their urls defined by the component code\n# (so a custom component can have a url)\n\nurls['downloads'] = \"https://github.com/psychopy/psychopy/releases\"\nurls['changelog'] = \"https://www.psychopy.org/changelog.html\"\n\ngeneral = \"https://www.psychopy.org/general/\"\nurls['prefs'] = general + \"prefs.html\"\nurls['prefs.general'] = general + \"prefs.html#general-settings\"\nurls['prefs.app'] = general + \"prefs.html#application-settings\"\nurls['prefs.coder'] = general + \"prefs.html#coder-settings\"\nurls['prefs.builder'] = general + \"prefs.html#builder-settings\"\nurls['prefs.connections'] = general + \"prefs.html#connection-settings\"\n\n# links keyed by wxIDs (e.g. menu item IDs)\nurls['psychopyHome'] = \"https://www.psychopy.org/\"\nurls['psychopyReference'] = \"https://www.psychopy.org/api\"\nurls['coderTutorial'] = \"https://www.psychopy.org/coder/tutorial1.html\"\nurls['builderHelp'] = urls['builder']\nurls['builderDemos'] = \"http://code.google.com/p/psychopy/downloads/list?can=2&q=demos\"\nurls['projsAbout'] = \"https://www.psychopy.org/general/projects.html\"\n", "path": "psychopy/app/urls.py"}]}
| 739 | 285 |
gh_patches_debug_543
|
rasdani/github-patches
|
git_diff
|
edgedb__edgedb-3085
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Consider implementing ++ operator for JSON
Possible behaviour:
```
db> SELECT to_json('[1,2]') ++ to_json('[3]') = to_json('[1,2,3]');
db> SELECT to_json('{"a": 1}') ++ to_json('{"b":2}') = to_json('{"a":1, "b":2}');
db> SELECT to_json('"123"') ++ to_json('"456"') = to_json('"123456"');
```
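
For reference, a rough Python analogue of the concatenation semantics these examples describe (illustrative only, not EdgeDB's implementation):

```python
def json_concat(a, b):
    """Rough Python analogue of the proposed ++ behaviour for JSON values."""
    if isinstance(a, list) and isinstance(b, list):
        return a + b                 # [1, 2] ++ [3]        -> [1, 2, 3]
    if isinstance(a, dict) and isinstance(b, dict):
        return {**a, **b}            # {"a": 1} ++ {"b": 2} -> {"a": 1, "b": 2}
    if isinstance(a, str) and isinstance(b, str):
        return a + b                 # "123" ++ "456"       -> "123456"
    raise TypeError('++ is not defined for this pair of JSON values')
```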
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `edb/buildmeta.py`
Content:
```
1 #
2 # This source file is part of the EdgeDB open source project.
3 #
4 # Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License");
7 # you may not use this file except in compliance with the License.
8 # You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS,
14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 # See the License for the specific language governing permissions and
16 # limitations under the License.
17 #
18
19
20 from __future__ import annotations
21 from typing import *
22
23 import hashlib
24 import importlib.util
25 import json
26 import logging
27 import os
28 import pathlib
29 import pickle
30 import re
31 import subprocess
32 import tempfile
33
34
35 from edb.common import debug
36 from edb.common import devmode
37 from edb.common import verutils
38
39
40 # Increment this whenever the database layout or stdlib changes.
41 EDGEDB_CATALOG_VERSION = 2021_10_19_00_00
42
43
44 class MetadataError(Exception):
45 pass
46
47
48 def get_build_metadata_value(prop: str) -> str:
49 env_val = os.environ.get(f'_EDGEDB_BUILDMETA_{prop}')
50 if env_val:
51 return env_val
52
53 try:
54 from . import _buildmeta # type: ignore
55 return getattr(_buildmeta, prop)
56 except (ImportError, AttributeError):
57 raise MetadataError(
58 f'could not find {prop} in EdgeDB distribution metadata') from None
59
60
61 def get_pg_config_path() -> pathlib.Path:
62 if devmode.is_in_dev_mode():
63 root = pathlib.Path(__file__).parent.parent
64 pg_config = (root / 'build' / 'postgres' /
65 'install' / 'bin' / 'pg_config').resolve()
66 if not pg_config.is_file():
67 try:
68 pg_config = pathlib.Path(
69 get_build_metadata_value('PG_CONFIG_PATH'))
70 except MetadataError:
71 pass
72
73 if not pg_config.is_file():
74 raise MetadataError('DEV mode: Could not find PostgreSQL build, '
75 'run `pip install -e .`')
76
77 else:
78 pg_config = pathlib.Path(
79 get_build_metadata_value('PG_CONFIG_PATH'))
80
81 if not pg_config.is_file():
82 raise MetadataError(
83 f'invalid pg_config path: {pg_config!r}: file does not exist '
84 f'or is not a regular file')
85
86 return pg_config
87
88
89 def get_runstate_path(data_dir: pathlib.Path) -> pathlib.Path:
90 if devmode.is_in_dev_mode():
91 return data_dir
92 else:
93 runstate_dir = get_build_metadata_value('RUNSTATE_DIR')
94 if runstate_dir is not None:
95 return pathlib.Path(runstate_dir)
96 else:
97 return data_dir
98
99
100 def get_shared_data_dir_path() -> pathlib.Path:
101 if devmode.is_in_dev_mode():
102 return devmode.get_dev_mode_cache_dir() # type: ignore[return-value]
103 else:
104 return pathlib.Path(get_build_metadata_value('SHARED_DATA_DIR'))
105
106
107 def hash_dirs(
108 dirs: Sequence[Tuple[str, str]],
109 *,
110 extra_files: Optional[Sequence[Union[str, pathlib.Path]]]=None
111 ) -> bytes:
112 def hash_dir(dirname, ext, paths):
113 with os.scandir(dirname) as it:
114 for entry in it:
115 if entry.is_file() and entry.name.endswith(ext):
116 paths.append(entry.path)
117 elif entry.is_dir():
118 hash_dir(entry.path, ext, paths)
119
120 paths: List[str] = []
121 for dirname, ext in dirs:
122 hash_dir(dirname, ext, paths)
123
124 if extra_files:
125 for extra_file in extra_files:
126 if isinstance(extra_file, pathlib.Path):
127 extra_file = str(extra_file.resolve())
128 paths.append(extra_file)
129
130 h = hashlib.sha1() # sha1 is the fastest one.
131 for path in sorted(paths):
132 with open(path, 'rb') as f:
133 h.update(f.read())
134
135 return h.digest()
136
137
138 def read_data_cache(
139 cache_key: bytes,
140 path: str,
141 *,
142 pickled: bool=True,
143 source_dir: Optional[pathlib.Path] = None,
144 ) -> Any:
145 if source_dir is None:
146 source_dir = get_shared_data_dir_path()
147 full_path = source_dir / path
148
149 if full_path.exists():
150 with open(full_path, 'rb') as f:
151 src_hash = f.read(len(cache_key))
152 if src_hash == cache_key or debug.flags.bootstrap_cache_yolo:
153 if pickled:
154 data = f.read()
155 try:
156 return pickle.loads(data)
157 except Exception:
158 logging.exception(f'could not unpickle {path}')
159 else:
160 return f.read()
161
162
163 def write_data_cache(
164 obj: Any,
165 cache_key: bytes,
166 path: str,
167 *,
168 pickled: bool = True,
169 target_dir: Optional[pathlib.Path] = None,
170 ):
171 if target_dir is None:
172 target_dir = get_shared_data_dir_path()
173 full_path = target_dir / path
174
175 try:
176 with tempfile.NamedTemporaryFile(
177 mode='wb', dir=full_path.parent, delete=False) as f:
178 f.write(cache_key)
179 if pickled:
180 pickle.dump(obj, file=f, protocol=pickle.HIGHEST_PROTOCOL)
181 else:
182 f.write(obj)
183 except Exception:
184 try:
185 os.unlink(f.name)
186 except OSError:
187 pass
188 finally:
189 raise
190 else:
191 os.rename(f.name, full_path)
192
193
194 def get_version() -> verutils.Version:
195 if devmode.is_in_dev_mode():
196 root = pathlib.Path(__file__).parent.parent.resolve()
197 version = verutils.parse_version(get_version_from_scm(root))
198 else:
199 vertuple: List[Any] = list(get_build_metadata_value('VERSION'))
200 vertuple[2] = verutils.VersionStage(vertuple[2])
201 version = verutils.Version(*vertuple)
202
203 return version
204
205
206 _version_dict: Optional[Mapping[str, Any]] = None
207
208
209 def get_version_dict() -> Mapping[str, Any]:
210 global _version_dict
211
212 if _version_dict is None:
213 ver = get_version()
214 _version_dict = {
215 'major': ver.major,
216 'minor': ver.minor,
217 'stage': ver.stage.name.lower(),
218 'stage_no': ver.stage_no,
219 'local': tuple(ver.local) if ver.local else (),
220 }
221
222 return _version_dict
223
224
225 _version_json: Optional[str] = None
226
227
228 def get_version_json() -> str:
229 global _version_json
230 if _version_json is None:
231 _version_json = json.dumps(get_version_dict())
232 return _version_json
233
234
235 def get_version_from_scm(root: pathlib.Path) -> str:
236 pretend = os.environ.get('SETUPTOOLS_SCM_PRETEND_VERSION')
237 if pretend:
238 return pretend
239
240 posint = r'(0|[1-9]\d*)'
241 pep440_version_re = re.compile(
242 rf"""
243 ^
244 (?P<major>{posint})
245 \.
246 (?P<minor>{posint})
247 (
248 \.
249 (?P<micro>{posint})
250 )?
251 (
252 (?P<prekind>a|b|rc)
253 (?P<preval>{posint})
254 )?
255 $
256 """,
257 re.X,
258 )
259
260 proc = subprocess.run(
261 ['git', 'tag', '--list', 'v*'],
262 stdout=subprocess.PIPE,
263 universal_newlines=True,
264 check=True,
265 cwd=root,
266 )
267 all_tags = {
268 v[1:]
269 for v in proc.stdout.strip().split('\n')
270 if pep440_version_re.match(v[1:])
271 }
272
273 proc = subprocess.run(
274 ['git', 'tag', '--points-at', 'HEAD'],
275 stdout=subprocess.PIPE,
276 universal_newlines=True,
277 check=True,
278 cwd=root,
279 )
280 head_tags = {
281 v[1:]
282 for v in proc.stdout.strip().split('\n')
283 if pep440_version_re.match(v[1:])
284 }
285
286 if all_tags & head_tags:
287 tag = max(head_tags)
288 else:
289 tag = max(all_tags)
290
291 m = pep440_version_re.match(tag)
292 assert m is not None
293 major = m.group('major')
294 minor = m.group('minor')
295 micro = m.group('micro') or ''
296 microkind = '.' if micro else ''
297 prekind = m.group('prekind') or ''
298 preval = m.group('preval') or ''
299
300 if os.environ.get("EDGEDB_BUILD_IS_RELEASE"):
301 # Release build.
302 ver = f'{major}.{minor}{microkind}{micro}{prekind}{preval}'
303 else:
304 # Dev/nightly build.
305 if prekind and preval:
306 preval = str(int(preval) + 1)
307 elif micro:
308 micro = str(int(micro) + 1)
309 else:
310 minor = str(int(minor) + 1)
311
312 incremented_ver = f'{major}.{minor}{microkind}{micro}{prekind}{preval}'
313
314 proc = subprocess.run(
315 ['git', 'rev-list', '--count', 'HEAD'],
316 stdout=subprocess.PIPE,
317 universal_newlines=True,
318 check=True,
319 cwd=root,
320 )
321 commits_on_branch = proc.stdout.strip()
322 ver = f'{incremented_ver}.dev{commits_on_branch}'
323
324 proc = subprocess.run(
325 ['git', 'rev-parse', '--verify', '--quiet', 'HEAD'],
326 stdout=subprocess.PIPE,
327 universal_newlines=True,
328 check=True,
329 cwd=root,
330 )
331 commitish = proc.stdout.strip()
332
333 env = dict(os.environ)
334 env['TZ'] = 'UTC'
335 proc = subprocess.run(
336 ['git', 'show', '-s', '--format=%cd',
337 '--date=format-local:%Y%m%d%H', commitish],
338 stdout=subprocess.PIPE,
339 universal_newlines=True,
340 check=True,
341 cwd=root,
342 env=env,
343 )
344 rev_date = proc.stdout.strip()
345
346 catver = EDGEDB_CATALOG_VERSION
347 return f'{ver}+d{rev_date}.g{commitish[:9]}.cv{catver}'
348
349
350 def get_cache_src_dirs():
351 find_spec = importlib.util.find_spec
352
353 edgeql = pathlib.Path(find_spec('edb.edgeql').origin).parent
354 return (
355 (pathlib.Path(find_spec('edb.schema').origin).parent, '.py'),
356 (edgeql / 'compiler', '.py'),
357 (edgeql / 'parser', '.py'),
358 (pathlib.Path(find_spec('edb.lib').origin).parent, '.edgeql'),
359 (pathlib.Path(find_spec('edb.pgsql.metaschema').origin).parent, '.py'),
360 )
361
362
363 def get_default_tenant_id() -> str:
364 catver = EDGEDB_CATALOG_VERSION
365 return f'V{catver:x}'
366
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/edb/buildmeta.py b/edb/buildmeta.py
--- a/edb/buildmeta.py
+++ b/edb/buildmeta.py
@@ -38,7 +38,7 @@
# Increment this whenever the database layout or stdlib changes.
-EDGEDB_CATALOG_VERSION = 2021_10_19_00_00
+EDGEDB_CATALOG_VERSION = 2021_10_24_00_00
class MetadataError(Exception):
|
{"golden_diff": "diff --git a/edb/buildmeta.py b/edb/buildmeta.py\n--- a/edb/buildmeta.py\n+++ b/edb/buildmeta.py\n@@ -38,7 +38,7 @@\n \n \n # Increment this whenever the database layout or stdlib changes.\n-EDGEDB_CATALOG_VERSION = 2021_10_19_00_00\n+EDGEDB_CATALOG_VERSION = 2021_10_24_00_00\n \n \n class MetadataError(Exception):\n", "issue": "Consider implementing ++ operator for JSON\nPossible behaviour:\r\n\r\n```\r\ndb> SELECT to_json('[1,2]') ++ to_json('[3]') = to_json('[1,2,3]');\r\ndb> SELECT to_json('{\"a\": 1}') ++ to_json('{\"b\":2}') = to_json('{\"a\":1, \"b\":2}');\r\ndb> SELECT to_json('\"123\"') ++ to_json('\"456\"') = to_json('\"123456\"');\r\n```\n", "before_files": [{"content": "#\n# This source file is part of the EdgeDB open source project.\n#\n# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nfrom __future__ import annotations\nfrom typing import *\n\nimport hashlib\nimport importlib.util\nimport json\nimport logging\nimport os\nimport pathlib\nimport pickle\nimport re\nimport subprocess\nimport tempfile\n\n\nfrom edb.common import debug\nfrom edb.common import devmode\nfrom edb.common import verutils\n\n\n# Increment this whenever the database layout or stdlib changes.\nEDGEDB_CATALOG_VERSION = 2021_10_19_00_00\n\n\nclass MetadataError(Exception):\n pass\n\n\ndef get_build_metadata_value(prop: str) -> str:\n env_val = os.environ.get(f'_EDGEDB_BUILDMETA_{prop}')\n if env_val:\n return env_val\n\n try:\n from . 
import _buildmeta # type: ignore\n return getattr(_buildmeta, prop)\n except (ImportError, AttributeError):\n raise MetadataError(\n f'could not find {prop} in EdgeDB distribution metadata') from None\n\n\ndef get_pg_config_path() -> pathlib.Path:\n if devmode.is_in_dev_mode():\n root = pathlib.Path(__file__).parent.parent\n pg_config = (root / 'build' / 'postgres' /\n 'install' / 'bin' / 'pg_config').resolve()\n if not pg_config.is_file():\n try:\n pg_config = pathlib.Path(\n get_build_metadata_value('PG_CONFIG_PATH'))\n except MetadataError:\n pass\n\n if not pg_config.is_file():\n raise MetadataError('DEV mode: Could not find PostgreSQL build, '\n 'run `pip install -e .`')\n\n else:\n pg_config = pathlib.Path(\n get_build_metadata_value('PG_CONFIG_PATH'))\n\n if not pg_config.is_file():\n raise MetadataError(\n f'invalid pg_config path: {pg_config!r}: file does not exist '\n f'or is not a regular file')\n\n return pg_config\n\n\ndef get_runstate_path(data_dir: pathlib.Path) -> pathlib.Path:\n if devmode.is_in_dev_mode():\n return data_dir\n else:\n runstate_dir = get_build_metadata_value('RUNSTATE_DIR')\n if runstate_dir is not None:\n return pathlib.Path(runstate_dir)\n else:\n return data_dir\n\n\ndef get_shared_data_dir_path() -> pathlib.Path:\n if devmode.is_in_dev_mode():\n return devmode.get_dev_mode_cache_dir() # type: ignore[return-value]\n else:\n return pathlib.Path(get_build_metadata_value('SHARED_DATA_DIR'))\n\n\ndef hash_dirs(\n dirs: Sequence[Tuple[str, str]],\n *,\n extra_files: Optional[Sequence[Union[str, pathlib.Path]]]=None\n) -> bytes:\n def hash_dir(dirname, ext, paths):\n with os.scandir(dirname) as it:\n for entry in it:\n if entry.is_file() and entry.name.endswith(ext):\n paths.append(entry.path)\n elif entry.is_dir():\n hash_dir(entry.path, ext, paths)\n\n paths: List[str] = []\n for dirname, ext in dirs:\n hash_dir(dirname, ext, paths)\n\n if extra_files:\n for extra_file in extra_files:\n if isinstance(extra_file, pathlib.Path):\n extra_file = str(extra_file.resolve())\n paths.append(extra_file)\n\n h = hashlib.sha1() # sha1 is the fastest one.\n for path in sorted(paths):\n with open(path, 'rb') as f:\n h.update(f.read())\n\n return h.digest()\n\n\ndef read_data_cache(\n cache_key: bytes,\n path: str,\n *,\n pickled: bool=True,\n source_dir: Optional[pathlib.Path] = None,\n) -> Any:\n if source_dir is None:\n source_dir = get_shared_data_dir_path()\n full_path = source_dir / path\n\n if full_path.exists():\n with open(full_path, 'rb') as f:\n src_hash = f.read(len(cache_key))\n if src_hash == cache_key or debug.flags.bootstrap_cache_yolo:\n if pickled:\n data = f.read()\n try:\n return pickle.loads(data)\n except Exception:\n logging.exception(f'could not unpickle {path}')\n else:\n return f.read()\n\n\ndef write_data_cache(\n obj: Any,\n cache_key: bytes,\n path: str,\n *,\n pickled: bool = True,\n target_dir: Optional[pathlib.Path] = None,\n):\n if target_dir is None:\n target_dir = get_shared_data_dir_path()\n full_path = target_dir / path\n\n try:\n with tempfile.NamedTemporaryFile(\n mode='wb', dir=full_path.parent, delete=False) as f:\n f.write(cache_key)\n if pickled:\n pickle.dump(obj, file=f, protocol=pickle.HIGHEST_PROTOCOL)\n else:\n f.write(obj)\n except Exception:\n try:\n os.unlink(f.name)\n except OSError:\n pass\n finally:\n raise\n else:\n os.rename(f.name, full_path)\n\n\ndef get_version() -> verutils.Version:\n if devmode.is_in_dev_mode():\n root = pathlib.Path(__file__).parent.parent.resolve()\n version = 
verutils.parse_version(get_version_from_scm(root))\n else:\n vertuple: List[Any] = list(get_build_metadata_value('VERSION'))\n vertuple[2] = verutils.VersionStage(vertuple[2])\n version = verutils.Version(*vertuple)\n\n return version\n\n\n_version_dict: Optional[Mapping[str, Any]] = None\n\n\ndef get_version_dict() -> Mapping[str, Any]:\n global _version_dict\n\n if _version_dict is None:\n ver = get_version()\n _version_dict = {\n 'major': ver.major,\n 'minor': ver.minor,\n 'stage': ver.stage.name.lower(),\n 'stage_no': ver.stage_no,\n 'local': tuple(ver.local) if ver.local else (),\n }\n\n return _version_dict\n\n\n_version_json: Optional[str] = None\n\n\ndef get_version_json() -> str:\n global _version_json\n if _version_json is None:\n _version_json = json.dumps(get_version_dict())\n return _version_json\n\n\ndef get_version_from_scm(root: pathlib.Path) -> str:\n pretend = os.environ.get('SETUPTOOLS_SCM_PRETEND_VERSION')\n if pretend:\n return pretend\n\n posint = r'(0|[1-9]\\d*)'\n pep440_version_re = re.compile(\n rf\"\"\"\n ^\n (?P<major>{posint})\n \\.\n (?P<minor>{posint})\n (\n \\.\n (?P<micro>{posint})\n )?\n (\n (?P<prekind>a|b|rc)\n (?P<preval>{posint})\n )?\n $\n \"\"\",\n re.X,\n )\n\n proc = subprocess.run(\n ['git', 'tag', '--list', 'v*'],\n stdout=subprocess.PIPE,\n universal_newlines=True,\n check=True,\n cwd=root,\n )\n all_tags = {\n v[1:]\n for v in proc.stdout.strip().split('\\n')\n if pep440_version_re.match(v[1:])\n }\n\n proc = subprocess.run(\n ['git', 'tag', '--points-at', 'HEAD'],\n stdout=subprocess.PIPE,\n universal_newlines=True,\n check=True,\n cwd=root,\n )\n head_tags = {\n v[1:]\n for v in proc.stdout.strip().split('\\n')\n if pep440_version_re.match(v[1:])\n }\n\n if all_tags & head_tags:\n tag = max(head_tags)\n else:\n tag = max(all_tags)\n\n m = pep440_version_re.match(tag)\n assert m is not None\n major = m.group('major')\n minor = m.group('minor')\n micro = m.group('micro') or ''\n microkind = '.' 
if micro else ''\n prekind = m.group('prekind') or ''\n preval = m.group('preval') or ''\n\n if os.environ.get(\"EDGEDB_BUILD_IS_RELEASE\"):\n # Release build.\n ver = f'{major}.{minor}{microkind}{micro}{prekind}{preval}'\n else:\n # Dev/nightly build.\n if prekind and preval:\n preval = str(int(preval) + 1)\n elif micro:\n micro = str(int(micro) + 1)\n else:\n minor = str(int(minor) + 1)\n\n incremented_ver = f'{major}.{minor}{microkind}{micro}{prekind}{preval}'\n\n proc = subprocess.run(\n ['git', 'rev-list', '--count', 'HEAD'],\n stdout=subprocess.PIPE,\n universal_newlines=True,\n check=True,\n cwd=root,\n )\n commits_on_branch = proc.stdout.strip()\n ver = f'{incremented_ver}.dev{commits_on_branch}'\n\n proc = subprocess.run(\n ['git', 'rev-parse', '--verify', '--quiet', 'HEAD'],\n stdout=subprocess.PIPE,\n universal_newlines=True,\n check=True,\n cwd=root,\n )\n commitish = proc.stdout.strip()\n\n env = dict(os.environ)\n env['TZ'] = 'UTC'\n proc = subprocess.run(\n ['git', 'show', '-s', '--format=%cd',\n '--date=format-local:%Y%m%d%H', commitish],\n stdout=subprocess.PIPE,\n universal_newlines=True,\n check=True,\n cwd=root,\n env=env,\n )\n rev_date = proc.stdout.strip()\n\n catver = EDGEDB_CATALOG_VERSION\n return f'{ver}+d{rev_date}.g{commitish[:9]}.cv{catver}'\n\n\ndef get_cache_src_dirs():\n find_spec = importlib.util.find_spec\n\n edgeql = pathlib.Path(find_spec('edb.edgeql').origin).parent\n return (\n (pathlib.Path(find_spec('edb.schema').origin).parent, '.py'),\n (edgeql / 'compiler', '.py'),\n (edgeql / 'parser', '.py'),\n (pathlib.Path(find_spec('edb.lib').origin).parent, '.edgeql'),\n (pathlib.Path(find_spec('edb.pgsql.metaschema').origin).parent, '.py'),\n )\n\n\ndef get_default_tenant_id() -> str:\n catver = EDGEDB_CATALOG_VERSION\n return f'V{catver:x}'\n", "path": "edb/buildmeta.py"}], "after_files": [{"content": "#\n# This source file is part of the EdgeDB open source project.\n#\n# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nfrom __future__ import annotations\nfrom typing import *\n\nimport hashlib\nimport importlib.util\nimport json\nimport logging\nimport os\nimport pathlib\nimport pickle\nimport re\nimport subprocess\nimport tempfile\n\n\nfrom edb.common import debug\nfrom edb.common import devmode\nfrom edb.common import verutils\n\n\n# Increment this whenever the database layout or stdlib changes.\nEDGEDB_CATALOG_VERSION = 2021_10_24_00_00\n\n\nclass MetadataError(Exception):\n pass\n\n\ndef get_build_metadata_value(prop: str) -> str:\n env_val = os.environ.get(f'_EDGEDB_BUILDMETA_{prop}')\n if env_val:\n return env_val\n\n try:\n from . 
import _buildmeta # type: ignore\n return getattr(_buildmeta, prop)\n except (ImportError, AttributeError):\n raise MetadataError(\n f'could not find {prop} in EdgeDB distribution metadata') from None\n\n\ndef get_pg_config_path() -> pathlib.Path:\n if devmode.is_in_dev_mode():\n root = pathlib.Path(__file__).parent.parent\n pg_config = (root / 'build' / 'postgres' /\n 'install' / 'bin' / 'pg_config').resolve()\n if not pg_config.is_file():\n try:\n pg_config = pathlib.Path(\n get_build_metadata_value('PG_CONFIG_PATH'))\n except MetadataError:\n pass\n\n if not pg_config.is_file():\n raise MetadataError('DEV mode: Could not find PostgreSQL build, '\n 'run `pip install -e .`')\n\n else:\n pg_config = pathlib.Path(\n get_build_metadata_value('PG_CONFIG_PATH'))\n\n if not pg_config.is_file():\n raise MetadataError(\n f'invalid pg_config path: {pg_config!r}: file does not exist '\n f'or is not a regular file')\n\n return pg_config\n\n\ndef get_runstate_path(data_dir: pathlib.Path) -> pathlib.Path:\n if devmode.is_in_dev_mode():\n return data_dir\n else:\n runstate_dir = get_build_metadata_value('RUNSTATE_DIR')\n if runstate_dir is not None:\n return pathlib.Path(runstate_dir)\n else:\n return data_dir\n\n\ndef get_shared_data_dir_path() -> pathlib.Path:\n if devmode.is_in_dev_mode():\n return devmode.get_dev_mode_cache_dir() # type: ignore[return-value]\n else:\n return pathlib.Path(get_build_metadata_value('SHARED_DATA_DIR'))\n\n\ndef hash_dirs(\n dirs: Sequence[Tuple[str, str]],\n *,\n extra_files: Optional[Sequence[Union[str, pathlib.Path]]]=None\n) -> bytes:\n def hash_dir(dirname, ext, paths):\n with os.scandir(dirname) as it:\n for entry in it:\n if entry.is_file() and entry.name.endswith(ext):\n paths.append(entry.path)\n elif entry.is_dir():\n hash_dir(entry.path, ext, paths)\n\n paths: List[str] = []\n for dirname, ext in dirs:\n hash_dir(dirname, ext, paths)\n\n if extra_files:\n for extra_file in extra_files:\n if isinstance(extra_file, pathlib.Path):\n extra_file = str(extra_file.resolve())\n paths.append(extra_file)\n\n h = hashlib.sha1() # sha1 is the fastest one.\n for path in sorted(paths):\n with open(path, 'rb') as f:\n h.update(f.read())\n\n return h.digest()\n\n\ndef read_data_cache(\n cache_key: bytes,\n path: str,\n *,\n pickled: bool=True,\n source_dir: Optional[pathlib.Path] = None,\n) -> Any:\n if source_dir is None:\n source_dir = get_shared_data_dir_path()\n full_path = source_dir / path\n\n if full_path.exists():\n with open(full_path, 'rb') as f:\n src_hash = f.read(len(cache_key))\n if src_hash == cache_key or debug.flags.bootstrap_cache_yolo:\n if pickled:\n data = f.read()\n try:\n return pickle.loads(data)\n except Exception:\n logging.exception(f'could not unpickle {path}')\n else:\n return f.read()\n\n\ndef write_data_cache(\n obj: Any,\n cache_key: bytes,\n path: str,\n *,\n pickled: bool = True,\n target_dir: Optional[pathlib.Path] = None,\n):\n if target_dir is None:\n target_dir = get_shared_data_dir_path()\n full_path = target_dir / path\n\n try:\n with tempfile.NamedTemporaryFile(\n mode='wb', dir=full_path.parent, delete=False) as f:\n f.write(cache_key)\n if pickled:\n pickle.dump(obj, file=f, protocol=pickle.HIGHEST_PROTOCOL)\n else:\n f.write(obj)\n except Exception:\n try:\n os.unlink(f.name)\n except OSError:\n pass\n finally:\n raise\n else:\n os.rename(f.name, full_path)\n\n\ndef get_version() -> verutils.Version:\n if devmode.is_in_dev_mode():\n root = pathlib.Path(__file__).parent.parent.resolve()\n version = 
verutils.parse_version(get_version_from_scm(root))\n else:\n vertuple: List[Any] = list(get_build_metadata_value('VERSION'))\n vertuple[2] = verutils.VersionStage(vertuple[2])\n version = verutils.Version(*vertuple)\n\n return version\n\n\n_version_dict: Optional[Mapping[str, Any]] = None\n\n\ndef get_version_dict() -> Mapping[str, Any]:\n global _version_dict\n\n if _version_dict is None:\n ver = get_version()\n _version_dict = {\n 'major': ver.major,\n 'minor': ver.minor,\n 'stage': ver.stage.name.lower(),\n 'stage_no': ver.stage_no,\n 'local': tuple(ver.local) if ver.local else (),\n }\n\n return _version_dict\n\n\n_version_json: Optional[str] = None\n\n\ndef get_version_json() -> str:\n global _version_json\n if _version_json is None:\n _version_json = json.dumps(get_version_dict())\n return _version_json\n\n\ndef get_version_from_scm(root: pathlib.Path) -> str:\n pretend = os.environ.get('SETUPTOOLS_SCM_PRETEND_VERSION')\n if pretend:\n return pretend\n\n posint = r'(0|[1-9]\\d*)'\n pep440_version_re = re.compile(\n rf\"\"\"\n ^\n (?P<major>{posint})\n \\.\n (?P<minor>{posint})\n (\n \\.\n (?P<micro>{posint})\n )?\n (\n (?P<prekind>a|b|rc)\n (?P<preval>{posint})\n )?\n $\n \"\"\",\n re.X,\n )\n\n proc = subprocess.run(\n ['git', 'tag', '--list', 'v*'],\n stdout=subprocess.PIPE,\n universal_newlines=True,\n check=True,\n cwd=root,\n )\n all_tags = {\n v[1:]\n for v in proc.stdout.strip().split('\\n')\n if pep440_version_re.match(v[1:])\n }\n\n proc = subprocess.run(\n ['git', 'tag', '--points-at', 'HEAD'],\n stdout=subprocess.PIPE,\n universal_newlines=True,\n check=True,\n cwd=root,\n )\n head_tags = {\n v[1:]\n for v in proc.stdout.strip().split('\\n')\n if pep440_version_re.match(v[1:])\n }\n\n if all_tags & head_tags:\n tag = max(head_tags)\n else:\n tag = max(all_tags)\n\n m = pep440_version_re.match(tag)\n assert m is not None\n major = m.group('major')\n minor = m.group('minor')\n micro = m.group('micro') or ''\n microkind = '.' 
if micro else ''\n prekind = m.group('prekind') or ''\n preval = m.group('preval') or ''\n\n if os.environ.get(\"EDGEDB_BUILD_IS_RELEASE\"):\n # Release build.\n ver = f'{major}.{minor}{microkind}{micro}{prekind}{preval}'\n else:\n # Dev/nightly build.\n if prekind and preval:\n preval = str(int(preval) + 1)\n elif micro:\n micro = str(int(micro) + 1)\n else:\n minor = str(int(minor) + 1)\n\n incremented_ver = f'{major}.{minor}{microkind}{micro}{prekind}{preval}'\n\n proc = subprocess.run(\n ['git', 'rev-list', '--count', 'HEAD'],\n stdout=subprocess.PIPE,\n universal_newlines=True,\n check=True,\n cwd=root,\n )\n commits_on_branch = proc.stdout.strip()\n ver = f'{incremented_ver}.dev{commits_on_branch}'\n\n proc = subprocess.run(\n ['git', 'rev-parse', '--verify', '--quiet', 'HEAD'],\n stdout=subprocess.PIPE,\n universal_newlines=True,\n check=True,\n cwd=root,\n )\n commitish = proc.stdout.strip()\n\n env = dict(os.environ)\n env['TZ'] = 'UTC'\n proc = subprocess.run(\n ['git', 'show', '-s', '--format=%cd',\n '--date=format-local:%Y%m%d%H', commitish],\n stdout=subprocess.PIPE,\n universal_newlines=True,\n check=True,\n cwd=root,\n env=env,\n )\n rev_date = proc.stdout.strip()\n\n catver = EDGEDB_CATALOG_VERSION\n return f'{ver}+d{rev_date}.g{commitish[:9]}.cv{catver}'\n\n\ndef get_cache_src_dirs():\n find_spec = importlib.util.find_spec\n\n edgeql = pathlib.Path(find_spec('edb.edgeql').origin).parent\n return (\n (pathlib.Path(find_spec('edb.schema').origin).parent, '.py'),\n (edgeql / 'compiler', '.py'),\n (edgeql / 'parser', '.py'),\n (pathlib.Path(find_spec('edb.lib').origin).parent, '.edgeql'),\n (pathlib.Path(find_spec('edb.pgsql.metaschema').origin).parent, '.py'),\n )\n\n\ndef get_default_tenant_id() -> str:\n catver = EDGEDB_CATALOG_VERSION\n return f'V{catver:x}'\n", "path": "edb/buildmeta.py"}]}
| 3,801 | 115 |
gh_patches_debug_11681
|
rasdani/github-patches
|
git_diff
|
joke2k__faker-494
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add a file path provider
In the file providers it would be nice to have a file_path provider that would return a path like ```/lorem/ipsum/lorem.pdf```.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `faker/providers/file/__init__.py`
Content:
```
1 # coding=utf-8
2 from __future__ import unicode_literals
3 from collections import OrderedDict
4
5 from .. import BaseProvider
6 from ..lorem.la import Provider as WordProvider
7
8
9 class Provider(BaseProvider):
10 application_mime_types = (
11
12 "application/atom+xml", # Atom feeds
13 "application/ecmascript",
14 # ECMAScript/JavaScript; Defined in RFC 4329 (equivalent to application/javascript but with stricter processing rules)
15 "application/EDI-X12", # EDI X12 data; Defined in RFC 1767
16 "application/EDIFACT", # EDI EDIFACT data; Defined in RFC 1767
17 "application/json", # JavaScript Object Notation JSON; Defined in RFC 4627
18 "application/javascript", # ECMAScript/JavaScript; Defined in RFC 4329 (equivalent to application/ecmascript
19 # but with looser processing rules) It is not accepted in IE 8
20 # or earlier - text/javascript is accepted but it is defined as obsolete in RFC 4329.
21 # The "type" attribute of the <script> tag in HTML5 is optional and in practice
22 # omitting the media type of JavaScript programs is the most interoperable
23 # solution since all browsers have always assumed the correct
24 # default even before HTML5. "application/octet-stream", # Arbitrary binary data.[6] Generally speaking this type identifies files that are not associated with a specific application. Contrary to past assumptions by software packages such as Apache this is not a type that should be applied to unknown files. In such a case, a server or application should not indicate a content type, as it may be incorrect, but rather, should omit the type in order to allow the recipient to guess the type.[7]
25 "application/ogg", # Ogg, a multimedia bitstream container format; Defined in RFC 5334
26 "application/pdf", # Portable Document Format, PDF has been in use for document exchange
27 # on the Internet since 1993; Defined in RFC 3778
28 "application/postscript", # PostScript; Defined in RFC 2046
29 "application/rdf+xml", # Resource Description Framework; Defined by RFC 3870
30 "application/rss+xml", # RSS feeds
31 "application/soap+xml", # SOAP; Defined by RFC 3902
32 "application/font-woff", # Web Open Font Format; (candidate recommendation; use application/x-font-woff
33 # until standard is official)
34 "application/xhtml+xml", # XHTML; Defined by RFC 3236
35 "application/xml-dtd", # DTD files; Defined by RFC 3023
36 "application/xop+xml", # XOP
37 "application/zip", # ZIP archive files; Registered[8]
38 "application/gzip", # Gzip, Defined in RFC 6713
39 )
40
41 audio_mime_types = (
42 "audio/basic", # mulaw audio at 8 kHz, 1 channel; Defined in RFC 2046
43 "audio/L24", # 24bit Linear PCM audio at 8-48 kHz, 1-N channels; Defined in RFC 3190
44 "audio/mp4", # MP4 audio
45 "audio/mpeg", # MP3 or other MPEG audio; Defined in RFC 3003
46 "audio/ogg", # Ogg Vorbis, Speex, Flac and other audio; Defined in RFC 5334
47 "audio/vorbis", # Vorbis encoded audio; Defined in RFC 5215
48 "audio/vnd.rn-realaudio", # RealAudio; Documented in RealPlayer Help[9]
49 "audio/vnd.wave", # WAV audio; Defined in RFC 2361
50 "audio/webm", # WebM open media format
51 )
52
53 image_mime_types = (
54 "image/gif", # GIF image; Defined in RFC 2045 and RFC 2046
55 "image/jpeg", # JPEG JFIF image; Defined in RFC 2045 and RFC 2046
56 "image/pjpeg",
57 # JPEG JFIF image; Associated with Internet Explorer; Listed in ms775147(v=vs.85) - Progressive JPEG, initiated before global browser support for progressive JPEGs (Microsoft and Firefox).
58 "image/png", # Portable Network Graphics; Registered,[10] Defined in RFC 2083
59 "image/svg+xml", # SVG vector image; Defined in SVG Tiny 1.2 Specification Appendix M
60 "image/tiff", # Tag Image File Format (only for Baseline TIFF); Defined in RFC 3302
61 "image/vnd.microsoft.icon", # ICO image; Registered[11]
62 )
63
64 message_mime_types = (
65 "message/http", # Defined in RFC 2616
66 "message/imdn+xml", # IMDN Instant Message Disposition Notification; Defined in RFC 5438
67 "message/partial", # Email; Defined in RFC 2045 and RFC 2046
68 "message/rfc822", # Email; EML files, MIME files, MHT files, MHTML files; Defined in RFC 2045 and RFC 2046
69 )
70
71 model_mime_types = (
72 "model/example", # Defined in RFC 4735
73 "model/iges", # IGS files, IGES files; Defined in RFC 2077
74 "model/mesh", # MSH files, MESH files; Defined in RFC 2077, SILO files
75 "model/vrml", # WRL files, VRML files; Defined in RFC 2077
76 "model/x3d+binary", # X3D ISO standard for representing 3D computer graphics, X3DB binary files
77 "model/x3d+vrml", # X3D ISO standard for representing 3D computer graphics, X3DV VRML files
78 "model/x3d+xml", # X3D ISO standard for representing 3D computer graphics, X3D XML files
79 )
80
81 multipart_mime_types = (
82 "multipart/mixed", # MIME Email; Defined in RFC 2045 and RFC 2046
83 "multipart/alternative", # MIME Email; Defined in RFC 2045 and RFC 2046
84 "multipart/related", # MIME Email; Defined in RFC 2387 and used by MHTML (HTML mail)
85 "multipart/form-data", # MIME Webform; Defined in RFC 2388
86 "multipart/signed", # Defined in RFC 1847
87 "multipart/encrypted", # Defined in RFC 1847
88 )
89
90 text_mime_types = (
91 "text/cmd", # commands; subtype resident in Gecko browsers like Firefox 3.5
92 "text/css", # Cascading Style Sheets; Defined in RFC 2318
93 "text/csv", # Comma-separated values; Defined in RFC 4180
94 "text/html", # HTML; Defined in RFC 2854
95 "text/javascript",
96 # (Obsolete): JavaScript; Defined in and obsoleted by RFC 4329 in order to discourage its usage in favor of application/javascript. However, text/javascript is allowed in HTML 4 and 5 and, unlike application/javascript, has cross-browser support. The "type" attribute of the <script> tag in HTML5 is optional and there is no need to use it at all since all browsers have always assumed the correct default (even in HTML 4 where it was required by the specification).
97 "text/plain", # Textual data; Defined in RFC 2046 and RFC 3676
98 "text/vcard", # vCard (contact information); Defined in RFC 6350
99 "text/xml", # Extensible Markup Language; Defined in RFC 3023
100 )
101
102 video_mime_types = (
103 "video/mpeg", # MPEG-1 video with multiplexed audio; Defined in RFC 2045 and RFC 2046
104 "video/mp4", # MP4 video; Defined in RFC 4337
105 "video/ogg", # Ogg Theora or other video (with audio); Defined in RFC 5334
106 "video/quicktime", # QuickTime video; Registered[12]
107 "video/webm", # WebM Matroska-based open media format
108 "video/x-matroska", # Matroska open media format
109 "video/x-ms-wmv", # Windows Media Video; Documented in Microsoft KB 288102
110 "video/x-flv", # Flash video (FLV files)
111 )
112
113 mime_types = OrderedDict((
114 ('application', application_mime_types),
115 ('audio', audio_mime_types),
116 ('image', image_mime_types),
117 ('message', message_mime_types),
118 ('model', model_mime_types),
119 ('multipart', multipart_mime_types),
120 ('text', text_mime_types),
121 ('video', video_mime_types),
122 ))
123
124 audio_file_extensions = (
125 "flac",
126 "mp3",
127 "wav",
128 )
129
130 image_file_extensions = (
131 "bmp",
132 "gif",
133 "jpeg",
134 "jpg",
135 "png",
136 "tiff",
137 )
138
139 text_file_extensions = (
140 "css",
141 "csv",
142 "html",
143 "js",
144 "json",
145 "txt",
146 )
147
148 video_file_extensions = (
149 "mp4",
150 "avi",
151 "mov",
152 "webm",
153 )
154
155 office_file_extensions = (
156 "doc", # legacy MS Word
157 "docx", # MS Word
158 "xls", # legacy MS Excel
159 "xlsx", # MS Excel
160 "ppt", # legacy MS PowerPoint
161 "pptx", # MS PowerPoint
162 "odt", # LibreOffice document
163 "ods", # LibreOffice spreadsheet
164 "odp", # LibreOffice presentation
165 "pages", # Apple Pages
166 "numbers", # Apple Numbers
167 "key", # Apple Keynote
168 "pdf", # Portable Document Format
169 )
170
171 file_extensions = OrderedDict((
172 ("audio", audio_file_extensions),
173 ("image", image_file_extensions),
174 ("office", office_file_extensions),
175 ("text", text_file_extensions),
176 ("video", video_file_extensions),
177 ))
178
179 @classmethod
180 def mime_type(cls, category=None):
181 """
182 :param category: application|audio|image|message|model|multipart|text|video
183 """
184 category = category if category else cls.random_element(list(cls.mime_types.keys()))
185 return cls.random_element(cls.mime_types[category])
186
187 @classmethod
188 def file_name(cls, category=None, extension=None):
189 """
190 :param category: audio|image|office|text|video
191 :param extension: file extension
192 """
193 extension = extension if extension else cls.file_extension(category)
194 filename = WordProvider.word()
195 return '{0}.{1}'.format(filename, extension)
196
197 @classmethod
198 def file_extension(cls, category=None):
199 """
200 :param category: audio|image|office|text|video
201 """
202 category = category if category else cls.random_element(list(cls.file_extensions.keys()))
203 return cls.random_element(cls.file_extensions[category])
204
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/faker/providers/file/__init__.py b/faker/providers/file/__init__.py
--- a/faker/providers/file/__init__.py
+++ b/faker/providers/file/__init__.py
@@ -201,3 +201,16 @@
"""
category = category if category else cls.random_element(list(cls.file_extensions.keys()))
return cls.random_element(cls.file_extensions[category])
+
+ @classmethod
+ def file_path(cls, depth=1, category=None, extension=None):
+ """
+ :param category: audio|image|office|text|video
+ :param extension: file extension
+ :param depth: depth of the file (depth >= 0)
+ """
+ file = Provider.file_name(category, extension)
+ path = "/{0}".format(file)
+ for d in range(0, depth):
+ path = "/{0}{1}".format(WordProvider.word(), path)
+ return path
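
For illustration, a rough usage sketch of the added `file_path` method, assuming the patched file provider is wired into the default `Faker` generator as usual; the literal paths in the comments are only examples of the randomized output:

```python
from faker import Faker

fake = Faker()

# The provider method is proxied through the Faker generator like any other.
# Outputs are random; the paths shown in the comments are only illustrative.
print(fake.file_path())                           # e.g. '/lorem/ipsum.pdf'
print(fake.file_path(depth=3, category='image'))  # e.g. '/dolor/sit/amet/lorem.png'
print(fake.file_path(extension='csv'))            # e.g. '/consectetur/lorem.csv'
```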
|
{"golden_diff": "diff --git a/faker/providers/file/__init__.py b/faker/providers/file/__init__.py\n--- a/faker/providers/file/__init__.py\n+++ b/faker/providers/file/__init__.py\n@@ -201,3 +201,16 @@\n \"\"\"\n category = category if category else cls.random_element(list(cls.file_extensions.keys()))\n return cls.random_element(cls.file_extensions[category])\n+\n+ @classmethod\n+ def file_path(cls, depth=1, category=None, extension=None):\n+ \"\"\"\n+ :param category: audio|image|office|text|video\n+ :param extension: file extension\n+ :param depth: depth of the file (depth >= 0)\n+ \"\"\"\n+ file = Provider.file_name(category, extension)\n+ path = \"/{0}\".format(file)\n+ for d in range(0, depth):\n+ path = \"/{0}{1}\".format(WordProvider.word(), path)\n+ return path\n", "issue": "Add a file path provider\nIn the file providers would be nice to have a file_path provider who would return a path like ```/lorem/ipsum/lorem.pdf```.\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import unicode_literals\nfrom collections import OrderedDict\n\nfrom .. import BaseProvider\nfrom ..lorem.la import Provider as WordProvider\n\n\nclass Provider(BaseProvider):\n application_mime_types = (\n\n \"application/atom+xml\", # Atom feeds\n \"application/ecmascript\",\n # ECMAScript/JavaScript; Defined in RFC 4329 (equivalent to application/javascript but with stricter processing rules)\n \"application/EDI-X12\", # EDI X12 data; Defined in RFC 1767\n \"application/EDIFACT\", # EDI EDIFACT data; Defined in RFC 1767\n \"application/json\", # JavaScript Object Notation JSON; Defined in RFC 4627\n \"application/javascript\", # ECMAScript/JavaScript; Defined in RFC 4329 (equivalent to application/ecmascript\n # but with looser processing rules) It is not accepted in IE 8\n # or earlier - text/javascript is accepted but it is defined as obsolete in RFC 4329.\n # The \"type\" attribute of the <script> tag in HTML5 is optional and in practice\n # omitting the media type of JavaScript programs is the most interoperable\n # solution since all browsers have always assumed the correct\n # default even before HTML5. \"application/octet-stream\", # Arbitrary binary data.[6] Generally speaking this type identifies files that are not associated with a specific application. Contrary to past assumptions by software packages such as Apache this is not a type that should be applied to unknown files. 
In such a case, a server or application should not indicate a content type, as it may be incorrect, but rather, should omit the type in order to allow the recipient to guess the type.[7]\n \"application/ogg\", # Ogg, a multimedia bitstream container format; Defined in RFC 5334\n \"application/pdf\", # Portable Document Format, PDF has been in use for document exchange\n # on the Internet since 1993; Defined in RFC 3778\n \"application/postscript\", # PostScript; Defined in RFC 2046\n \"application/rdf+xml\", # Resource Description Framework; Defined by RFC 3870\n \"application/rss+xml\", # RSS feeds\n \"application/soap+xml\", # SOAP; Defined by RFC 3902\n \"application/font-woff\", # Web Open Font Format; (candidate recommendation; use application/x-font-woff\n # until standard is official)\n \"application/xhtml+xml\", # XHTML; Defined by RFC 3236\n \"application/xml-dtd\", # DTD files; Defined by RFC 3023\n \"application/xop+xml\", # XOP\n \"application/zip\", # ZIP archive files; Registered[8]\n \"application/gzip\", # Gzip, Defined in RFC 6713\n )\n\n audio_mime_types = (\n \"audio/basic\", # mulaw audio at 8 kHz, 1 channel; Defined in RFC 2046\n \"audio/L24\", # 24bit Linear PCM audio at 8-48 kHz, 1-N channels; Defined in RFC 3190\n \"audio/mp4\", # MP4 audio\n \"audio/mpeg\", # MP3 or other MPEG audio; Defined in RFC 3003\n \"audio/ogg\", # Ogg Vorbis, Speex, Flac and other audio; Defined in RFC 5334\n \"audio/vorbis\", # Vorbis encoded audio; Defined in RFC 5215\n \"audio/vnd.rn-realaudio\", # RealAudio; Documented in RealPlayer Help[9]\n \"audio/vnd.wave\", # WAV audio; Defined in RFC 2361\n \"audio/webm\", # WebM open media format\n )\n\n image_mime_types = (\n \"image/gif\", # GIF image; Defined in RFC 2045 and RFC 2046\n \"image/jpeg\", # JPEG JFIF image; Defined in RFC 2045 and RFC 2046\n \"image/pjpeg\",\n # JPEG JFIF image; Associated with Internet Explorer; Listed in ms775147(v=vs.85) - Progressive JPEG, initiated before global browser support for progressive JPEGs (Microsoft and Firefox).\n \"image/png\", # Portable Network Graphics; Registered,[10] Defined in RFC 2083\n \"image/svg+xml\", # SVG vector image; Defined in SVG Tiny 1.2 Specification Appendix M\n \"image/tiff\", # Tag Image File Format (only for Baseline TIFF); Defined in RFC 3302\n \"image/vnd.microsoft.icon\", # ICO image; Registered[11]\n )\n\n message_mime_types = (\n \"message/http\", # Defined in RFC 2616\n \"message/imdn+xml\", # IMDN Instant Message Disposition Notification; Defined in RFC 5438\n \"message/partial\", # Email; Defined in RFC 2045 and RFC 2046\n \"message/rfc822\", # Email; EML files, MIME files, MHT files, MHTML files; Defined in RFC 2045 and RFC 2046\n )\n\n model_mime_types = (\n \"model/example\", # Defined in RFC 4735\n \"model/iges\", # IGS files, IGES files; Defined in RFC 2077\n \"model/mesh\", # MSH files, MESH files; Defined in RFC 2077, SILO files\n \"model/vrml\", # WRL files, VRML files; Defined in RFC 2077\n \"model/x3d+binary\", # X3D ISO standard for representing 3D computer graphics, X3DB binary files\n \"model/x3d+vrml\", # X3D ISO standard for representing 3D computer graphics, X3DV VRML files\n \"model/x3d+xml\", # X3D ISO standard for representing 3D computer graphics, X3D XML files\n )\n\n multipart_mime_types = (\n \"multipart/mixed\", # MIME Email; Defined in RFC 2045 and RFC 2046\n \"multipart/alternative\", # MIME Email; Defined in RFC 2045 and RFC 2046\n \"multipart/related\", # MIME Email; Defined in RFC 2387 and used by MHTML (HTML mail)\n 
\"multipart/form-data\", # MIME Webform; Defined in RFC 2388\n \"multipart/signed\", # Defined in RFC 1847\n \"multipart/encrypted\", # Defined in RFC 1847\n )\n\n text_mime_types = (\n \"text/cmd\", # commands; subtype resident in Gecko browsers like Firefox 3.5\n \"text/css\", # Cascading Style Sheets; Defined in RFC 2318\n \"text/csv\", # Comma-separated values; Defined in RFC 4180\n \"text/html\", # HTML; Defined in RFC 2854\n \"text/javascript\",\n # (Obsolete): JavaScript; Defined in and obsoleted by RFC 4329 in order to discourage its usage in favor of application/javascript. However, text/javascript is allowed in HTML 4 and 5 and, unlike application/javascript, has cross-browser support. The \"type\" attribute of the <script> tag in HTML5 is optional and there is no need to use it at all since all browsers have always assumed the correct default (even in HTML 4 where it was required by the specification).\n \"text/plain\", # Textual data; Defined in RFC 2046 and RFC 3676\n \"text/vcard\", # vCard (contact information); Defined in RFC 6350\n \"text/xml\", # Extensible Markup Language; Defined in RFC 3023\n )\n\n video_mime_types = (\n \"video/mpeg\", # MPEG-1 video with multiplexed audio; Defined in RFC 2045 and RFC 2046\n \"video/mp4\", # MP4 video; Defined in RFC 4337\n \"video/ogg\", # Ogg Theora or other video (with audio); Defined in RFC 5334\n \"video/quicktime\", # QuickTime video; Registered[12]\n \"video/webm\", # WebM Matroska-based open media format\n \"video/x-matroska\", # Matroska open media format\n \"video/x-ms-wmv\", # Windows Media Video; Documented in Microsoft KB 288102\n \"video/x-flv\", # Flash video (FLV files)\n )\n\n mime_types = OrderedDict((\n ('application', application_mime_types),\n ('audio', audio_mime_types),\n ('image', image_mime_types),\n ('message', message_mime_types),\n ('model', model_mime_types),\n ('multipart', multipart_mime_types),\n ('text', text_mime_types),\n ('video', video_mime_types),\n ))\n\n audio_file_extensions = (\n \"flac\",\n \"mp3\",\n \"wav\",\n )\n\n image_file_extensions = (\n \"bmp\",\n \"gif\",\n \"jpeg\",\n \"jpg\",\n \"png\",\n \"tiff\",\n )\n\n text_file_extensions = (\n \"css\",\n \"csv\",\n \"html\",\n \"js\",\n \"json\",\n \"txt\",\n )\n\n video_file_extensions = (\n \"mp4\",\n \"avi\",\n \"mov\",\n \"webm\",\n )\n\n office_file_extensions = (\n \"doc\", # legacy MS Word\n \"docx\", # MS Word\n \"xls\", # legacy MS Excel\n \"xlsx\", # MS Excel\n \"ppt\", # legacy MS PowerPoint\n \"pptx\", # MS PowerPoint\n \"odt\", # LibreOffice document\n \"ods\", # LibreOffice spreadsheet\n \"odp\", # LibreOffice presentation\n \"pages\", # Apple Pages\n \"numbers\", # Apple Numbers\n \"key\", # Apple Keynote\n \"pdf\", # Portable Document Format\n )\n\n file_extensions = OrderedDict((\n (\"audio\", audio_file_extensions),\n (\"image\", image_file_extensions),\n (\"office\", office_file_extensions),\n (\"text\", text_file_extensions),\n (\"video\", video_file_extensions),\n ))\n\n @classmethod\n def mime_type(cls, category=None):\n \"\"\"\n :param category: application|audio|image|message|model|multipart|text|video\n \"\"\"\n category = category if category else cls.random_element(list(cls.mime_types.keys()))\n return cls.random_element(cls.mime_types[category])\n\n @classmethod\n def file_name(cls, category=None, extension=None):\n \"\"\"\n :param category: audio|image|office|text|video\n :param extension: file extension\n \"\"\"\n extension = extension if extension else cls.file_extension(category)\n filename = 
WordProvider.word()\n return '{0}.{1}'.format(filename, extension)\n\n @classmethod\n def file_extension(cls, category=None):\n \"\"\"\n :param category: audio|image|office|text|video\n \"\"\"\n category = category if category else cls.random_element(list(cls.file_extensions.keys()))\n return cls.random_element(cls.file_extensions[category])\n", "path": "faker/providers/file/__init__.py"}], "after_files": [{"content": "# coding=utf-8\nfrom __future__ import unicode_literals\nfrom collections import OrderedDict\n\nfrom .. import BaseProvider\nfrom ..lorem.la import Provider as WordProvider\n\n\nclass Provider(BaseProvider):\n application_mime_types = (\n\n \"application/atom+xml\", # Atom feeds\n \"application/ecmascript\",\n # ECMAScript/JavaScript; Defined in RFC 4329 (equivalent to application/javascript but with stricter processing rules)\n \"application/EDI-X12\", # EDI X12 data; Defined in RFC 1767\n \"application/EDIFACT\", # EDI EDIFACT data; Defined in RFC 1767\n \"application/json\", # JavaScript Object Notation JSON; Defined in RFC 4627\n \"application/javascript\", # ECMAScript/JavaScript; Defined in RFC 4329 (equivalent to application/ecmascript\n # but with looser processing rules) It is not accepted in IE 8\n # or earlier - text/javascript is accepted but it is defined as obsolete in RFC 4329.\n # The \"type\" attribute of the <script> tag in HTML5 is optional and in practice\n # omitting the media type of JavaScript programs is the most interoperable\n # solution since all browsers have always assumed the correct\n # default even before HTML5. \"application/octet-stream\", # Arbitrary binary data.[6] Generally speaking this type identifies files that are not associated with a specific application. Contrary to past assumptions by software packages such as Apache this is not a type that should be applied to unknown files. 
In such a case, a server or application should not indicate a content type, as it may be incorrect, but rather, should omit the type in order to allow the recipient to guess the type.[7]\n \"application/ogg\", # Ogg, a multimedia bitstream container format; Defined in RFC 5334\n \"application/pdf\", # Portable Document Format, PDF has been in use for document exchange\n # on the Internet since 1993; Defined in RFC 3778\n \"application/postscript\", # PostScript; Defined in RFC 2046\n \"application/rdf+xml\", # Resource Description Framework; Defined by RFC 3870\n \"application/rss+xml\", # RSS feeds\n \"application/soap+xml\", # SOAP; Defined by RFC 3902\n \"application/font-woff\", # Web Open Font Format; (candidate recommendation; use application/x-font-woff\n # until standard is official)\n \"application/xhtml+xml\", # XHTML; Defined by RFC 3236\n \"application/xml-dtd\", # DTD files; Defined by RFC 3023\n \"application/xop+xml\", # XOP\n \"application/zip\", # ZIP archive files; Registered[8]\n \"application/gzip\", # Gzip, Defined in RFC 6713\n )\n\n audio_mime_types = (\n \"audio/basic\", # mulaw audio at 8 kHz, 1 channel; Defined in RFC 2046\n \"audio/L24\", # 24bit Linear PCM audio at 8-48 kHz, 1-N channels; Defined in RFC 3190\n \"audio/mp4\", # MP4 audio\n \"audio/mpeg\", # MP3 or other MPEG audio; Defined in RFC 3003\n \"audio/ogg\", # Ogg Vorbis, Speex, Flac and other audio; Defined in RFC 5334\n \"audio/vorbis\", # Vorbis encoded audio; Defined in RFC 5215\n \"audio/vnd.rn-realaudio\", # RealAudio; Documented in RealPlayer Help[9]\n \"audio/vnd.wave\", # WAV audio; Defined in RFC 2361\n \"audio/webm\", # WebM open media format\n )\n\n image_mime_types = (\n \"image/gif\", # GIF image; Defined in RFC 2045 and RFC 2046\n \"image/jpeg\", # JPEG JFIF image; Defined in RFC 2045 and RFC 2046\n \"image/pjpeg\",\n # JPEG JFIF image; Associated with Internet Explorer; Listed in ms775147(v=vs.85) - Progressive JPEG, initiated before global browser support for progressive JPEGs (Microsoft and Firefox).\n \"image/png\", # Portable Network Graphics; Registered,[10] Defined in RFC 2083\n \"image/svg+xml\", # SVG vector image; Defined in SVG Tiny 1.2 Specification Appendix M\n \"image/tiff\", # Tag Image File Format (only for Baseline TIFF); Defined in RFC 3302\n \"image/vnd.microsoft.icon\", # ICO image; Registered[11]\n )\n\n message_mime_types = (\n \"message/http\", # Defined in RFC 2616\n \"message/imdn+xml\", # IMDN Instant Message Disposition Notification; Defined in RFC 5438\n \"message/partial\", # Email; Defined in RFC 2045 and RFC 2046\n \"message/rfc822\", # Email; EML files, MIME files, MHT files, MHTML files; Defined in RFC 2045 and RFC 2046\n )\n\n model_mime_types = (\n \"model/example\", # Defined in RFC 4735\n \"model/iges\", # IGS files, IGES files; Defined in RFC 2077\n \"model/mesh\", # MSH files, MESH files; Defined in RFC 2077, SILO files\n \"model/vrml\", # WRL files, VRML files; Defined in RFC 2077\n \"model/x3d+binary\", # X3D ISO standard for representing 3D computer graphics, X3DB binary files\n \"model/x3d+vrml\", # X3D ISO standard for representing 3D computer graphics, X3DV VRML files\n \"model/x3d+xml\", # X3D ISO standard for representing 3D computer graphics, X3D XML files\n )\n\n multipart_mime_types = (\n \"multipart/mixed\", # MIME Email; Defined in RFC 2045 and RFC 2046\n \"multipart/alternative\", # MIME Email; Defined in RFC 2045 and RFC 2046\n \"multipart/related\", # MIME Email; Defined in RFC 2387 and used by MHTML (HTML mail)\n 
\"multipart/form-data\", # MIME Webform; Defined in RFC 2388\n \"multipart/signed\", # Defined in RFC 1847\n \"multipart/encrypted\", # Defined in RFC 1847\n )\n\n text_mime_types = (\n \"text/cmd\", # commands; subtype resident in Gecko browsers like Firefox 3.5\n \"text/css\", # Cascading Style Sheets; Defined in RFC 2318\n \"text/csv\", # Comma-separated values; Defined in RFC 4180\n \"text/html\", # HTML; Defined in RFC 2854\n \"text/javascript\",\n # (Obsolete): JavaScript; Defined in and obsoleted by RFC 4329 in order to discourage its usage in favor of application/javascript. However, text/javascript is allowed in HTML 4 and 5 and, unlike application/javascript, has cross-browser support. The \"type\" attribute of the <script> tag in HTML5 is optional and there is no need to use it at all since all browsers have always assumed the correct default (even in HTML 4 where it was required by the specification).\n \"text/plain\", # Textual data; Defined in RFC 2046 and RFC 3676\n \"text/vcard\", # vCard (contact information); Defined in RFC 6350\n \"text/xml\", # Extensible Markup Language; Defined in RFC 3023\n )\n\n video_mime_types = (\n \"video/mpeg\", # MPEG-1 video with multiplexed audio; Defined in RFC 2045 and RFC 2046\n \"video/mp4\", # MP4 video; Defined in RFC 4337\n \"video/ogg\", # Ogg Theora or other video (with audio); Defined in RFC 5334\n \"video/quicktime\", # QuickTime video; Registered[12]\n \"video/webm\", # WebM Matroska-based open media format\n \"video/x-matroska\", # Matroska open media format\n \"video/x-ms-wmv\", # Windows Media Video; Documented in Microsoft KB 288102\n \"video/x-flv\", # Flash video (FLV files)\n )\n\n mime_types = OrderedDict((\n ('application', application_mime_types),\n ('audio', audio_mime_types),\n ('image', image_mime_types),\n ('message', message_mime_types),\n ('model', model_mime_types),\n ('multipart', multipart_mime_types),\n ('text', text_mime_types),\n ('video', video_mime_types),\n ))\n\n audio_file_extensions = (\n \"flac\",\n \"mp3\",\n \"wav\",\n )\n\n image_file_extensions = (\n \"bmp\",\n \"gif\",\n \"jpeg\",\n \"jpg\",\n \"png\",\n \"tiff\",\n )\n\n text_file_extensions = (\n \"css\",\n \"csv\",\n \"html\",\n \"js\",\n \"json\",\n \"txt\",\n )\n\n video_file_extensions = (\n \"mp4\",\n \"avi\",\n \"mov\",\n \"webm\",\n )\n\n office_file_extensions = (\n \"doc\", # legacy MS Word\n \"docx\", # MS Word\n \"xls\", # legacy MS Excel\n \"xlsx\", # MS Excel\n \"ppt\", # legacy MS PowerPoint\n \"pptx\", # MS PowerPoint\n \"odt\", # LibreOffice document\n \"ods\", # LibreOffice spreadsheet\n \"odp\", # LibreOffice presentation\n \"pages\", # Apple Pages\n \"numbers\", # Apple Numbers\n \"key\", # Apple Keynote\n \"pdf\", # Portable Document Format\n )\n\n file_extensions = OrderedDict((\n (\"audio\", audio_file_extensions),\n (\"image\", image_file_extensions),\n (\"office\", office_file_extensions),\n (\"text\", text_file_extensions),\n (\"video\", video_file_extensions),\n ))\n\n @classmethod\n def mime_type(cls, category=None):\n \"\"\"\n :param category: application|audio|image|message|model|multipart|text|video\n \"\"\"\n category = category if category else cls.random_element(list(cls.mime_types.keys()))\n return cls.random_element(cls.mime_types[category])\n\n @classmethod\n def file_name(cls, category=None, extension=None):\n \"\"\"\n :param category: audio|image|office|text|video\n :param extension: file extension\n \"\"\"\n extension = extension if extension else cls.file_extension(category)\n filename = 
WordProvider.word()\n return '{0}.{1}'.format(filename, extension)\n\n @classmethod\n def file_extension(cls, category=None):\n \"\"\"\n :param category: audio|image|office|text|video\n \"\"\"\n category = category if category else cls.random_element(list(cls.file_extensions.keys()))\n return cls.random_element(cls.file_extensions[category])\n\n @classmethod\n def file_path(cls, depth=1, category=None, extension=None):\n \"\"\"\n :param category: audio|image|office|text|video\n :param extension: file extension\n :param depth: depth of the file (depth >= 0)\n \"\"\"\n file = Provider.file_name(category, extension)\n path = \"/{0}\".format(file)\n for d in range(0, depth):\n path = \"/{0}{1}\".format(WordProvider.word(), path)\n return path\n", "path": "faker/providers/file/__init__.py"}]}
| 3,356 | 214 |
gh_patches_debug_30758
|
rasdani/github-patches
|
git_diff
|
opendatacube__datacube-core-1061
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Example PickleWriterDriver does not support URIs
### Expected behaviour
Using the example `pickles` driver should allow one to successfully run, e.g., `integration_tests/test_end_to_end`. This assumes the example dcio driver has been installed first.
### Actual behaviour
The test fails because a URI gets passed to `PickleWriterDriver.write_dataset_to_storage()` which still expects a filepath string. Also, this URI may contain intermediate directories that need creating.
### Steps to reproduce the behaviour
- Set `pickles` as driver name in, e.g., `datacube-core/docs/config_samples/ingester/ls5_nbar_albers.yaml(106)`
- Run `pytest integration_tests/test_end_to_end.py`
- First error: `AttributeError: 'PickleWriterDriver' object has no attribute 'mk_uri'` but subsequent errors happen due to missing intermediate directories.
### Environment information
- `Open Data Cube core, version 1.8.4.dev52+g07bc51a5.d20210222`
- Docker image: `opendatacube/datacube-tests:latest`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/io_plugin/dcio_example/pickles.py`
Content:
```
1 # This file is part of the Open Data Cube, see https://opendatacube.org for more information
2 #
3 # Copyright (c) 2015-2020 ODC Contributors
4 # SPDX-License-Identifier: Apache-2.0
5 """ Example reader plugin
6 """
7 from contextlib import contextmanager
8 import pickle
9
10
11 PROTOCOL = 'file'
12 FORMAT = 'pickle'
13
14
15 def uri_split(uri):
16 loc = uri.find('://')
17 if loc < 0:
18 return uri, PROTOCOL
19 return uri[loc+3:], uri[:loc]
20
21
22 class PickleDataSource(object):
23 class BandDataSource(object):
24 def __init__(self, da):
25 self._da = da
26 self.nodata = da.nodata
27
28 @property
29 def crs(self):
30 return self._da.crs
31
32 @property
33 def transform(self):
34 return self._da.affine
35
36 @property
37 def dtype(self):
38 return self._da.dtype
39
40 @property
41 def shape(self):
42 return self._da.shape
43
44 def read(self, window=None, out_shape=None):
45 if window is None:
46 data = self._da.values
47 else:
48 rows, cols = [slice(*w) for w in window]
49 data = self._da.values[rows, cols]
50
51 if out_shape is None or out_shape == data.shape:
52 return data
53
54 raise NotImplementedError('Native reading not supported for this data source')
55
56 def __init__(self, band):
57 self._band = band
58 uri = band.uri
59 self._filename, protocol = uri_split(uri)
60
61 if protocol not in [PROTOCOL, 'pickle']:
62 raise ValueError('Expected file:// or pickle:// url')
63
64 @contextmanager
65 def open(self):
66 with open(self._filename, 'rb') as f:
67 ds = pickle.load(f)
68
69 yield PickleDataSource.BandDataSource(ds[self._band.name].isel(time=0))
70
71
72 class PickleReaderDriver(object):
73 def __init__(self):
74 self.name = 'PickleReader'
75 self.protocols = [PROTOCOL, 'pickle']
76 self.formats = [FORMAT]
77
78 def supports(self, protocol, fmt):
79 return (protocol in self.protocols and
80 fmt in self.formats)
81
82 def new_datasource(self, band):
83 return PickleDataSource(band)
84
85
86 def rdr_driver_init():
87 return PickleReaderDriver()
88
89
90 class PickleWriterDriver(object):
91 def __init__(self):
92 pass
93
94 @property
95 def aliases(self):
96 return ['pickles']
97
98 @property
99 def format(self):
100 return FORMAT
101
102 @property
103 def uri_scheme(self):
104 return PROTOCOL
105
106 def write_dataset_to_storage(self, dataset, filename,
107 global_attributes=None,
108 variable_params=None,
109 storage_config=None,
110 **kwargs):
111 with open(filename, 'wb') as f:
112 pickle.dump(dataset, f)
113 return {}
114
115
116 def writer_driver_init():
117 return PickleWriterDriver()
118
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/examples/io_plugin/dcio_example/pickles.py b/examples/io_plugin/dcio_example/pickles.py
--- a/examples/io_plugin/dcio_example/pickles.py
+++ b/examples/io_plugin/dcio_example/pickles.py
@@ -4,9 +4,12 @@
# SPDX-License-Identifier: Apache-2.0
""" Example reader plugin
"""
-from contextlib import contextmanager
import pickle
+from contextlib import contextmanager
+from pathlib import Path
+from urllib.parse import urlsplit
+from datacube.utils.uris import normalise_path
PROTOCOL = 'file'
FORMAT = 'pickle'
@@ -103,12 +106,33 @@
def uri_scheme(self):
return PROTOCOL
- def write_dataset_to_storage(self, dataset, filename,
+ def mk_uri(self, file_path, storage_config):
+ """
+ Constructs a URI from the file_path and storage config.
+
+ A typical implementation should return f'{scheme}://{file_path}'
+
+ Example:
+ file_path = '/path/to/my_file.pickled'
+ storage_config = {'driver': 'pickles'}
+
+ mk_uri(file_path, storage_config) should return 'file:///path/to/my_file.pickled'
+
+ :param Path file_path: The file path of the file to be converted into a URI.
+ :param dict storage_config: The dict holding the storage config found in the ingest definition.
+ :return: file_path as a URI that the Driver understands.
+ :rtype: str
+ """
+ return normalise_path(file_path).as_uri()
+
+ def write_dataset_to_storage(self, dataset, file_uri,
global_attributes=None,
variable_params=None,
storage_config=None,
**kwargs):
- with open(filename, 'wb') as f:
+ filepath = Path(urlsplit(file_uri).path)
+ filepath.parent.mkdir(parents=True, exist_ok=True)
+ with filepath.open('wb') as f:
pickle.dump(dataset, f)
return {}
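
As a hedged sketch of how the patched writer driver would be exercised — the module path `dcio_example.pickles` matches the example plugin above, while the file path and storage config below are made up (real values come from the ingester's storage configuration):

```python
from pathlib import Path

from dcio_example.pickles import PickleWriterDriver

driver = PickleWriterDriver()

# Hypothetical ingest output location and storage config.
file_path = Path('/tmp/ingest_output/ls5_nbar_albers/tile_0_-10.pickled')
storage_config = {'driver': 'pickles'}

uri = driver.mk_uri(file_path, storage_config)
# uri == 'file:///tmp/ingest_output/ls5_nbar_albers/tile_0_-10.pickled'

# The writer now accepts a file:// URI and creates any missing parent
# directories before pickling; `dataset` would be the xarray.Dataset
# produced by the ingester, so the call is left commented out here.
# driver.write_dataset_to_storage(dataset, uri)
```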
|
{"golden_diff": "diff --git a/examples/io_plugin/dcio_example/pickles.py b/examples/io_plugin/dcio_example/pickles.py\n--- a/examples/io_plugin/dcio_example/pickles.py\n+++ b/examples/io_plugin/dcio_example/pickles.py\n@@ -4,9 +4,12 @@\n # SPDX-License-Identifier: Apache-2.0\n \"\"\" Example reader plugin\n \"\"\"\n-from contextlib import contextmanager\n import pickle\n+from contextlib import contextmanager\n+from pathlib import Path\n+from urllib.parse import urlsplit\n \n+from datacube.utils.uris import normalise_path\n \n PROTOCOL = 'file'\n FORMAT = 'pickle'\n@@ -103,12 +106,33 @@\n def uri_scheme(self):\n return PROTOCOL\n \n- def write_dataset_to_storage(self, dataset, filename,\n+ def mk_uri(self, file_path, storage_config):\n+ \"\"\"\n+ Constructs a URI from the file_path and storage config.\n+\n+ A typical implementation should return f'{scheme}://{file_path}'\n+\n+ Example:\n+ file_path = '/path/to/my_file.pickled'\n+ storage_config = {'driver': 'pickles'}\n+\n+ mk_uri(file_path, storage_config) should return 'file:///path/to/my_file.pickled'\n+\n+ :param Path file_path: The file path of the file to be converted into a URI.\n+ :param dict storage_config: The dict holding the storage config found in the ingest definition.\n+ :return: file_path as a URI that the Driver understands.\n+ :rtype: str\n+ \"\"\"\n+ return normalise_path(file_path).as_uri()\n+\n+ def write_dataset_to_storage(self, dataset, file_uri,\n global_attributes=None,\n variable_params=None,\n storage_config=None,\n **kwargs):\n- with open(filename, 'wb') as f:\n+ filepath = Path(urlsplit(file_uri).path)\n+ filepath.parent.mkdir(parents=True, exist_ok=True)\n+ with filepath.open('wb') as f:\n pickle.dump(dataset, f)\n return {}\n", "issue": "Example PickleWriterDriver does not support URIs\n### Expected behaviour\r\nUsing the example `pickles` driver should allow to successfully run, e.g., `integration_tests/test_end_to_end`. This is assuming the example dcio driver has been installed first.\r\n\r\n### Actual behaviour\r\nThe test fails because a URI gets passed to `PickleWriterDriver.write_dataset_to_storage()` which still expects a filepath string. 
Also, this URI may contain intermediate directories that need creating.\r\n\r\n### Steps to reproduce the behaviour\r\n- Set `pickles` as driver name in, e.g., `datacube-core/docs/config_samples/ingester/ls5_nbar_albers.yaml(106)`\r\n- Run `pytest integration_tests/test_end_to_end.py`\r\n- First error: `AttributeError: 'PickleWriterDriver' object has no attribute 'mk_uri'` but subsequent errors happen due to missing intermediate directories.\r\n\r\n### Environment information\r\n- `Open Data Cube core, version 1.8.4.dev52+g07bc51a5.d20210222`\r\n- Docker image: `opendatacube/datacube-tests:latest`\n", "before_files": [{"content": "# This file is part of the Open Data Cube, see https://opendatacube.org for more information\n#\n# Copyright (c) 2015-2020 ODC Contributors\n# SPDX-License-Identifier: Apache-2.0\n\"\"\" Example reader plugin\n\"\"\"\nfrom contextlib import contextmanager\nimport pickle\n\n\nPROTOCOL = 'file'\nFORMAT = 'pickle'\n\n\ndef uri_split(uri):\n loc = uri.find('://')\n if loc < 0:\n return uri, PROTOCOL\n return uri[loc+3:], uri[:loc]\n\n\nclass PickleDataSource(object):\n class BandDataSource(object):\n def __init__(self, da):\n self._da = da\n self.nodata = da.nodata\n\n @property\n def crs(self):\n return self._da.crs\n\n @property\n def transform(self):\n return self._da.affine\n\n @property\n def dtype(self):\n return self._da.dtype\n\n @property\n def shape(self):\n return self._da.shape\n\n def read(self, window=None, out_shape=None):\n if window is None:\n data = self._da.values\n else:\n rows, cols = [slice(*w) for w in window]\n data = self._da.values[rows, cols]\n\n if out_shape is None or out_shape == data.shape:\n return data\n\n raise NotImplementedError('Native reading not supported for this data source')\n\n def __init__(self, band):\n self._band = band\n uri = band.uri\n self._filename, protocol = uri_split(uri)\n\n if protocol not in [PROTOCOL, 'pickle']:\n raise ValueError('Expected file:// or pickle:// url')\n\n @contextmanager\n def open(self):\n with open(self._filename, 'rb') as f:\n ds = pickle.load(f)\n\n yield PickleDataSource.BandDataSource(ds[self._band.name].isel(time=0))\n\n\nclass PickleReaderDriver(object):\n def __init__(self):\n self.name = 'PickleReader'\n self.protocols = [PROTOCOL, 'pickle']\n self.formats = [FORMAT]\n\n def supports(self, protocol, fmt):\n return (protocol in self.protocols and\n fmt in self.formats)\n\n def new_datasource(self, band):\n return PickleDataSource(band)\n\n\ndef rdr_driver_init():\n return PickleReaderDriver()\n\n\nclass PickleWriterDriver(object):\n def __init__(self):\n pass\n\n @property\n def aliases(self):\n return ['pickles']\n\n @property\n def format(self):\n return FORMAT\n\n @property\n def uri_scheme(self):\n return PROTOCOL\n\n def write_dataset_to_storage(self, dataset, filename,\n global_attributes=None,\n variable_params=None,\n storage_config=None,\n **kwargs):\n with open(filename, 'wb') as f:\n pickle.dump(dataset, f)\n return {}\n\n\ndef writer_driver_init():\n return PickleWriterDriver()\n", "path": "examples/io_plugin/dcio_example/pickles.py"}], "after_files": [{"content": "# This file is part of the Open Data Cube, see https://opendatacube.org for more information\n#\n# Copyright (c) 2015-2020 ODC Contributors\n# SPDX-License-Identifier: Apache-2.0\n\"\"\" Example reader plugin\n\"\"\"\nimport pickle\nfrom contextlib import contextmanager\nfrom pathlib import Path\nfrom urllib.parse import urlsplit\n\nfrom datacube.utils.uris import normalise_path\n\nPROTOCOL = 'file'\nFORMAT = 
'pickle'\n\n\ndef uri_split(uri):\n loc = uri.find('://')\n if loc < 0:\n return uri, PROTOCOL\n return uri[loc+3:], uri[:loc]\n\n\nclass PickleDataSource(object):\n class BandDataSource(object):\n def __init__(self, da):\n self._da = da\n self.nodata = da.nodata\n\n @property\n def crs(self):\n return self._da.crs\n\n @property\n def transform(self):\n return self._da.affine\n\n @property\n def dtype(self):\n return self._da.dtype\n\n @property\n def shape(self):\n return self._da.shape\n\n def read(self, window=None, out_shape=None):\n if window is None:\n data = self._da.values\n else:\n rows, cols = [slice(*w) for w in window]\n data = self._da.values[rows, cols]\n\n if out_shape is None or out_shape == data.shape:\n return data\n\n raise NotImplementedError('Native reading not supported for this data source')\n\n def __init__(self, band):\n self._band = band\n uri = band.uri\n self._filename, protocol = uri_split(uri)\n\n if protocol not in [PROTOCOL, 'pickle']:\n raise ValueError('Expected file:// or pickle:// url')\n\n @contextmanager\n def open(self):\n with open(self._filename, 'rb') as f:\n ds = pickle.load(f)\n\n yield PickleDataSource.BandDataSource(ds[self._band.name].isel(time=0))\n\n\nclass PickleReaderDriver(object):\n def __init__(self):\n self.name = 'PickleReader'\n self.protocols = [PROTOCOL, 'pickle']\n self.formats = [FORMAT]\n\n def supports(self, protocol, fmt):\n return (protocol in self.protocols and\n fmt in self.formats)\n\n def new_datasource(self, band):\n return PickleDataSource(band)\n\n\ndef rdr_driver_init():\n return PickleReaderDriver()\n\n\nclass PickleWriterDriver(object):\n def __init__(self):\n pass\n\n @property\n def aliases(self):\n return ['pickles']\n\n @property\n def format(self):\n return FORMAT\n\n @property\n def uri_scheme(self):\n return PROTOCOL\n\n def mk_uri(self, file_path, storage_config):\n \"\"\"\n Constructs a URI from the file_path and storage config.\n\n A typical implementation should return f'{scheme}://{file_path}'\n\n Example:\n file_path = '/path/to/my_file.pickled'\n storage_config = {'driver': 'pickles'}\n\n mk_uri(file_path, storage_config) should return 'file:///path/to/my_file.pickled'\n\n :param Path file_path: The file path of the file to be converted into a URI.\n :param dict storage_config: The dict holding the storage config found in the ingest definition.\n :return: file_path as a URI that the Driver understands.\n :rtype: str\n \"\"\"\n return normalise_path(file_path).as_uri()\n\n def write_dataset_to_storage(self, dataset, file_uri,\n global_attributes=None,\n variable_params=None,\n storage_config=None,\n **kwargs):\n filepath = Path(urlsplit(file_uri).path)\n filepath.parent.mkdir(parents=True, exist_ok=True)\n with filepath.open('wb') as f:\n pickle.dump(dataset, f)\n return {}\n\n\ndef writer_driver_init():\n return PickleWriterDriver()\n", "path": "examples/io_plugin/dcio_example/pickles.py"}]}
| 1,419 | 444 |
gh_patches_debug_34429
|
rasdani/github-patches
|
git_diff
|
googleapis__google-cloud-python-4704
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Logging: Can't retrieve the writerIdentity for a logging sink
In #4578 , support was added for creating a sink with a unique writerIdentity. It would appear that only the uniqueWriterIdentity boolean for logging.sinks.create was implemented; but retrieving the writerIdentity for an existing sink is not possible.
## API response (via gcloud)
```
$ gcloud logging sinks list --project xxxx --log-http
[...]
-- body start --
{
"sinks": [
{
"name": "xxxx",
"destination": "storage.googleapis.com/xxxx",
"filter": "xxxx",
"outputVersionFormt": "V2",
"writerIdentity": "serviceAccount:[email protected]"
}
]
}
-- body end --
```
## Simplified example code
```
from google.cloud import logging
logging_client = logging.Client(project_id)
sink = logging_client.sink(sink_name)
sink.reload()
```
## Resulting object
```
(Pdb) sink.
sink.__class__( sink.__hash__( sink.__setattr__( sink.destination
sink.__delattr__( sink.__init__( sink.__sizeof__( sink.exists(
sink.__dict__ sink.__le__( sink.__str__( sink.filter_
sink.__dir__( sink.__lt__( sink.__subclasshook__( sink.from_api_repr(
sink.__doc__ sink.__module__ sink.__weakref__ sink.full_name
sink.__eq__( sink.__ne__( sink._client sink.name
sink.__format__( sink.__new__( sink._require_client( sink.path
sink.__ge__( sink.__reduce__( sink.client sink.project
sink.__getattribute__( sink.__reduce_ex__( sink.create( sink.reload(
sink.__gt__( sink.__repr__( sink.delete( sink.update(
(Pdb) sink.__dict__
{'destination': 'storage.googleapis.com/logs-00006913-fake-customer', '_client': <google.cloud.logging.client.Client object at 0x7f1d2728e860>, 'name': 'asdf1234', 'filter_': '*'}
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `logging/google/cloud/logging/sink.py`
Content:
```
1 # Copyright 2016 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Define Stackdriver Logging API Sinks."""
16
17 from google.cloud.exceptions import NotFound
18
19
20 class Sink(object):
21 """Sinks represent filtered exports for log entries.
22
23 See
24 https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks
25
26 :type name: str
27 :param name: the name of the sink
28
29 :type filter_: str
30 :param filter_: (optional) the advanced logs filter expression defining
31 the entries exported by the sink.
32
33 :type destination: str
34 :param destination: destination URI for the entries exported by the sink.
35 If not passed, the instance should already exist, to
36 be refreshed via :meth:`reload`.
37
38 :type client: :class:`google.cloud.logging.client.Client`
39 :param client: A client which holds credentials and project configuration
40 for the sink (which requires a project).
41
42 :type unique_writer_identity: bool
43 :param unique_writer_identity: (Optional) determines the kind of
44 IAM identity returned as
45 writer_identity in the new sink.
46 """
47 def __init__(self, name, filter_=None, destination=None, client=None,
48 unique_writer_identity=False):
49 self.name = name
50 self.filter_ = filter_
51 self.destination = destination
52 self._client = client
53 self._unique_writer_identity = unique_writer_identity
54
55 @property
56 def client(self):
57 """Client bound to the sink."""
58 return self._client
59
60 @property
61 def project(self):
62 """Project bound to the sink."""
63 return self._client.project
64
65 @property
66 def full_name(self):
67 """Fully-qualified name used in sink APIs"""
68 return 'projects/%s/sinks/%s' % (self.project, self.name)
69
70 @property
71 def path(self):
72 """URL path for the sink's APIs"""
73 return '/%s' % (self.full_name)
74
75 @classmethod
76 def from_api_repr(cls, resource, client):
77 """Factory: construct a sink given its API representation
78
79 :type resource: dict
80 :param resource: sink resource representation returned from the API
81
82 :type client: :class:`google.cloud.logging.client.Client`
83 :param client: Client which holds credentials and project
84 configuration for the sink.
85
86 :rtype: :class:`google.cloud.logging.sink.Sink`
87 :returns: Sink parsed from ``resource``.
88 :raises: :class:`ValueError` if ``client`` is not ``None`` and the
89 project from the resource does not agree with the project
90 from the client.
91 """
92 sink_name = resource['name']
93 destination = resource['destination']
94 filter_ = resource.get('filter')
95 return cls(sink_name, filter_, destination, client=client)
96
97 def _require_client(self, client):
98 """Check client or verify over-ride.
99
100 :type client: :class:`~google.cloud.logging.client.Client` or
101 ``NoneType``
102 :param client: the client to use. If not passed, falls back to the
103 ``client`` stored on the current sink.
104
105 :rtype: :class:`google.cloud.logging.client.Client`
106 :returns: The client passed in or the currently bound client.
107 """
108 if client is None:
109 client = self._client
110 return client
111
112 def create(self, client=None):
113 """API call: create the sink via a PUT request
114
115 See
116 https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/create
117
118 :type client: :class:`~google.cloud.logging.client.Client` or
119 ``NoneType``
120 :param client: the client to use. If not passed, falls back to the
121 ``client`` stored on the current sink.
122 """
123 client = self._require_client(client)
124 client.sinks_api.sink_create(
125 self.project, self.name, self.filter_, self.destination,
126 unique_writer_identity=self._unique_writer_identity,
127 )
128
129 def exists(self, client=None):
130 """API call: test for the existence of the sink via a GET request
131
132 See
133 https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/get
134
135 :type client: :class:`~google.cloud.logging.client.Client` or
136 ``NoneType``
137 :param client: the client to use. If not passed, falls back to the
138 ``client`` stored on the current sink.
139
140 :rtype: bool
141 :returns: Boolean indicating existence of the sink.
142 """
143 client = self._require_client(client)
144
145 try:
146 client.sinks_api.sink_get(self.project, self.name)
147 except NotFound:
148 return False
149 else:
150 return True
151
152 def reload(self, client=None):
153 """API call: sync local sink configuration via a GET request
154
155 See
156 https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/get
157
158 :type client: :class:`~google.cloud.logging.client.Client` or
159 ``NoneType``
160 :param client: the client to use. If not passed, falls back to the
161 ``client`` stored on the current sink.
162 """
163 client = self._require_client(client)
164 data = client.sinks_api.sink_get(self.project, self.name)
165 self.destination = data['destination']
166 self.filter_ = data.get('filter')
167
168 def update(self, client=None):
169 """API call: update sink configuration via a PUT request
170
171 See
172 https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/update
173
174 :type client: :class:`~google.cloud.logging.client.Client` or
175 ``NoneType``
176 :param client: the client to use. If not passed, falls back to the
177 ``client`` stored on the current sink.
178 """
179 client = self._require_client(client)
180 client.sinks_api.sink_update(
181 self.project, self.name, self.filter_, self.destination)
182
183 def delete(self, client=None):
184 """API call: delete a sink via a DELETE request
185
186 See
187 https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/delete
188
189 :type client: :class:`~google.cloud.logging.client.Client` or
190 ``NoneType``
191 :param client: the client to use. If not passed, falls back to the
192 ``client`` stored on the current sink.
193 """
194 client = self._require_client(client)
195 client.sinks_api.sink_delete(self.project, self.name)
196
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/logging/google/cloud/logging/sink.py b/logging/google/cloud/logging/sink.py
--- a/logging/google/cloud/logging/sink.py
+++ b/logging/google/cloud/logging/sink.py
@@ -51,6 +51,7 @@
self.destination = destination
self._client = client
self._unique_writer_identity = unique_writer_identity
+ self._writer_identity = None
@property
def client(self):
@@ -72,6 +73,11 @@
"""URL path for the sink's APIs"""
return '/%s' % (self.full_name)
+ @property
+ def writer_identity(self):
+ """Identity used for exports via the sink"""
+ return self._writer_identity
+
@classmethod
def from_api_repr(cls, resource, client):
"""Factory: construct a sink given its API representation
@@ -92,7 +98,9 @@
sink_name = resource['name']
destination = resource['destination']
filter_ = resource.get('filter')
- return cls(sink_name, filter_, destination, client=client)
+ instance = cls(sink_name, filter_, destination, client=client)
+ instance._writer_identity = resource.get('writerIdentity')
+ return instance
def _require_client(self, client):
"""Check client or verify over-ride.
@@ -161,9 +169,10 @@
``client`` stored on the current sink.
"""
client = self._require_client(client)
- data = client.sinks_api.sink_get(self.project, self.name)
- self.destination = data['destination']
- self.filter_ = data.get('filter')
+ resource = client.sinks_api.sink_get(self.project, self.name)
+ self.destination = resource['destination']
+ self.filter_ = resource.get('filter')
+ self._writer_identity = resource.get('writerIdentity')
def update(self, client=None):
"""API call: update sink configuration via a PUT request
|
{"golden_diff": "diff --git a/logging/google/cloud/logging/sink.py b/logging/google/cloud/logging/sink.py\n--- a/logging/google/cloud/logging/sink.py\n+++ b/logging/google/cloud/logging/sink.py\n@@ -51,6 +51,7 @@\n self.destination = destination\n self._client = client\n self._unique_writer_identity = unique_writer_identity\n+ self._writer_identity = None\n \n @property\n def client(self):\n@@ -72,6 +73,11 @@\n \"\"\"URL path for the sink's APIs\"\"\"\n return '/%s' % (self.full_name)\n \n+ @property\n+ def writer_identity(self):\n+ \"\"\"Identity used for exports via the sink\"\"\"\n+ return self._writer_identity\n+\n @classmethod\n def from_api_repr(cls, resource, client):\n \"\"\"Factory: construct a sink given its API representation\n@@ -92,7 +98,9 @@\n sink_name = resource['name']\n destination = resource['destination']\n filter_ = resource.get('filter')\n- return cls(sink_name, filter_, destination, client=client)\n+ instance = cls(sink_name, filter_, destination, client=client)\n+ instance._writer_identity = resource.get('writerIdentity')\n+ return instance\n \n def _require_client(self, client):\n \"\"\"Check client or verify over-ride.\n@@ -161,9 +169,10 @@\n ``client`` stored on the current sink.\n \"\"\"\n client = self._require_client(client)\n- data = client.sinks_api.sink_get(self.project, self.name)\n- self.destination = data['destination']\n- self.filter_ = data.get('filter')\n+ resource = client.sinks_api.sink_get(self.project, self.name)\n+ self.destination = resource['destination']\n+ self.filter_ = resource.get('filter')\n+ self._writer_identity = resource.get('writerIdentity')\n \n def update(self, client=None):\n \"\"\"API call: update sink configuration via a PUT request\n", "issue": "Logging: Can't retrieve the writerIdentity for a logging sink\nIn #4578 , support was added for creating a sink with a unique writerIdentity. 
It would appear that only the uniqueWriterIdentity boolean for logging.sinks.create was implemented; but retrieving the writerIdentity for an existing sink is not possible.\r\n\r\n## API response (via gcloud)\r\n```\r\n$ gcloud logging sinks list --project xxxx --log-http\r\n[...]\r\n-- body start --\r\n{\r\n \"sinks\": [\r\n {\r\n \"name\": \"xxxx\",\r\n \"destination\": \"storage.googleapis.com/xxxx\",\r\n \"filter\": \"xxxx\",\r\n \"outputVersionFormt\": \"V2\",\r\n \"writerIdentity\": \"serviceAccount:[email protected]\"\r\n }\r\n ]\r\n}\r\n\r\n-- body end --\r\n```\r\n\r\n## Simplified example code\r\n```\r\nfrom google.cloud import logging\r\n\r\nlogging_client = logging.Client(project_id)\r\nsink = logging_client.sink(sink_name)\r\nsink.reload()\r\n```\r\n\r\n## Resulting object\r\n```\r\n(Pdb) sink.\r\nsink.__class__( sink.__hash__( sink.__setattr__( sink.destination\r\nsink.__delattr__( sink.__init__( sink.__sizeof__( sink.exists(\r\nsink.__dict__ sink.__le__( sink.__str__( sink.filter_\r\nsink.__dir__( sink.__lt__( sink.__subclasshook__( sink.from_api_repr(\r\nsink.__doc__ sink.__module__ sink.__weakref__ sink.full_name\r\nsink.__eq__( sink.__ne__( sink._client sink.name\r\nsink.__format__( sink.__new__( sink._require_client( sink.path\r\nsink.__ge__( sink.__reduce__( sink.client sink.project\r\nsink.__getattribute__( sink.__reduce_ex__( sink.create( sink.reload(\r\nsink.__gt__( sink.__repr__( sink.delete( sink.update(\r\n\r\n(Pdb) sink.__dict__\r\n{'destination': 'storage.googleapis.com/logs-00006913-fake-customer', '_client': <google.cloud.logging.client.Client object at 0x7f1d2728e860>, 'name': 'asdf1234', 'filter_': '*'}\r\n```\r\n\r\n\n", "before_files": [{"content": "# Copyright 2016 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Define Stackdriver Logging API Sinks.\"\"\"\n\nfrom google.cloud.exceptions import NotFound\n\n\nclass Sink(object):\n \"\"\"Sinks represent filtered exports for log entries.\n\n See\n https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks\n\n :type name: str\n :param name: the name of the sink\n\n :type filter_: str\n :param filter_: (optional) the advanced logs filter expression defining\n the entries exported by the sink.\n\n :type destination: str\n :param destination: destination URI for the entries exported by the sink.\n If not passed, the instance should already exist, to\n be refreshed via :meth:`reload`.\n\n :type client: :class:`google.cloud.logging.client.Client`\n :param client: A client which holds credentials and project configuration\n for the sink (which requires a project).\n\n :type unique_writer_identity: bool\n :param unique_writer_identity: (Optional) determines the kind of\n IAM identity returned as\n writer_identity in the new sink.\n \"\"\"\n def __init__(self, name, filter_=None, destination=None, client=None,\n unique_writer_identity=False):\n self.name = name\n self.filter_ = filter_\n self.destination = destination\n self._client = client\n self._unique_writer_identity = 
unique_writer_identity\n\n @property\n def client(self):\n \"\"\"Client bound to the sink.\"\"\"\n return self._client\n\n @property\n def project(self):\n \"\"\"Project bound to the sink.\"\"\"\n return self._client.project\n\n @property\n def full_name(self):\n \"\"\"Fully-qualified name used in sink APIs\"\"\"\n return 'projects/%s/sinks/%s' % (self.project, self.name)\n\n @property\n def path(self):\n \"\"\"URL path for the sink's APIs\"\"\"\n return '/%s' % (self.full_name)\n\n @classmethod\n def from_api_repr(cls, resource, client):\n \"\"\"Factory: construct a sink given its API representation\n\n :type resource: dict\n :param resource: sink resource representation returned from the API\n\n :type client: :class:`google.cloud.logging.client.Client`\n :param client: Client which holds credentials and project\n configuration for the sink.\n\n :rtype: :class:`google.cloud.logging.sink.Sink`\n :returns: Sink parsed from ``resource``.\n :raises: :class:`ValueError` if ``client`` is not ``None`` and the\n project from the resource does not agree with the project\n from the client.\n \"\"\"\n sink_name = resource['name']\n destination = resource['destination']\n filter_ = resource.get('filter')\n return cls(sink_name, filter_, destination, client=client)\n\n def _require_client(self, client):\n \"\"\"Check client or verify over-ride.\n\n :type client: :class:`~google.cloud.logging.client.Client` or\n ``NoneType``\n :param client: the client to use. If not passed, falls back to the\n ``client`` stored on the current sink.\n\n :rtype: :class:`google.cloud.logging.client.Client`\n :returns: The client passed in or the currently bound client.\n \"\"\"\n if client is None:\n client = self._client\n return client\n\n def create(self, client=None):\n \"\"\"API call: create the sink via a PUT request\n\n See\n https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/create\n\n :type client: :class:`~google.cloud.logging.client.Client` or\n ``NoneType``\n :param client: the client to use. If not passed, falls back to the\n ``client`` stored on the current sink.\n \"\"\"\n client = self._require_client(client)\n client.sinks_api.sink_create(\n self.project, self.name, self.filter_, self.destination,\n unique_writer_identity=self._unique_writer_identity,\n )\n\n def exists(self, client=None):\n \"\"\"API call: test for the existence of the sink via a GET request\n\n See\n https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/get\n\n :type client: :class:`~google.cloud.logging.client.Client` or\n ``NoneType``\n :param client: the client to use. If not passed, falls back to the\n ``client`` stored on the current sink.\n\n :rtype: bool\n :returns: Boolean indicating existence of the sink.\n \"\"\"\n client = self._require_client(client)\n\n try:\n client.sinks_api.sink_get(self.project, self.name)\n except NotFound:\n return False\n else:\n return True\n\n def reload(self, client=None):\n \"\"\"API call: sync local sink configuration via a GET request\n\n See\n https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/get\n\n :type client: :class:`~google.cloud.logging.client.Client` or\n ``NoneType``\n :param client: the client to use. 
If not passed, falls back to the\n ``client`` stored on the current sink.\n \"\"\"\n client = self._require_client(client)\n data = client.sinks_api.sink_get(self.project, self.name)\n self.destination = data['destination']\n self.filter_ = data.get('filter')\n\n def update(self, client=None):\n \"\"\"API call: update sink configuration via a PUT request\n\n See\n https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/update\n\n :type client: :class:`~google.cloud.logging.client.Client` or\n ``NoneType``\n :param client: the client to use. If not passed, falls back to the\n ``client`` stored on the current sink.\n \"\"\"\n client = self._require_client(client)\n client.sinks_api.sink_update(\n self.project, self.name, self.filter_, self.destination)\n\n def delete(self, client=None):\n \"\"\"API call: delete a sink via a DELETE request\n\n See\n https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/delete\n\n :type client: :class:`~google.cloud.logging.client.Client` or\n ``NoneType``\n :param client: the client to use. If not passed, falls back to the\n ``client`` stored on the current sink.\n \"\"\"\n client = self._require_client(client)\n client.sinks_api.sink_delete(self.project, self.name)\n", "path": "logging/google/cloud/logging/sink.py"}], "after_files": [{"content": "# Copyright 2016 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Define Stackdriver Logging API Sinks.\"\"\"\n\nfrom google.cloud.exceptions import NotFound\n\n\nclass Sink(object):\n \"\"\"Sinks represent filtered exports for log entries.\n\n See\n https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks\n\n :type name: str\n :param name: the name of the sink\n\n :type filter_: str\n :param filter_: (optional) the advanced logs filter expression defining\n the entries exported by the sink.\n\n :type destination: str\n :param destination: destination URI for the entries exported by the sink.\n If not passed, the instance should already exist, to\n be refreshed via :meth:`reload`.\n\n :type client: :class:`google.cloud.logging.client.Client`\n :param client: A client which holds credentials and project configuration\n for the sink (which requires a project).\n\n :type unique_writer_identity: bool\n :param unique_writer_identity: (Optional) determines the kind of\n IAM identity returned as\n writer_identity in the new sink.\n \"\"\"\n def __init__(self, name, filter_=None, destination=None, client=None,\n unique_writer_identity=False):\n self.name = name\n self.filter_ = filter_\n self.destination = destination\n self._client = client\n self._unique_writer_identity = unique_writer_identity\n self._writer_identity = None\n\n @property\n def client(self):\n \"\"\"Client bound to the sink.\"\"\"\n return self._client\n\n @property\n def project(self):\n \"\"\"Project bound to the sink.\"\"\"\n return self._client.project\n\n @property\n def full_name(self):\n \"\"\"Fully-qualified name used in sink APIs\"\"\"\n return 'projects/%s/sinks/%s' % 
(self.project, self.name)\n\n @property\n def path(self):\n \"\"\"URL path for the sink's APIs\"\"\"\n return '/%s' % (self.full_name)\n\n @property\n def writer_identity(self):\n \"\"\"Identity used for exports via the sink\"\"\"\n return self._writer_identity\n\n @classmethod\n def from_api_repr(cls, resource, client):\n \"\"\"Factory: construct a sink given its API representation\n\n :type resource: dict\n :param resource: sink resource representation returned from the API\n\n :type client: :class:`google.cloud.logging.client.Client`\n :param client: Client which holds credentials and project\n configuration for the sink.\n\n :rtype: :class:`google.cloud.logging.sink.Sink`\n :returns: Sink parsed from ``resource``.\n :raises: :class:`ValueError` if ``client`` is not ``None`` and the\n project from the resource does not agree with the project\n from the client.\n \"\"\"\n sink_name = resource['name']\n destination = resource['destination']\n filter_ = resource.get('filter')\n instance = cls(sink_name, filter_, destination, client=client)\n instance._writer_identity = resource.get('writerIdentity')\n return instance\n\n def _require_client(self, client):\n \"\"\"Check client or verify over-ride.\n\n :type client: :class:`~google.cloud.logging.client.Client` or\n ``NoneType``\n :param client: the client to use. If not passed, falls back to the\n ``client`` stored on the current sink.\n\n :rtype: :class:`google.cloud.logging.client.Client`\n :returns: The client passed in or the currently bound client.\n \"\"\"\n if client is None:\n client = self._client\n return client\n\n def create(self, client=None):\n \"\"\"API call: create the sink via a PUT request\n\n See\n https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/create\n\n :type client: :class:`~google.cloud.logging.client.Client` or\n ``NoneType``\n :param client: the client to use. If not passed, falls back to the\n ``client`` stored on the current sink.\n \"\"\"\n client = self._require_client(client)\n client.sinks_api.sink_create(\n self.project, self.name, self.filter_, self.destination,\n unique_writer_identity=self._unique_writer_identity,\n )\n\n def exists(self, client=None):\n \"\"\"API call: test for the existence of the sink via a GET request\n\n See\n https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/get\n\n :type client: :class:`~google.cloud.logging.client.Client` or\n ``NoneType``\n :param client: the client to use. If not passed, falls back to the\n ``client`` stored on the current sink.\n\n :rtype: bool\n :returns: Boolean indicating existence of the sink.\n \"\"\"\n client = self._require_client(client)\n\n try:\n client.sinks_api.sink_get(self.project, self.name)\n except NotFound:\n return False\n else:\n return True\n\n def reload(self, client=None):\n \"\"\"API call: sync local sink configuration via a GET request\n\n See\n https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/get\n\n :type client: :class:`~google.cloud.logging.client.Client` or\n ``NoneType``\n :param client: the client to use. 
If not passed, falls back to the\n ``client`` stored on the current sink.\n \"\"\"\n client = self._require_client(client)\n resource = client.sinks_api.sink_get(self.project, self.name)\n self.destination = resource['destination']\n self.filter_ = resource.get('filter')\n self._writer_identity = resource.get('writerIdentity')\n\n def update(self, client=None):\n \"\"\"API call: update sink configuration via a PUT request\n\n See\n https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/update\n\n :type client: :class:`~google.cloud.logging.client.Client` or\n ``NoneType``\n :param client: the client to use. If not passed, falls back to the\n ``client`` stored on the current sink.\n \"\"\"\n client = self._require_client(client)\n client.sinks_api.sink_update(\n self.project, self.name, self.filter_, self.destination)\n\n def delete(self, client=None):\n \"\"\"API call: delete a sink via a DELETE request\n\n See\n https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/delete\n\n :type client: :class:`~google.cloud.logging.client.Client` or\n ``NoneType``\n :param client: the client to use. If not passed, falls back to the\n ``client`` stored on the current sink.\n \"\"\"\n client = self._require_client(client)\n client.sinks_api.sink_delete(self.project, self.name)\n", "path": "logging/google/cloud/logging/sink.py"}]}
| 2,784 | 442 |
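For the logging sink record above, a minimal sketch of how the patched `writer_identity` property would be used — the project and sink names are hypothetical, and it assumes a `google-cloud-logging` build that carries the patch shown in the golden diff:

```python
from google.cloud import logging

client = logging.Client("my-project")   # hypothetical project id
sink = client.sink("my-sink")           # hypothetical sink name

sink.reload()                 # sink_get now also stores resource.get('writerIdentity')
print(sink.destination)       # e.g. "storage.googleapis.com/my-bucket"
print(sink.writer_identity)   # e.g. "serviceAccount:[email protected]"
```

Before the patch the attribute simply does not exist on `Sink`, which is what the issue demonstrates by inspecting `sink.__dict__`.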
gh_patches_debug_244
|
rasdani/github-patches
|
git_diff
|
wagtail__wagtail-8940
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
__str__ method doesn't return a string.
This code sample:
>>> from wagtail.contrib.forms.models import FormSubmission
>>> FormSubmission.objects.count()
1
>>> FormSubmission.objects.first()
Traceback (most recent call last):
File "<console>", line 1, in <module>
File "lib64/python3.10/site-packages/django/db/models/base.py", line 580, in __repr__
return "<%s: %s>" % (self.__class__.__name__, self)
TypeError: __str__ returned non-string (type dict)
This method:
https://github.com/wagtail/wagtail/blob/18ad15a18f8e533b858ccde7d060b9d4e85dcfd4/wagtail/contrib/forms/models.py#L61-L62
should be:
def __str__(self):
return f"{self.form_data}"
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/contrib/forms/models.py`
Content:
```
1 import datetime
2 import os
3
4 from django.conf import settings
5 from django.core.serializers.json import DjangoJSONEncoder
6 from django.core.validators import validate_email
7 from django.db import models
8 from django.template.response import TemplateResponse
9 from django.utils.formats import date_format
10 from django.utils.translation import gettext_lazy as _
11
12 from wagtail.admin.mail import send_mail
13 from wagtail.admin.panels import FieldPanel
14 from wagtail.contrib.forms.utils import get_field_clean_name
15 from wagtail.models import Orderable, Page
16
17 from .forms import FormBuilder, WagtailAdminFormPageForm
18
19 FORM_FIELD_CHOICES = (
20 ("singleline", _("Single line text")),
21 ("multiline", _("Multi-line text")),
22 ("email", _("Email")),
23 ("number", _("Number")),
24 ("url", _("URL")),
25 ("checkbox", _("Checkbox")),
26 ("checkboxes", _("Checkboxes")),
27 ("dropdown", _("Drop down")),
28 ("multiselect", _("Multiple select")),
29 ("radio", _("Radio buttons")),
30 ("date", _("Date")),
31 ("datetime", _("Date/time")),
32 ("hidden", _("Hidden field")),
33 )
34
35
36 class AbstractFormSubmission(models.Model):
37 """
38 Data for a form submission.
39
40 You can create custom submission model based on this abstract model.
41 For example, if you need to save additional data or a reference to a user.
42 """
43
44 form_data = models.JSONField(encoder=DjangoJSONEncoder)
45 page = models.ForeignKey(Page, on_delete=models.CASCADE)
46
47 submit_time = models.DateTimeField(verbose_name=_("submit time"), auto_now_add=True)
48
49 def get_data(self):
50 """
51 Returns dict with form data.
52
53 You can override this method to add additional data.
54 """
55
56 return {
57 **self.form_data,
58 "submit_time": self.submit_time,
59 }
60
61 def __str__(self):
62 return self.form_data
63
64 class Meta:
65 abstract = True
66 verbose_name = _("form submission")
67 verbose_name_plural = _("form submissions")
68
69
70 class FormSubmission(AbstractFormSubmission):
71 """Data for a Form submission."""
72
73
74 class AbstractFormField(Orderable):
75 """
76 Database Fields required for building a Django Form field.
77 """
78
79 clean_name = models.CharField(
80 verbose_name=_("name"),
81 max_length=255,
82 blank=True,
83 default="",
84 help_text=_(
85 "Safe name of the form field, the label converted to ascii_snake_case"
86 ),
87 )
88 label = models.CharField(
89 verbose_name=_("label"),
90 max_length=255,
91 help_text=_("The label of the form field"),
92 )
93 field_type = models.CharField(
94 verbose_name=_("field type"), max_length=16, choices=FORM_FIELD_CHOICES
95 )
96 required = models.BooleanField(verbose_name=_("required"), default=True)
97 choices = models.TextField(
98 verbose_name=_("choices"),
99 blank=True,
100 help_text=_(
101 "Comma or new line separated list of choices. Only applicable in checkboxes, radio and dropdown."
102 ),
103 )
104 default_value = models.TextField(
105 verbose_name=_("default value"),
106 blank=True,
107 help_text=_(
108 "Default value. Comma or new line separated values supported for checkboxes."
109 ),
110 )
111 help_text = models.CharField(
112 verbose_name=_("help text"), max_length=255, blank=True
113 )
114
115 panels = [
116 FieldPanel("label"),
117 FieldPanel("help_text"),
118 FieldPanel("required"),
119 FieldPanel("field_type", classname="formbuilder-type"),
120 FieldPanel("choices", classname="formbuilder-choices"),
121 FieldPanel("default_value", classname="formbuilder-default"),
122 ]
123
124 def get_field_clean_name(self):
125 """
126 Prepare an ascii safe lower_snake_case variant of the field name to use as the field key.
127 This key is used to reference the field responses in the JSON store and as the field name in forms.
128 Called for new field creation, validation of duplicate labels and form previews.
129 When called, does not have access to the Page, nor its own id as the record is not yet created.
130 """
131
132 return get_field_clean_name(self.label)
133
134 def save(self, *args, **kwargs):
135 """
136 When new fields are created, generate a template safe ascii name to use as the
137 JSON storage reference for this field. Previously created fields will be updated
138 to use the legacy unidecode method via checks & _migrate_legacy_clean_name.
139 We do not want to update the clean name on any subsequent changes to the label
140 as this would invalidate any previously submitted data.
141 """
142
143 is_new = self.pk is None
144 if is_new:
145 clean_name = self.get_field_clean_name()
146 self.clean_name = clean_name
147
148 super().save(*args, **kwargs)
149
150 class Meta:
151 abstract = True
152 ordering = ["sort_order"]
153
154
155 class AbstractForm(Page):
156 """
157 A Form Page. Pages implementing a form should inherit from it
158 """
159
160 base_form_class = WagtailAdminFormPageForm
161
162 form_builder = FormBuilder
163
164 submissions_list_view_class = None
165
166 def __init__(self, *args, **kwargs):
167 super().__init__(*args, **kwargs)
168 if not hasattr(self, "landing_page_template"):
169 name, ext = os.path.splitext(self.template)
170 self.landing_page_template = name + "_landing" + ext
171
172 class Meta:
173 abstract = True
174
175 def get_form_fields(self):
176 """
177 Form page expects `form_fields` to be declared.
178 If you want to change backwards relation name,
179 you need to override this method.
180 """
181
182 return self.form_fields.all()
183
184 def get_data_fields(self):
185 """
186 Returns a list of tuples with (field_name, field_label).
187 """
188
189 data_fields = [
190 ("submit_time", _("Submission date")),
191 ]
192 data_fields += [
193 (field.clean_name, field.label) for field in self.get_form_fields()
194 ]
195
196 return data_fields
197
198 def get_form_class(self):
199 fb = self.form_builder(self.get_form_fields())
200 return fb.get_form_class()
201
202 def get_form_parameters(self):
203 return {}
204
205 def get_form(self, *args, **kwargs):
206 form_class = self.get_form_class()
207 form_params = self.get_form_parameters()
208 form_params.update(kwargs)
209
210 return form_class(*args, **form_params)
211
212 def get_landing_page_template(self, request, *args, **kwargs):
213 return self.landing_page_template
214
215 def get_submission_class(self):
216 """
217 Returns submission class.
218
219 You can override this method to provide custom submission class.
220 Your class must be inherited from AbstractFormSubmission.
221 """
222
223 return FormSubmission
224
225 def get_submissions_list_view_class(self):
226 from .views import SubmissionsListView
227
228 return self.submissions_list_view_class or SubmissionsListView
229
230 def process_form_submission(self, form):
231 """
232 Accepts form instance with submitted data, user and page.
233 Creates submission instance.
234
235 You can override this method if you want to have custom creation logic.
236 For example, if you want to save reference to a user.
237 """
238
239 return self.get_submission_class().objects.create(
240 form_data=form.cleaned_data,
241 page=self,
242 )
243
244 def render_landing_page(self, request, form_submission=None, *args, **kwargs):
245 """
246 Renders the landing page.
247
248 You can override this method to return a different HttpResponse as
249 landing page. E.g. you could return a redirect to a separate page.
250 """
251 context = self.get_context(request)
252 context["form_submission"] = form_submission
253 return TemplateResponse(
254 request, self.get_landing_page_template(request), context
255 )
256
257 def serve_submissions_list_view(self, request, *args, **kwargs):
258 """
259 Returns list submissions view for admin.
260
261         `list_submissions_view_class` can be set to provide custom view class.
262 Your class must be inherited from SubmissionsListView.
263 """
264 view = self.get_submissions_list_view_class().as_view()
265 return view(request, form_page=self, *args, **kwargs)
266
267 def serve(self, request, *args, **kwargs):
268 if request.method == "POST":
269 form = self.get_form(
270 request.POST, request.FILES, page=self, user=request.user
271 )
272
273 if form.is_valid():
274 form_submission = self.process_form_submission(form)
275 return self.render_landing_page(
276 request, form_submission, *args, **kwargs
277 )
278 else:
279 form = self.get_form(page=self, user=request.user)
280
281 context = self.get_context(request)
282 context["form"] = form
283 return TemplateResponse(request, self.get_template(request), context)
284
285 preview_modes = [
286 ("form", _("Form")),
287 ("landing", _("Landing page")),
288 ]
289
290 def serve_preview(self, request, mode_name):
291 if mode_name == "landing":
292 return self.render_landing_page(request)
293 else:
294 return super().serve_preview(request, mode_name)
295
296
297 def validate_to_address(value):
298 for address in value.split(","):
299 validate_email(address.strip())
300
301
302 class AbstractEmailForm(AbstractForm):
303 """
304 A Form Page that sends email. Pages implementing a form to be send to an email should inherit from it
305 """
306
307 to_address = models.CharField(
308 verbose_name=_("to address"),
309 max_length=255,
310 blank=True,
311 help_text=_(
312 "Optional - form submissions will be emailed to these addresses. Separate multiple addresses by comma."
313 ),
314 validators=[validate_to_address],
315 )
316 from_address = models.EmailField(
317 verbose_name=_("from address"), max_length=255, blank=True
318 )
319 subject = models.CharField(verbose_name=_("subject"), max_length=255, blank=True)
320
321 def process_form_submission(self, form):
322 submission = super().process_form_submission(form)
323 if self.to_address:
324 self.send_mail(form)
325 return submission
326
327 def send_mail(self, form):
328 addresses = [x.strip() for x in self.to_address.split(",")]
329 send_mail(
330 self.subject,
331 self.render_email(form),
332 addresses,
333 self.from_address,
334 )
335
336 def render_email(self, form):
337 content = []
338
339 cleaned_data = form.cleaned_data
340 for field in form:
341 if field.name not in cleaned_data:
342 continue
343
344 value = cleaned_data.get(field.name)
345
346 if isinstance(value, list):
347 value = ", ".join(value)
348
349 # Format dates and datetimes with SHORT_DATE(TIME)_FORMAT
350 if isinstance(value, datetime.datetime):
351 value = date_format(value, settings.SHORT_DATETIME_FORMAT)
352 elif isinstance(value, datetime.date):
353 value = date_format(value, settings.SHORT_DATE_FORMAT)
354
355 content.append("{}: {}".format(field.label, value))
356
357 return "\n".join(content)
358
359 class Meta:
360 abstract = True
361
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/wagtail/contrib/forms/models.py b/wagtail/contrib/forms/models.py
--- a/wagtail/contrib/forms/models.py
+++ b/wagtail/contrib/forms/models.py
@@ -59,7 +59,7 @@
}
def __str__(self):
- return self.form_data
+ return f"{self.form_data}"
class Meta:
abstract = True
|
{"golden_diff": "diff --git a/wagtail/contrib/forms/models.py b/wagtail/contrib/forms/models.py\n--- a/wagtail/contrib/forms/models.py\n+++ b/wagtail/contrib/forms/models.py\n@@ -59,7 +59,7 @@\n }\n \n def __str__(self):\n- return self.form_data\n+ return f\"{self.form_data}\"\n \n class Meta:\n abstract = True\n", "issue": "__str__ method doesn't return a string.\nThis code sample:\r\n\r\n >>> from wagtail.contrib.forms.models import FormSubmission\r\n\r\n >>> FormSubmission.objects.count()\r\n 1\r\n >>> FormSubmission.objects.first()\r\n Traceback (most recent call last):\r\n File \"<console>\", line 1, in <module>\r\n File \"lib64/python3.10/site-packages/django/db/models/base.py\", line 580, in __repr__\r\n return \"<%s: %s>\" % (self.__class__.__name__, self)\r\n TypeError: __str__ returned non-string (type dict)\r\n\r\nThis method:\r\n\r\nhttps://github.com/wagtail/wagtail/blob/18ad15a18f8e533b858ccde7d060b9d4e85dcfd4/wagtail/contrib/forms/models.py#L61-L62\r\n\r\nshould be:\r\n\r\n def __str__(self):\r\n return f\"{self.form_data}\"\r\n\r\n \n", "before_files": [{"content": "import datetime\nimport os\n\nfrom django.conf import settings\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.core.validators import validate_email\nfrom django.db import models\nfrom django.template.response import TemplateResponse\nfrom django.utils.formats import date_format\nfrom django.utils.translation import gettext_lazy as _\n\nfrom wagtail.admin.mail import send_mail\nfrom wagtail.admin.panels import FieldPanel\nfrom wagtail.contrib.forms.utils import get_field_clean_name\nfrom wagtail.models import Orderable, Page\n\nfrom .forms import FormBuilder, WagtailAdminFormPageForm\n\nFORM_FIELD_CHOICES = (\n (\"singleline\", _(\"Single line text\")),\n (\"multiline\", _(\"Multi-line text\")),\n (\"email\", _(\"Email\")),\n (\"number\", _(\"Number\")),\n (\"url\", _(\"URL\")),\n (\"checkbox\", _(\"Checkbox\")),\n (\"checkboxes\", _(\"Checkboxes\")),\n (\"dropdown\", _(\"Drop down\")),\n (\"multiselect\", _(\"Multiple select\")),\n (\"radio\", _(\"Radio buttons\")),\n (\"date\", _(\"Date\")),\n (\"datetime\", _(\"Date/time\")),\n (\"hidden\", _(\"Hidden field\")),\n)\n\n\nclass AbstractFormSubmission(models.Model):\n \"\"\"\n Data for a form submission.\n\n You can create custom submission model based on this abstract model.\n For example, if you need to save additional data or a reference to a user.\n \"\"\"\n\n form_data = models.JSONField(encoder=DjangoJSONEncoder)\n page = models.ForeignKey(Page, on_delete=models.CASCADE)\n\n submit_time = models.DateTimeField(verbose_name=_(\"submit time\"), auto_now_add=True)\n\n def get_data(self):\n \"\"\"\n Returns dict with form data.\n\n You can override this method to add additional data.\n \"\"\"\n\n return {\n **self.form_data,\n \"submit_time\": self.submit_time,\n }\n\n def __str__(self):\n return self.form_data\n\n class Meta:\n abstract = True\n verbose_name = _(\"form submission\")\n verbose_name_plural = _(\"form submissions\")\n\n\nclass FormSubmission(AbstractFormSubmission):\n \"\"\"Data for a Form submission.\"\"\"\n\n\nclass AbstractFormField(Orderable):\n \"\"\"\n Database Fields required for building a Django Form field.\n \"\"\"\n\n clean_name = models.CharField(\n verbose_name=_(\"name\"),\n max_length=255,\n blank=True,\n default=\"\",\n help_text=_(\n \"Safe name of the form field, the label converted to ascii_snake_case\"\n ),\n )\n label = models.CharField(\n verbose_name=_(\"label\"),\n max_length=255,\n 
help_text=_(\"The label of the form field\"),\n )\n field_type = models.CharField(\n verbose_name=_(\"field type\"), max_length=16, choices=FORM_FIELD_CHOICES\n )\n required = models.BooleanField(verbose_name=_(\"required\"), default=True)\n choices = models.TextField(\n verbose_name=_(\"choices\"),\n blank=True,\n help_text=_(\n \"Comma or new line separated list of choices. Only applicable in checkboxes, radio and dropdown.\"\n ),\n )\n default_value = models.TextField(\n verbose_name=_(\"default value\"),\n blank=True,\n help_text=_(\n \"Default value. Comma or new line separated values supported for checkboxes.\"\n ),\n )\n help_text = models.CharField(\n verbose_name=_(\"help text\"), max_length=255, blank=True\n )\n\n panels = [\n FieldPanel(\"label\"),\n FieldPanel(\"help_text\"),\n FieldPanel(\"required\"),\n FieldPanel(\"field_type\", classname=\"formbuilder-type\"),\n FieldPanel(\"choices\", classname=\"formbuilder-choices\"),\n FieldPanel(\"default_value\", classname=\"formbuilder-default\"),\n ]\n\n def get_field_clean_name(self):\n \"\"\"\n Prepare an ascii safe lower_snake_case variant of the field name to use as the field key.\n This key is used to reference the field responses in the JSON store and as the field name in forms.\n Called for new field creation, validation of duplicate labels and form previews.\n When called, does not have access to the Page, nor its own id as the record is not yet created.\n \"\"\"\n\n return get_field_clean_name(self.label)\n\n def save(self, *args, **kwargs):\n \"\"\"\n When new fields are created, generate a template safe ascii name to use as the\n JSON storage reference for this field. Previously created fields will be updated\n to use the legacy unidecode method via checks & _migrate_legacy_clean_name.\n We do not want to update the clean name on any subsequent changes to the label\n as this would invalidate any previously submitted data.\n \"\"\"\n\n is_new = self.pk is None\n if is_new:\n clean_name = self.get_field_clean_name()\n self.clean_name = clean_name\n\n super().save(*args, **kwargs)\n\n class Meta:\n abstract = True\n ordering = [\"sort_order\"]\n\n\nclass AbstractForm(Page):\n \"\"\"\n A Form Page. 
Pages implementing a form should inherit from it\n \"\"\"\n\n base_form_class = WagtailAdminFormPageForm\n\n form_builder = FormBuilder\n\n submissions_list_view_class = None\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if not hasattr(self, \"landing_page_template\"):\n name, ext = os.path.splitext(self.template)\n self.landing_page_template = name + \"_landing\" + ext\n\n class Meta:\n abstract = True\n\n def get_form_fields(self):\n \"\"\"\n Form page expects `form_fields` to be declared.\n If you want to change backwards relation name,\n you need to override this method.\n \"\"\"\n\n return self.form_fields.all()\n\n def get_data_fields(self):\n \"\"\"\n Returns a list of tuples with (field_name, field_label).\n \"\"\"\n\n data_fields = [\n (\"submit_time\", _(\"Submission date\")),\n ]\n data_fields += [\n (field.clean_name, field.label) for field in self.get_form_fields()\n ]\n\n return data_fields\n\n def get_form_class(self):\n fb = self.form_builder(self.get_form_fields())\n return fb.get_form_class()\n\n def get_form_parameters(self):\n return {}\n\n def get_form(self, *args, **kwargs):\n form_class = self.get_form_class()\n form_params = self.get_form_parameters()\n form_params.update(kwargs)\n\n return form_class(*args, **form_params)\n\n def get_landing_page_template(self, request, *args, **kwargs):\n return self.landing_page_template\n\n def get_submission_class(self):\n \"\"\"\n Returns submission class.\n\n You can override this method to provide custom submission class.\n Your class must be inherited from AbstractFormSubmission.\n \"\"\"\n\n return FormSubmission\n\n def get_submissions_list_view_class(self):\n from .views import SubmissionsListView\n\n return self.submissions_list_view_class or SubmissionsListView\n\n def process_form_submission(self, form):\n \"\"\"\n Accepts form instance with submitted data, user and page.\n Creates submission instance.\n\n You can override this method if you want to have custom creation logic.\n For example, if you want to save reference to a user.\n \"\"\"\n\n return self.get_submission_class().objects.create(\n form_data=form.cleaned_data,\n page=self,\n )\n\n def render_landing_page(self, request, form_submission=None, *args, **kwargs):\n \"\"\"\n Renders the landing page.\n\n You can override this method to return a different HttpResponse as\n landing page. E.g. 
you could return a redirect to a separate page.\n \"\"\"\n context = self.get_context(request)\n context[\"form_submission\"] = form_submission\n return TemplateResponse(\n request, self.get_landing_page_template(request), context\n )\n\n def serve_submissions_list_view(self, request, *args, **kwargs):\n \"\"\"\n Returns list submissions view for admin.\n\n `list_submissions_view_class` can bse set to provide custom view class.\n Your class must be inherited from SubmissionsListView.\n \"\"\"\n view = self.get_submissions_list_view_class().as_view()\n return view(request, form_page=self, *args, **kwargs)\n\n def serve(self, request, *args, **kwargs):\n if request.method == \"POST\":\n form = self.get_form(\n request.POST, request.FILES, page=self, user=request.user\n )\n\n if form.is_valid():\n form_submission = self.process_form_submission(form)\n return self.render_landing_page(\n request, form_submission, *args, **kwargs\n )\n else:\n form = self.get_form(page=self, user=request.user)\n\n context = self.get_context(request)\n context[\"form\"] = form\n return TemplateResponse(request, self.get_template(request), context)\n\n preview_modes = [\n (\"form\", _(\"Form\")),\n (\"landing\", _(\"Landing page\")),\n ]\n\n def serve_preview(self, request, mode_name):\n if mode_name == \"landing\":\n return self.render_landing_page(request)\n else:\n return super().serve_preview(request, mode_name)\n\n\ndef validate_to_address(value):\n for address in value.split(\",\"):\n validate_email(address.strip())\n\n\nclass AbstractEmailForm(AbstractForm):\n \"\"\"\n A Form Page that sends email. Pages implementing a form to be send to an email should inherit from it\n \"\"\"\n\n to_address = models.CharField(\n verbose_name=_(\"to address\"),\n max_length=255,\n blank=True,\n help_text=_(\n \"Optional - form submissions will be emailed to these addresses. 
Separate multiple addresses by comma.\"\n ),\n validators=[validate_to_address],\n )\n from_address = models.EmailField(\n verbose_name=_(\"from address\"), max_length=255, blank=True\n )\n subject = models.CharField(verbose_name=_(\"subject\"), max_length=255, blank=True)\n\n def process_form_submission(self, form):\n submission = super().process_form_submission(form)\n if self.to_address:\n self.send_mail(form)\n return submission\n\n def send_mail(self, form):\n addresses = [x.strip() for x in self.to_address.split(\",\")]\n send_mail(\n self.subject,\n self.render_email(form),\n addresses,\n self.from_address,\n )\n\n def render_email(self, form):\n content = []\n\n cleaned_data = form.cleaned_data\n for field in form:\n if field.name not in cleaned_data:\n continue\n\n value = cleaned_data.get(field.name)\n\n if isinstance(value, list):\n value = \", \".join(value)\n\n # Format dates and datetimes with SHORT_DATE(TIME)_FORMAT\n if isinstance(value, datetime.datetime):\n value = date_format(value, settings.SHORT_DATETIME_FORMAT)\n elif isinstance(value, datetime.date):\n value = date_format(value, settings.SHORT_DATE_FORMAT)\n\n content.append(\"{}: {}\".format(field.label, value))\n\n return \"\\n\".join(content)\n\n class Meta:\n abstract = True\n", "path": "wagtail/contrib/forms/models.py"}], "after_files": [{"content": "import datetime\nimport os\n\nfrom django.conf import settings\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.core.validators import validate_email\nfrom django.db import models\nfrom django.template.response import TemplateResponse\nfrom django.utils.formats import date_format\nfrom django.utils.translation import gettext_lazy as _\n\nfrom wagtail.admin.mail import send_mail\nfrom wagtail.admin.panels import FieldPanel\nfrom wagtail.contrib.forms.utils import get_field_clean_name\nfrom wagtail.models import Orderable, Page\n\nfrom .forms import FormBuilder, WagtailAdminFormPageForm\n\nFORM_FIELD_CHOICES = (\n (\"singleline\", _(\"Single line text\")),\n (\"multiline\", _(\"Multi-line text\")),\n (\"email\", _(\"Email\")),\n (\"number\", _(\"Number\")),\n (\"url\", _(\"URL\")),\n (\"checkbox\", _(\"Checkbox\")),\n (\"checkboxes\", _(\"Checkboxes\")),\n (\"dropdown\", _(\"Drop down\")),\n (\"multiselect\", _(\"Multiple select\")),\n (\"radio\", _(\"Radio buttons\")),\n (\"date\", _(\"Date\")),\n (\"datetime\", _(\"Date/time\")),\n (\"hidden\", _(\"Hidden field\")),\n)\n\n\nclass AbstractFormSubmission(models.Model):\n \"\"\"\n Data for a form submission.\n\n You can create custom submission model based on this abstract model.\n For example, if you need to save additional data or a reference to a user.\n \"\"\"\n\n form_data = models.JSONField(encoder=DjangoJSONEncoder)\n page = models.ForeignKey(Page, on_delete=models.CASCADE)\n\n submit_time = models.DateTimeField(verbose_name=_(\"submit time\"), auto_now_add=True)\n\n def get_data(self):\n \"\"\"\n Returns dict with form data.\n\n You can override this method to add additional data.\n \"\"\"\n\n return {\n **self.form_data,\n \"submit_time\": self.submit_time,\n }\n\n def __str__(self):\n return f\"{self.form_data}\"\n\n class Meta:\n abstract = True\n verbose_name = _(\"form submission\")\n verbose_name_plural = _(\"form submissions\")\n\n\nclass FormSubmission(AbstractFormSubmission):\n \"\"\"Data for a Form submission.\"\"\"\n\n\nclass AbstractFormField(Orderable):\n \"\"\"\n Database Fields required for building a Django Form field.\n \"\"\"\n\n clean_name = models.CharField(\n 
verbose_name=_(\"name\"),\n max_length=255,\n blank=True,\n default=\"\",\n help_text=_(\n \"Safe name of the form field, the label converted to ascii_snake_case\"\n ),\n )\n label = models.CharField(\n verbose_name=_(\"label\"),\n max_length=255,\n help_text=_(\"The label of the form field\"),\n )\n field_type = models.CharField(\n verbose_name=_(\"field type\"), max_length=16, choices=FORM_FIELD_CHOICES\n )\n required = models.BooleanField(verbose_name=_(\"required\"), default=True)\n choices = models.TextField(\n verbose_name=_(\"choices\"),\n blank=True,\n help_text=_(\n \"Comma or new line separated list of choices. Only applicable in checkboxes, radio and dropdown.\"\n ),\n )\n default_value = models.TextField(\n verbose_name=_(\"default value\"),\n blank=True,\n help_text=_(\n \"Default value. Comma or new line separated values supported for checkboxes.\"\n ),\n )\n help_text = models.CharField(\n verbose_name=_(\"help text\"), max_length=255, blank=True\n )\n\n panels = [\n FieldPanel(\"label\"),\n FieldPanel(\"help_text\"),\n FieldPanel(\"required\"),\n FieldPanel(\"field_type\", classname=\"formbuilder-type\"),\n FieldPanel(\"choices\", classname=\"formbuilder-choices\"),\n FieldPanel(\"default_value\", classname=\"formbuilder-default\"),\n ]\n\n def get_field_clean_name(self):\n \"\"\"\n Prepare an ascii safe lower_snake_case variant of the field name to use as the field key.\n This key is used to reference the field responses in the JSON store and as the field name in forms.\n Called for new field creation, validation of duplicate labels and form previews.\n When called, does not have access to the Page, nor its own id as the record is not yet created.\n \"\"\"\n\n return get_field_clean_name(self.label)\n\n def save(self, *args, **kwargs):\n \"\"\"\n When new fields are created, generate a template safe ascii name to use as the\n JSON storage reference for this field. Previously created fields will be updated\n to use the legacy unidecode method via checks & _migrate_legacy_clean_name.\n We do not want to update the clean name on any subsequent changes to the label\n as this would invalidate any previously submitted data.\n \"\"\"\n\n is_new = self.pk is None\n if is_new:\n clean_name = self.get_field_clean_name()\n self.clean_name = clean_name\n\n super().save(*args, **kwargs)\n\n class Meta:\n abstract = True\n ordering = [\"sort_order\"]\n\n\nclass AbstractForm(Page):\n \"\"\"\n A Form Page. 
Pages implementing a form should inherit from it\n \"\"\"\n\n base_form_class = WagtailAdminFormPageForm\n\n form_builder = FormBuilder\n\n submissions_list_view_class = None\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if not hasattr(self, \"landing_page_template\"):\n name, ext = os.path.splitext(self.template)\n self.landing_page_template = name + \"_landing\" + ext\n\n class Meta:\n abstract = True\n\n def get_form_fields(self):\n \"\"\"\n Form page expects `form_fields` to be declared.\n If you want to change backwards relation name,\n you need to override this method.\n \"\"\"\n\n return self.form_fields.all()\n\n def get_data_fields(self):\n \"\"\"\n Returns a list of tuples with (field_name, field_label).\n \"\"\"\n\n data_fields = [\n (\"submit_time\", _(\"Submission date\")),\n ]\n data_fields += [\n (field.clean_name, field.label) for field in self.get_form_fields()\n ]\n\n return data_fields\n\n def get_form_class(self):\n fb = self.form_builder(self.get_form_fields())\n return fb.get_form_class()\n\n def get_form_parameters(self):\n return {}\n\n def get_form(self, *args, **kwargs):\n form_class = self.get_form_class()\n form_params = self.get_form_parameters()\n form_params.update(kwargs)\n\n return form_class(*args, **form_params)\n\n def get_landing_page_template(self, request, *args, **kwargs):\n return self.landing_page_template\n\n def get_submission_class(self):\n \"\"\"\n Returns submission class.\n\n You can override this method to provide custom submission class.\n Your class must be inherited from AbstractFormSubmission.\n \"\"\"\n\n return FormSubmission\n\n def get_submissions_list_view_class(self):\n from .views import SubmissionsListView\n\n return self.submissions_list_view_class or SubmissionsListView\n\n def process_form_submission(self, form):\n \"\"\"\n Accepts form instance with submitted data, user and page.\n Creates submission instance.\n\n You can override this method if you want to have custom creation logic.\n For example, if you want to save reference to a user.\n \"\"\"\n\n return self.get_submission_class().objects.create(\n form_data=form.cleaned_data,\n page=self,\n )\n\n def render_landing_page(self, request, form_submission=None, *args, **kwargs):\n \"\"\"\n Renders the landing page.\n\n You can override this method to return a different HttpResponse as\n landing page. E.g. 
you could return a redirect to a separate page.\n \"\"\"\n context = self.get_context(request)\n context[\"form_submission\"] = form_submission\n return TemplateResponse(\n request, self.get_landing_page_template(request), context\n )\n\n def serve_submissions_list_view(self, request, *args, **kwargs):\n \"\"\"\n Returns list submissions view for admin.\n\n `list_submissions_view_class` can bse set to provide custom view class.\n Your class must be inherited from SubmissionsListView.\n \"\"\"\n view = self.get_submissions_list_view_class().as_view()\n return view(request, form_page=self, *args, **kwargs)\n\n def serve(self, request, *args, **kwargs):\n if request.method == \"POST\":\n form = self.get_form(\n request.POST, request.FILES, page=self, user=request.user\n )\n\n if form.is_valid():\n form_submission = self.process_form_submission(form)\n return self.render_landing_page(\n request, form_submission, *args, **kwargs\n )\n else:\n form = self.get_form(page=self, user=request.user)\n\n context = self.get_context(request)\n context[\"form\"] = form\n return TemplateResponse(request, self.get_template(request), context)\n\n preview_modes = [\n (\"form\", _(\"Form\")),\n (\"landing\", _(\"Landing page\")),\n ]\n\n def serve_preview(self, request, mode_name):\n if mode_name == \"landing\":\n return self.render_landing_page(request)\n else:\n return super().serve_preview(request, mode_name)\n\n\ndef validate_to_address(value):\n for address in value.split(\",\"):\n validate_email(address.strip())\n\n\nclass AbstractEmailForm(AbstractForm):\n \"\"\"\n A Form Page that sends email. Pages implementing a form to be send to an email should inherit from it\n \"\"\"\n\n to_address = models.CharField(\n verbose_name=_(\"to address\"),\n max_length=255,\n blank=True,\n help_text=_(\n \"Optional - form submissions will be emailed to these addresses. Separate multiple addresses by comma.\"\n ),\n validators=[validate_to_address],\n )\n from_address = models.EmailField(\n verbose_name=_(\"from address\"), max_length=255, blank=True\n )\n subject = models.CharField(verbose_name=_(\"subject\"), max_length=255, blank=True)\n\n def process_form_submission(self, form):\n submission = super().process_form_submission(form)\n if self.to_address:\n self.send_mail(form)\n return submission\n\n def send_mail(self, form):\n addresses = [x.strip() for x in self.to_address.split(\",\")]\n send_mail(\n self.subject,\n self.render_email(form),\n addresses,\n self.from_address,\n )\n\n def render_email(self, form):\n content = []\n\n cleaned_data = form.cleaned_data\n for field in form:\n if field.name not in cleaned_data:\n continue\n\n value = cleaned_data.get(field.name)\n\n if isinstance(value, list):\n value = \", \".join(value)\n\n # Format dates and datetimes with SHORT_DATE(TIME)_FORMAT\n if isinstance(value, datetime.datetime):\n value = date_format(value, settings.SHORT_DATETIME_FORMAT)\n elif isinstance(value, datetime.date):\n value = date_format(value, settings.SHORT_DATE_FORMAT)\n\n content.append(\"{}: {}\".format(field.label, value))\n\n return \"\\n\".join(content)\n\n class Meta:\n abstract = True\n", "path": "wagtail/contrib/forms/models.py"}]}
| 3,805 | 92 |
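For the Wagtail record above, the failure mode is reproducible in plain Python, since `str()` insists that `__str__` return a `str`; the one-character fix in the golden diff satisfies that. A standalone sketch (no Django or Wagtail imports):

```python
class BrokenSubmission:
    form_data = {"name": "Ada"}

    def __str__(self):
        return self.form_data       # returns a dict, not a str


class FixedSubmission:
    form_data = {"name": "Ada"}

    def __str__(self):
        return f"{self.form_data}"  # always a str


print(str(FixedSubmission()))       # {'name': 'Ada'}

try:
    str(BrokenSubmission())
except TypeError as exc:
    print(exc)                      # __str__ returned non-string (type dict)
```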
gh_patches_debug_16915
|
rasdani/github-patches
|
git_diff
|
fossasia__open-event-server-5322
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add unit test for Event helpers
**Is your feature request related to a problem? Please describe.**
<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->
We should add unit tests for event helpers.
**Additional context**
<!-- Add any other context or screenshots about the feature request here. -->
Parent: #5320
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/helpers/errors.py`
Content:
```
1 import json
2
3 from flask import make_response
4 from flask_rest_jsonapi.errors import jsonapi_errors
5
6
7 class ErrorResponse:
8 """
9 Parent ErrorResponse class for handling json-api compliant errors.
10 Inspired by the JsonApiException class of `flask-rest-jsonapi` itself
11 """
12 title = 'Unknown error'
13 status = 500
14 headers = {'Content-Type': 'application/vnd.api+json'}
15
16 def __init__(self, source, detail, title=None, status=None):
17 """Initialize a jsonapi ErrorResponse Object
18
19 :param dict source: the source of the error
20 :param str detail: the detail of the error
21 """
22 self.source = source
23 self.detail = detail
24 if title is not None:
25 self.title = title
26 if status is not None:
27 self.status = status
28
29 def respond(self):
30 """
31 :return: a jsonapi compliant response object
32 """
33 dict_ = self.to_dict()
34 return make_response(json.dumps(jsonapi_errors([dict_])), self.status, self.headers)
35
36 def to_dict(self):
37 """
38 :return: Dict from details of the object
39 """
40 return {'status': self.status,
41 'source': self.source,
42 'title': self.title,
43 'detail': self.detail}
44
45
46 class ForbiddenError(ErrorResponse):
47 """
48 Default class for 403 Error
49 """
50 title = 'Access Forbidden'
51 status = 403
52
53
54 class NotFoundError(ErrorResponse):
55 """
56 Default class for 404 Error
57 """
58 title = 'Not Found'
59 status = 404
60
61
62 class ServerError(ErrorResponse):
63 status = 500
64 title = 'Internal Server Error'
65
66
67 class UnprocessableEntityError(ErrorResponse):
68 status = 422
69 title = 'Unprocessable Entity'
70
71
72 class BadRequestError(ErrorResponse):
73 status = 400
74 title = 'Bad Request'
75
```
Path: `app/api/helpers/exceptions.py`
Content:
```
1 from flask_rest_jsonapi.exceptions import JsonApiException
2
3
4 class UnprocessableEntity(JsonApiException):
5 title = "Unprocessable Entity"
6 status = 422
7
8
9 class ConflictException(JsonApiException):
10 title = "Conflict"
11 status = 409
12
13
14 class ForbiddenException(JsonApiException):
15 """
16 Default class for 403 Error
17 """
18 title = 'Access Forbidden'
19 status = 403
20
21
22 class MethodNotAllowed(JsonApiException):
23 """
24 Default Class to throw HTTP 405 Exception
25 """
26 title = "Method Not Allowed"
27 status = 405
28
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/app/api/helpers/errors.py b/app/api/helpers/errors.py
--- a/app/api/helpers/errors.py
+++ b/app/api/helpers/errors.py
@@ -65,10 +65,16 @@
class UnprocessableEntityError(ErrorResponse):
+ """
+ Default class for 422 Error
+ """
status = 422
title = 'Unprocessable Entity'
class BadRequestError(ErrorResponse):
+ """
+ Default class for 400 Error
+ """
status = 400
title = 'Bad Request'
diff --git a/app/api/helpers/exceptions.py b/app/api/helpers/exceptions.py
--- a/app/api/helpers/exceptions.py
+++ b/app/api/helpers/exceptions.py
@@ -2,11 +2,17 @@
class UnprocessableEntity(JsonApiException):
+ """
+ Default class for 422 Error
+ """
title = "Unprocessable Entity"
status = 422
class ConflictException(JsonApiException):
+ """
+ Default class for 409 Error
+ """
title = "Conflict"
status = 409
|
{"golden_diff": "diff --git a/app/api/helpers/errors.py b/app/api/helpers/errors.py\n--- a/app/api/helpers/errors.py\n+++ b/app/api/helpers/errors.py\n@@ -65,10 +65,16 @@\n \n \n class UnprocessableEntityError(ErrorResponse):\n+ \"\"\"\n+ Default class for 422 Error\n+ \"\"\"\n status = 422\n title = 'Unprocessable Entity'\n \n \n class BadRequestError(ErrorResponse):\n+ \"\"\"\n+ Default class for 400 Error\n+ \"\"\"\n status = 400\n title = 'Bad Request'\ndiff --git a/app/api/helpers/exceptions.py b/app/api/helpers/exceptions.py\n--- a/app/api/helpers/exceptions.py\n+++ b/app/api/helpers/exceptions.py\n@@ -2,11 +2,17 @@\n \n \n class UnprocessableEntity(JsonApiException):\n+ \"\"\"\n+ Default class for 422 Error\n+ \"\"\"\n title = \"Unprocessable Entity\"\n status = 422\n \n \n class ConflictException(JsonApiException):\n+ \"\"\"\n+ Default class for 409 Error\n+ \"\"\"\n title = \"Conflict\"\n status = 409\n", "issue": "Add unit test for Event helpers\n**Is your feature request related to a problem? Please describe.**\r\n<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->\r\nWe should add unit tests for event helpers.\r\n\r\n**Additional context**\r\n<!-- Add any other context or screenshots about the feature request here. -->\r\nParent: #5320 \n", "before_files": [{"content": "import json\n\nfrom flask import make_response\nfrom flask_rest_jsonapi.errors import jsonapi_errors\n\n\nclass ErrorResponse:\n \"\"\"\n Parent ErrorResponse class for handling json-api compliant errors.\n Inspired by the JsonApiException class of `flask-rest-jsonapi` itself\n \"\"\"\n title = 'Unknown error'\n status = 500\n headers = {'Content-Type': 'application/vnd.api+json'}\n\n def __init__(self, source, detail, title=None, status=None):\n \"\"\"Initialize a jsonapi ErrorResponse Object\n\n :param dict source: the source of the error\n :param str detail: the detail of the error\n \"\"\"\n self.source = source\n self.detail = detail\n if title is not None:\n self.title = title\n if status is not None:\n self.status = status\n\n def respond(self):\n \"\"\"\n :return: a jsonapi compliant response object\n \"\"\"\n dict_ = self.to_dict()\n return make_response(json.dumps(jsonapi_errors([dict_])), self.status, self.headers)\n\n def to_dict(self):\n \"\"\"\n :return: Dict from details of the object\n \"\"\"\n return {'status': self.status,\n 'source': self.source,\n 'title': self.title,\n 'detail': self.detail}\n\n\nclass ForbiddenError(ErrorResponse):\n \"\"\"\n Default class for 403 Error\n \"\"\"\n title = 'Access Forbidden'\n status = 403\n\n\nclass NotFoundError(ErrorResponse):\n \"\"\"\n Default class for 404 Error\n \"\"\"\n title = 'Not Found'\n status = 404\n\n\nclass ServerError(ErrorResponse):\n status = 500\n title = 'Internal Server Error'\n\n\nclass UnprocessableEntityError(ErrorResponse):\n status = 422\n title = 'Unprocessable Entity'\n\n\nclass BadRequestError(ErrorResponse):\n status = 400\n title = 'Bad Request'\n", "path": "app/api/helpers/errors.py"}, {"content": "from flask_rest_jsonapi.exceptions import JsonApiException\n\n\nclass UnprocessableEntity(JsonApiException):\n title = \"Unprocessable Entity\"\n status = 422\n\n\nclass ConflictException(JsonApiException):\n title = \"Conflict\"\n status = 409\n\n\nclass ForbiddenException(JsonApiException):\n \"\"\"\n Default class for 403 Error\n \"\"\"\n title = 'Access Forbidden'\n status = 403\n\n\nclass MethodNotAllowed(JsonApiException):\n \"\"\"\n Default Class to throw HTTP 405 
Exception\n \"\"\"\n title = \"Method Not Allowed\"\n status = 405\n", "path": "app/api/helpers/exceptions.py"}], "after_files": [{"content": "import json\n\nfrom flask import make_response\nfrom flask_rest_jsonapi.errors import jsonapi_errors\n\n\nclass ErrorResponse:\n \"\"\"\n Parent ErrorResponse class for handling json-api compliant errors.\n Inspired by the JsonApiException class of `flask-rest-jsonapi` itself\n \"\"\"\n title = 'Unknown error'\n status = 500\n headers = {'Content-Type': 'application/vnd.api+json'}\n\n def __init__(self, source, detail, title=None, status=None):\n \"\"\"Initialize a jsonapi ErrorResponse Object\n\n :param dict source: the source of the error\n :param str detail: the detail of the error\n \"\"\"\n self.source = source\n self.detail = detail\n if title is not None:\n self.title = title\n if status is not None:\n self.status = status\n\n def respond(self):\n \"\"\"\n :return: a jsonapi compliant response object\n \"\"\"\n dict_ = self.to_dict()\n return make_response(json.dumps(jsonapi_errors([dict_])), self.status, self.headers)\n\n def to_dict(self):\n \"\"\"\n :return: Dict from details of the object\n \"\"\"\n return {'status': self.status,\n 'source': self.source,\n 'title': self.title,\n 'detail': self.detail}\n\n\nclass ForbiddenError(ErrorResponse):\n \"\"\"\n Default class for 403 Error\n \"\"\"\n title = 'Access Forbidden'\n status = 403\n\n\nclass NotFoundError(ErrorResponse):\n \"\"\"\n Default class for 404 Error\n \"\"\"\n title = 'Not Found'\n status = 404\n\n\nclass ServerError(ErrorResponse):\n status = 500\n title = 'Internal Server Error'\n\n\nclass UnprocessableEntityError(ErrorResponse):\n \"\"\"\n Default class for 422 Error\n \"\"\"\n status = 422\n title = 'Unprocessable Entity'\n\n\nclass BadRequestError(ErrorResponse):\n \"\"\"\n Default class for 400 Error\n \"\"\"\n status = 400\n title = 'Bad Request'\n", "path": "app/api/helpers/errors.py"}, {"content": "from flask_rest_jsonapi.exceptions import JsonApiException\n\n\nclass UnprocessableEntity(JsonApiException):\n \"\"\"\n Default class for 422 Error\n \"\"\"\n title = \"Unprocessable Entity\"\n status = 422\n\n\nclass ConflictException(JsonApiException):\n \"\"\"\n Default class for 409 Error\n \"\"\"\n title = \"Conflict\"\n status = 409\n\n\nclass ForbiddenException(JsonApiException):\n \"\"\"\n Default class for 403 Error\n \"\"\"\n title = 'Access Forbidden'\n status = 403\n\n\nclass MethodNotAllowed(JsonApiException):\n \"\"\"\n Default Class to throw HTTP 405 Exception\n \"\"\"\n title = \"Method Not Allowed\"\n status = 405\n", "path": "app/api/helpers/exceptions.py"}]}
| 1,106 | 256 |
gh_patches_debug_36249
|
rasdani/github-patches
|
git_diff
|
microsoft__AzureTRE-965
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Rename custom_parameters.json to template_schema.json (inc in python code)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `api_app/models/schemas/user_resource_template.py`
Content:
```
1 from pydantic import Field
2
3 from models.domain.resource import ResourceType
4 from models.domain.resource_template import Property
5 from models.domain.user_resource_template import UserResourceTemplate
6 from models.schemas.resource_template import ResourceTemplateInCreate, ResourceTemplateInResponse
7
8
9 def get_sample_user_resource_template_object(template_name: str = "guacamole-vm") -> UserResourceTemplate:
10 return UserResourceTemplate(
11 id="a7a7a7bd-7f4e-4a4e-b970-dc86a6b31dfb",
12 name=template_name,
13 parentWorkspaceService="guacamole",
14 description="user resource bundle",
15 version="0.1.0",
16 resourceType=ResourceType.UserResource,
17 current=True,
18 type="object",
19 required=["display_name", "description"],
20 properties={
21 "display_name": Property(type="string"),
22 "description": Property(type="string")
23 }
24 )
25
26
27 def get_sample_user_resource_template() -> dict:
28 return get_sample_user_resource_template_object().dict()
29
30
31 def get_sample_user_resource_template_in_response() -> dict:
32 workspace_template = get_sample_user_resource_template()
33 return workspace_template
34
35
36 class UserResourceTemplateInCreate(ResourceTemplateInCreate):
37
38 class Config:
39 schema_extra = {
40 "example": {
41 "name": "my-tre-user-resource",
42 "version": "0.0.1",
43 "current": "true",
44 "json_schema": {
45 "$schema": "http://json-schema.org/draft-07/schema",
46 "$id": "https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/user_resource.json",
47 "type": "object",
48 "title": "My User Resource Template Custom Parameters",
49 "description": "These parameters are specific to my user resource template",
50 "required": [],
51 "properties": {}
52 }
53 }
54 }
55
56
57 class UserResourceTemplateInResponse(ResourceTemplateInResponse):
58 parentWorkspaceService: str = Field(title="Workspace type", description="Bundle name")
59
60 class Config:
61 schema_extra = {
62 "example": get_sample_user_resource_template_in_response()
63 }
64
```
Path: `api_app/models/schemas/workspace_template.py`
Content:
```
1 from models.domain.resource import ResourceType
2 from models.domain.resource_template import ResourceTemplate, Property
3 from models.schemas.resource_template import ResourceTemplateInCreate, ResourceTemplateInResponse
4
5
6 def get_sample_workspace_template_object(template_name: str = "tre-workspace-base") -> ResourceTemplate:
7 return ResourceTemplate(
8 id="a7a7a7bd-7f4e-4a4e-b970-dc86a6b31dfb",
9 name=template_name,
10 description="base workspace bundle",
11 version="0.1.0",
12 resourceType=ResourceType.Workspace,
13 current=True,
14 type="object",
15 required=["display_name", "description", "app_id"],
16 properties={
17 "display_name": Property(type="string"),
18 "description": Property(type="string"),
19 "app_id": Property(type="string"),
20 "address_space": Property(type="string", default="10.2.1.0/24", description="VNet address space for the workspace services")
21 }
22 )
23
24
25 def get_sample_workspace_template_in_response() -> dict:
26 workspace_template = get_sample_workspace_template_object().dict()
27 workspace_template["system_properties"] = {
28 "tre_id": Property(type="string"),
29 "workspace_id": Property(type="string"),
30 "azure_location": Property(type="string"),
31 }
32 return workspace_template
33
34
35 class WorkspaceTemplateInCreate(ResourceTemplateInCreate):
36
37 class Config:
38 schema_extra = {
39 "example": {
40 "name": "my-tre-workspace",
41 "version": "0.0.1",
42 "current": "true",
43 "json_schema": {
44 "$schema": "http://json-schema.org/draft-07/schema",
45 "$id": "https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/workspace.json",
46 "type": "object",
47 "title": "My Workspace Template Custom Parameters",
48 "description": "These parameters are specific to my workspace template",
49 "required": [
50 "vm_size",
51 "no_of_vms"
52 ],
53 "properties": {
54 "vm_size": {
55 "$id": "#/properties/vm_size",
56 "type": "string",
57 "title": "VM size",
58 "description": "Size of the VMs in my workspace",
59 "default": "Standard_A1",
60 "enum": [
61 "Standard_A1",
62 "Standard_A2",
63 "Standard_A3"
64 ]
65 },
66 "no_of_vms": {
67 "$id": "#/properties/no_of_vms",
68 "type": "integer",
69 "title": "Number of VMs",
70 "description": "Number of virtual machines to be deployed in the workspace",
71 "default": 0
72 }
73 }
74 }
75 }
76 }
77
78
79 class WorkspaceTemplateInResponse(ResourceTemplateInResponse):
80
81 class Config:
82 schema_extra = {
83 "example": get_sample_workspace_template_in_response()
84 }
85
```
Path: `api_app/models/schemas/workspace_service_template.py`
Content:
```
1 from models.domain.resource import ResourceType
2 from models.domain.resource_template import ResourceTemplate, Property
3 from models.schemas.resource_template import ResourceTemplateInCreate, ResourceTemplateInResponse
4
5
6 def get_sample_workspace_service_template_object(template_name: str = "tre-workspace-service") -> ResourceTemplate:
7 return ResourceTemplate(
8 id="a7a7a7bd-7f4e-4a4e-b970-dc86a6b31dfb",
9 name=template_name,
10 description="workspace service bundle",
11 version="0.1.0",
12 resourceType=ResourceType.WorkspaceService,
13 current=True,
14 type="object",
15 required=["display_name", "description"],
16 properties={
17 "display_name": Property(type="string"),
18 "description": Property(type="string")
19 }
20 )
21
22
23 def get_sample_workspace_service_template() -> dict:
24 return get_sample_workspace_service_template_object().dict()
25
26
27 def get_sample_workspace_service_template_in_response() -> dict:
28 workspace_template = get_sample_workspace_service_template()
29 workspace_template["system_properties"] = {
30 "tre_id": Property(type="string"),
31 "workspace_id": Property(type="string"),
32 "azure_location": Property(type="string"),
33 }
34 return workspace_template
35
36
37 class WorkspaceServiceTemplateInCreate(ResourceTemplateInCreate):
38
39 class Config:
40 schema_extra = {
41 "example": {
42 "name": "my-tre-workspace-service",
43 "version": "0.0.1",
44 "current": "true",
45 "json_schema": {
46 "$schema": "http://json-schema.org/draft-07/schema",
47 "$id": "https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/workspace_service.json",
48 "type": "object",
49 "title": "My Workspace Service Template Custom Parameters",
50 "description": "These parameters are specific to my workspace service template",
51 "required": [],
52 "properties": {}
53 }
54 }
55 }
56
57
58 class WorkspaceServiceTemplateInResponse(ResourceTemplateInResponse):
59
60 class Config:
61 schema_extra = {
62 "example": get_sample_workspace_service_template_in_response()
63 }
64
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/api_app/models/schemas/user_resource_template.py b/api_app/models/schemas/user_resource_template.py
--- a/api_app/models/schemas/user_resource_template.py
+++ b/api_app/models/schemas/user_resource_template.py
@@ -45,8 +45,8 @@
"$schema": "http://json-schema.org/draft-07/schema",
"$id": "https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/user_resource.json",
"type": "object",
- "title": "My User Resource Template Custom Parameters",
- "description": "These parameters are specific to my user resource template",
+ "title": "My User Resource Template",
+ "description": "These is a test user resource template schema",
"required": [],
"properties": {}
}
diff --git a/api_app/models/schemas/workspace_service_template.py b/api_app/models/schemas/workspace_service_template.py
--- a/api_app/models/schemas/workspace_service_template.py
+++ b/api_app/models/schemas/workspace_service_template.py
@@ -46,8 +46,8 @@
"$schema": "http://json-schema.org/draft-07/schema",
"$id": "https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/workspace_service.json",
"type": "object",
- "title": "My Workspace Service Template Custom Parameters",
- "description": "These parameters are specific to my workspace service template",
+ "title": "My Workspace Service Template",
+ "description": "These is a test workspace service resource template schema",
"required": [],
"properties": {}
}
diff --git a/api_app/models/schemas/workspace_template.py b/api_app/models/schemas/workspace_template.py
--- a/api_app/models/schemas/workspace_template.py
+++ b/api_app/models/schemas/workspace_template.py
@@ -44,8 +44,8 @@
"$schema": "http://json-schema.org/draft-07/schema",
"$id": "https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/workspace.json",
"type": "object",
- "title": "My Workspace Template Custom Parameters",
- "description": "These parameters are specific to my workspace template",
+ "title": "My Workspace Template",
+ "description": "This is a test workspace template schema",
"required": [
"vm_size",
"no_of_vms"
|
{"golden_diff": "diff --git a/api_app/models/schemas/user_resource_template.py b/api_app/models/schemas/user_resource_template.py\n--- a/api_app/models/schemas/user_resource_template.py\n+++ b/api_app/models/schemas/user_resource_template.py\n@@ -45,8 +45,8 @@\n \"$schema\": \"http://json-schema.org/draft-07/schema\",\n \"$id\": \"https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/user_resource.json\",\n \"type\": \"object\",\n- \"title\": \"My User Resource Template Custom Parameters\",\n- \"description\": \"These parameters are specific to my user resource template\",\n+ \"title\": \"My User Resource Template\",\n+ \"description\": \"These is a test user resource template schema\",\n \"required\": [],\n \"properties\": {}\n }\ndiff --git a/api_app/models/schemas/workspace_service_template.py b/api_app/models/schemas/workspace_service_template.py\n--- a/api_app/models/schemas/workspace_service_template.py\n+++ b/api_app/models/schemas/workspace_service_template.py\n@@ -46,8 +46,8 @@\n \"$schema\": \"http://json-schema.org/draft-07/schema\",\n \"$id\": \"https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/workspace_service.json\",\n \"type\": \"object\",\n- \"title\": \"My Workspace Service Template Custom Parameters\",\n- \"description\": \"These parameters are specific to my workspace service template\",\n+ \"title\": \"My Workspace Service Template\",\n+ \"description\": \"These is a test workspace service resource template schema\",\n \"required\": [],\n \"properties\": {}\n }\ndiff --git a/api_app/models/schemas/workspace_template.py b/api_app/models/schemas/workspace_template.py\n--- a/api_app/models/schemas/workspace_template.py\n+++ b/api_app/models/schemas/workspace_template.py\n@@ -44,8 +44,8 @@\n \"$schema\": \"http://json-schema.org/draft-07/schema\",\n \"$id\": \"https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/workspace.json\",\n \"type\": \"object\",\n- \"title\": \"My Workspace Template Custom Parameters\",\n- \"description\": \"These parameters are specific to my workspace template\",\n+ \"title\": \"My Workspace Template\",\n+ \"description\": \"This is a test workspace template schema\",\n \"required\": [\n \"vm_size\",\n \"no_of_vms\"\n", "issue": "Rename custom_parameters.json to template_schema.json (inc in python code)\n\n", "before_files": [{"content": "from pydantic import Field\n\nfrom models.domain.resource import ResourceType\nfrom models.domain.resource_template import Property\nfrom models.domain.user_resource_template import UserResourceTemplate\nfrom models.schemas.resource_template import ResourceTemplateInCreate, ResourceTemplateInResponse\n\n\ndef get_sample_user_resource_template_object(template_name: str = \"guacamole-vm\") -> UserResourceTemplate:\n return UserResourceTemplate(\n id=\"a7a7a7bd-7f4e-4a4e-b970-dc86a6b31dfb\",\n name=template_name,\n parentWorkspaceService=\"guacamole\",\n description=\"user resource bundle\",\n version=\"0.1.0\",\n resourceType=ResourceType.UserResource,\n current=True,\n type=\"object\",\n required=[\"display_name\", \"description\"],\n properties={\n \"display_name\": Property(type=\"string\"),\n \"description\": Property(type=\"string\")\n }\n )\n\n\ndef get_sample_user_resource_template() -> dict:\n return get_sample_user_resource_template_object().dict()\n\n\ndef get_sample_user_resource_template_in_response() -> dict:\n workspace_template = get_sample_user_resource_template()\n return workspace_template\n\n\nclass 
UserResourceTemplateInCreate(ResourceTemplateInCreate):\n\n class Config:\n schema_extra = {\n \"example\": {\n \"name\": \"my-tre-user-resource\",\n \"version\": \"0.0.1\",\n \"current\": \"true\",\n \"json_schema\": {\n \"$schema\": \"http://json-schema.org/draft-07/schema\",\n \"$id\": \"https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/user_resource.json\",\n \"type\": \"object\",\n \"title\": \"My User Resource Template Custom Parameters\",\n \"description\": \"These parameters are specific to my user resource template\",\n \"required\": [],\n \"properties\": {}\n }\n }\n }\n\n\nclass UserResourceTemplateInResponse(ResourceTemplateInResponse):\n parentWorkspaceService: str = Field(title=\"Workspace type\", description=\"Bundle name\")\n\n class Config:\n schema_extra = {\n \"example\": get_sample_user_resource_template_in_response()\n }\n", "path": "api_app/models/schemas/user_resource_template.py"}, {"content": "from models.domain.resource import ResourceType\nfrom models.domain.resource_template import ResourceTemplate, Property\nfrom models.schemas.resource_template import ResourceTemplateInCreate, ResourceTemplateInResponse\n\n\ndef get_sample_workspace_template_object(template_name: str = \"tre-workspace-base\") -> ResourceTemplate:\n return ResourceTemplate(\n id=\"a7a7a7bd-7f4e-4a4e-b970-dc86a6b31dfb\",\n name=template_name,\n description=\"base workspace bundle\",\n version=\"0.1.0\",\n resourceType=ResourceType.Workspace,\n current=True,\n type=\"object\",\n required=[\"display_name\", \"description\", \"app_id\"],\n properties={\n \"display_name\": Property(type=\"string\"),\n \"description\": Property(type=\"string\"),\n \"app_id\": Property(type=\"string\"),\n \"address_space\": Property(type=\"string\", default=\"10.2.1.0/24\", description=\"VNet address space for the workspace services\")\n }\n )\n\n\ndef get_sample_workspace_template_in_response() -> dict:\n workspace_template = get_sample_workspace_template_object().dict()\n workspace_template[\"system_properties\"] = {\n \"tre_id\": Property(type=\"string\"),\n \"workspace_id\": Property(type=\"string\"),\n \"azure_location\": Property(type=\"string\"),\n }\n return workspace_template\n\n\nclass WorkspaceTemplateInCreate(ResourceTemplateInCreate):\n\n class Config:\n schema_extra = {\n \"example\": {\n \"name\": \"my-tre-workspace\",\n \"version\": \"0.0.1\",\n \"current\": \"true\",\n \"json_schema\": {\n \"$schema\": \"http://json-schema.org/draft-07/schema\",\n \"$id\": \"https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/workspace.json\",\n \"type\": \"object\",\n \"title\": \"My Workspace Template Custom Parameters\",\n \"description\": \"These parameters are specific to my workspace template\",\n \"required\": [\n \"vm_size\",\n \"no_of_vms\"\n ],\n \"properties\": {\n \"vm_size\": {\n \"$id\": \"#/properties/vm_size\",\n \"type\": \"string\",\n \"title\": \"VM size\",\n \"description\": \"Size of the VMs in my workspace\",\n \"default\": \"Standard_A1\",\n \"enum\": [\n \"Standard_A1\",\n \"Standard_A2\",\n \"Standard_A3\"\n ]\n },\n \"no_of_vms\": {\n \"$id\": \"#/properties/no_of_vms\",\n \"type\": \"integer\",\n \"title\": \"Number of VMs\",\n \"description\": \"Number of virtual machines to be deployed in the workspace\",\n \"default\": 0\n }\n }\n }\n }\n }\n\n\nclass WorkspaceTemplateInResponse(ResourceTemplateInResponse):\n\n class Config:\n schema_extra = {\n \"example\": get_sample_workspace_template_in_response()\n }\n", "path": 
"api_app/models/schemas/workspace_template.py"}, {"content": "from models.domain.resource import ResourceType\nfrom models.domain.resource_template import ResourceTemplate, Property\nfrom models.schemas.resource_template import ResourceTemplateInCreate, ResourceTemplateInResponse\n\n\ndef get_sample_workspace_service_template_object(template_name: str = \"tre-workspace-service\") -> ResourceTemplate:\n return ResourceTemplate(\n id=\"a7a7a7bd-7f4e-4a4e-b970-dc86a6b31dfb\",\n name=template_name,\n description=\"workspace service bundle\",\n version=\"0.1.0\",\n resourceType=ResourceType.WorkspaceService,\n current=True,\n type=\"object\",\n required=[\"display_name\", \"description\"],\n properties={\n \"display_name\": Property(type=\"string\"),\n \"description\": Property(type=\"string\")\n }\n )\n\n\ndef get_sample_workspace_service_template() -> dict:\n return get_sample_workspace_service_template_object().dict()\n\n\ndef get_sample_workspace_service_template_in_response() -> dict:\n workspace_template = get_sample_workspace_service_template()\n workspace_template[\"system_properties\"] = {\n \"tre_id\": Property(type=\"string\"),\n \"workspace_id\": Property(type=\"string\"),\n \"azure_location\": Property(type=\"string\"),\n }\n return workspace_template\n\n\nclass WorkspaceServiceTemplateInCreate(ResourceTemplateInCreate):\n\n class Config:\n schema_extra = {\n \"example\": {\n \"name\": \"my-tre-workspace-service\",\n \"version\": \"0.0.1\",\n \"current\": \"true\",\n \"json_schema\": {\n \"$schema\": \"http://json-schema.org/draft-07/schema\",\n \"$id\": \"https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/workspace_service.json\",\n \"type\": \"object\",\n \"title\": \"My Workspace Service Template Custom Parameters\",\n \"description\": \"These parameters are specific to my workspace service template\",\n \"required\": [],\n \"properties\": {}\n }\n }\n }\n\n\nclass WorkspaceServiceTemplateInResponse(ResourceTemplateInResponse):\n\n class Config:\n schema_extra = {\n \"example\": get_sample_workspace_service_template_in_response()\n }\n", "path": "api_app/models/schemas/workspace_service_template.py"}], "after_files": [{"content": "from pydantic import Field\n\nfrom models.domain.resource import ResourceType\nfrom models.domain.resource_template import Property\nfrom models.domain.user_resource_template import UserResourceTemplate\nfrom models.schemas.resource_template import ResourceTemplateInCreate, ResourceTemplateInResponse\n\n\ndef get_sample_user_resource_template_object(template_name: str = \"guacamole-vm\") -> UserResourceTemplate:\n return UserResourceTemplate(\n id=\"a7a7a7bd-7f4e-4a4e-b970-dc86a6b31dfb\",\n name=template_name,\n parentWorkspaceService=\"guacamole\",\n description=\"user resource bundle\",\n version=\"0.1.0\",\n resourceType=ResourceType.UserResource,\n current=True,\n type=\"object\",\n required=[\"display_name\", \"description\"],\n properties={\n \"display_name\": Property(type=\"string\"),\n \"description\": Property(type=\"string\")\n }\n )\n\n\ndef get_sample_user_resource_template() -> dict:\n return get_sample_user_resource_template_object().dict()\n\n\ndef get_sample_user_resource_template_in_response() -> dict:\n workspace_template = get_sample_user_resource_template()\n return workspace_template\n\n\nclass UserResourceTemplateInCreate(ResourceTemplateInCreate):\n\n class Config:\n schema_extra = {\n \"example\": {\n \"name\": \"my-tre-user-resource\",\n \"version\": \"0.0.1\",\n \"current\": \"true\",\n \"json_schema\": 
{\n \"$schema\": \"http://json-schema.org/draft-07/schema\",\n \"$id\": \"https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/user_resource.json\",\n \"type\": \"object\",\n \"title\": \"My User Resource Template\",\n \"description\": \"These is a test user resource template schema\",\n \"required\": [],\n \"properties\": {}\n }\n }\n }\n\n\nclass UserResourceTemplateInResponse(ResourceTemplateInResponse):\n parentWorkspaceService: str = Field(title=\"Workspace type\", description=\"Bundle name\")\n\n class Config:\n schema_extra = {\n \"example\": get_sample_user_resource_template_in_response()\n }\n", "path": "api_app/models/schemas/user_resource_template.py"}, {"content": "from models.domain.resource import ResourceType\nfrom models.domain.resource_template import ResourceTemplate, Property\nfrom models.schemas.resource_template import ResourceTemplateInCreate, ResourceTemplateInResponse\n\n\ndef get_sample_workspace_template_object(template_name: str = \"tre-workspace-base\") -> ResourceTemplate:\n return ResourceTemplate(\n id=\"a7a7a7bd-7f4e-4a4e-b970-dc86a6b31dfb\",\n name=template_name,\n description=\"base workspace bundle\",\n version=\"0.1.0\",\n resourceType=ResourceType.Workspace,\n current=True,\n type=\"object\",\n required=[\"display_name\", \"description\", \"app_id\"],\n properties={\n \"display_name\": Property(type=\"string\"),\n \"description\": Property(type=\"string\"),\n \"app_id\": Property(type=\"string\"),\n \"address_space\": Property(type=\"string\", default=\"10.2.1.0/24\", description=\"VNet address space for the workspace services\")\n }\n )\n\n\ndef get_sample_workspace_template_in_response() -> dict:\n workspace_template = get_sample_workspace_template_object().dict()\n workspace_template[\"system_properties\"] = {\n \"tre_id\": Property(type=\"string\"),\n \"workspace_id\": Property(type=\"string\"),\n \"azure_location\": Property(type=\"string\"),\n }\n return workspace_template\n\n\nclass WorkspaceTemplateInCreate(ResourceTemplateInCreate):\n\n class Config:\n schema_extra = {\n \"example\": {\n \"name\": \"my-tre-workspace\",\n \"version\": \"0.0.1\",\n \"current\": \"true\",\n \"json_schema\": {\n \"$schema\": \"http://json-schema.org/draft-07/schema\",\n \"$id\": \"https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/workspace.json\",\n \"type\": \"object\",\n \"title\": \"My Workspace Template\",\n \"description\": \"This is a test workspace template schema\",\n \"required\": [\n \"vm_size\",\n \"no_of_vms\"\n ],\n \"properties\": {\n \"vm_size\": {\n \"$id\": \"#/properties/vm_size\",\n \"type\": \"string\",\n \"title\": \"VM size\",\n \"description\": \"Size of the VMs in my workspace\",\n \"default\": \"Standard_A1\",\n \"enum\": [\n \"Standard_A1\",\n \"Standard_A2\",\n \"Standard_A3\"\n ]\n },\n \"no_of_vms\": {\n \"$id\": \"#/properties/no_of_vms\",\n \"type\": \"integer\",\n \"title\": \"Number of VMs\",\n \"description\": \"Number of virtual machines to be deployed in the workspace\",\n \"default\": 0\n }\n }\n }\n }\n }\n\n\nclass WorkspaceTemplateInResponse(ResourceTemplateInResponse):\n\n class Config:\n schema_extra = {\n \"example\": get_sample_workspace_template_in_response()\n }\n", "path": "api_app/models/schemas/workspace_template.py"}, {"content": "from models.domain.resource import ResourceType\nfrom models.domain.resource_template import ResourceTemplate, Property\nfrom models.schemas.resource_template import ResourceTemplateInCreate, ResourceTemplateInResponse\n\n\ndef 
get_sample_workspace_service_template_object(template_name: str = \"tre-workspace-service\") -> ResourceTemplate:\n return ResourceTemplate(\n id=\"a7a7a7bd-7f4e-4a4e-b970-dc86a6b31dfb\",\n name=template_name,\n description=\"workspace service bundle\",\n version=\"0.1.0\",\n resourceType=ResourceType.WorkspaceService,\n current=True,\n type=\"object\",\n required=[\"display_name\", \"description\"],\n properties={\n \"display_name\": Property(type=\"string\"),\n \"description\": Property(type=\"string\")\n }\n )\n\n\ndef get_sample_workspace_service_template() -> dict:\n return get_sample_workspace_service_template_object().dict()\n\n\ndef get_sample_workspace_service_template_in_response() -> dict:\n workspace_template = get_sample_workspace_service_template()\n workspace_template[\"system_properties\"] = {\n \"tre_id\": Property(type=\"string\"),\n \"workspace_id\": Property(type=\"string\"),\n \"azure_location\": Property(type=\"string\"),\n }\n return workspace_template\n\n\nclass WorkspaceServiceTemplateInCreate(ResourceTemplateInCreate):\n\n class Config:\n schema_extra = {\n \"example\": {\n \"name\": \"my-tre-workspace-service\",\n \"version\": \"0.0.1\",\n \"current\": \"true\",\n \"json_schema\": {\n \"$schema\": \"http://json-schema.org/draft-07/schema\",\n \"$id\": \"https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/workspace_service.json\",\n \"type\": \"object\",\n \"title\": \"My Workspace Service Template\",\n \"description\": \"These is a test workspace service resource template schema\",\n \"required\": [],\n \"properties\": {}\n }\n }\n }\n\n\nclass WorkspaceServiceTemplateInResponse(ResourceTemplateInResponse):\n\n class Config:\n schema_extra = {\n \"example\": get_sample_workspace_service_template_in_response()\n }\n", "path": "api_app/models/schemas/workspace_service_template.py"}]}
| 2,279 | 522 |
gh_patches_debug_15280
|
rasdani/github-patches
|
git_diff
|
PlasmaPy__PlasmaPy-891
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Change name of chat room from Matrix/Riot to Element
The name of the chat platform we are using has [changed from Matrix/Riot to Element](https://element.io/blog/the-world-is-changing/). We should change the name of our chat room accordingly, including in our README file and possibly some places in our documentation.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plasmapy/particles/isotopes.py`
Content:
```
1 """
2 Module for loading isotope data from :file:`plasmapy/particles/data/isotopes.json`.
3
4 .. attention::
5 This module only contains non-public functionality. To learn more about the
6 package functionality, then examine the code itself.
7 """
8 __all__ = []
9
10 import astropy.units as u
11 import json
12 import pkgutil
13
14 # this code was used to create the JSON file as per vn-ki on Riot:
15 # https://matrix.to/#/!hkWCiyhQyxiYJlUtKF:matrix.org/
16 # $1554667515670438wIKlP:matrix.org?via=matrix.org&via=cadair.com
17 #
18 # def _isotope_default(obj):
19 # if isinstance(obj, u.Quantity):
20 # return {
21 # "unit": obj.unit.name,
22 # "value": obj.value,
23 # }
24 # with open("isotopes.json", "w") as f:
25 # json.dump(_Isotopes, f, default=plasma_default, indent=2)
26
27
28 def _isotope_obj_hook(obj):
29 """An `object_hook` designed for `json.load` and `json.loads`."""
30 if "unit" in obj:
31 return obj["value"] * u.Unit(obj["unit"])
32 return obj
33
34
35 #: Dictionary of isotope data.
36 _Isotopes = json.loads(
37 pkgutil.get_data("plasmapy", "particles/data/isotopes.json"),
38 object_hook=_isotope_obj_hook,
39 )
40
```
Path: `plasmapy/particles/elements.py`
Content:
```
1 """
2 Module for loading atomic data for elements from
3 :file:`plasmapy/particles/data/elements.json`.
4
5 The periodic tabla data is from: http://periodic.lanl.gov/index.shtml
6
7 .. attention::
8 This module only contains non-public functionality. To learn more about the
9 package functionality, examine the code itself.
10 """
11 __all__ = []
12
13 import astropy.units as u
14 import collections
15 import json
16 import pkgutil
17
18 _PeriodicTable = collections.namedtuple(
19 "periodic_table", ["group", "category", "block", "period"]
20 )
21
22
23 def _element_obj_hook(obj):
24 if "unit" in obj:
25 return obj["value"] * u.Unit(obj["unit"])
26 return obj
27
28
29 # this code was used to create the JSON file as per vn-ki on Riot:
30 # https://matrix.to/#/!hkWCiyhQyxiYJlUtKF:matrix.org/
31 # $1554667515670438wIKlP:matrix.org?via=matrix.org&via=cadair.com
32 #
33 # def plasma_default(obj):
34 # if isinstance(obj, u.Quantity):
35 # return {
36 # "unit": obj.unit.name,
37 # "value": obj.value,
38 # }
39 #
40 # with open("elements.json", "w") as f:
41 # json.dump(_Elements, f, default=plasma_default, indent=2)
42
43
44 _Elements = json.loads(
45 pkgutil.get_data("plasmapy", "particles/data/elements.json"),
46 object_hook=_element_obj_hook,
47 )
48
49
50 _atomic_numbers_to_symbols = {
51 elemdict["atomic number"]: symb for (symb, elemdict) in _Elements.items()
52 }
53
54 _element_names_to_symbols = {
55 elemdict["element name"]: symb for (symb, elemdict) in _Elements.items()
56 }
57
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/plasmapy/particles/elements.py b/plasmapy/particles/elements.py
--- a/plasmapy/particles/elements.py
+++ b/plasmapy/particles/elements.py
@@ -26,7 +26,7 @@
return obj
-# this code was used to create the JSON file as per vn-ki on Riot:
+# this code was used to create the JSON file as per vn-ki on Matrix:
# https://matrix.to/#/!hkWCiyhQyxiYJlUtKF:matrix.org/
# $1554667515670438wIKlP:matrix.org?via=matrix.org&via=cadair.com
#
diff --git a/plasmapy/particles/isotopes.py b/plasmapy/particles/isotopes.py
--- a/plasmapy/particles/isotopes.py
+++ b/plasmapy/particles/isotopes.py
@@ -11,7 +11,7 @@
import json
import pkgutil
-# this code was used to create the JSON file as per vn-ki on Riot:
+# this code was used to create the JSON file as per vn-ki on Matrix:
# https://matrix.to/#/!hkWCiyhQyxiYJlUtKF:matrix.org/
# $1554667515670438wIKlP:matrix.org?via=matrix.org&via=cadair.com
#
|
{"golden_diff": "diff --git a/plasmapy/particles/elements.py b/plasmapy/particles/elements.py\n--- a/plasmapy/particles/elements.py\n+++ b/plasmapy/particles/elements.py\n@@ -26,7 +26,7 @@\n return obj\n \n \n-# this code was used to create the JSON file as per vn-ki on Riot:\n+# this code was used to create the JSON file as per vn-ki on Matrix:\n # https://matrix.to/#/!hkWCiyhQyxiYJlUtKF:matrix.org/\n # $1554667515670438wIKlP:matrix.org?via=matrix.org&via=cadair.com\n #\ndiff --git a/plasmapy/particles/isotopes.py b/plasmapy/particles/isotopes.py\n--- a/plasmapy/particles/isotopes.py\n+++ b/plasmapy/particles/isotopes.py\n@@ -11,7 +11,7 @@\n import json\n import pkgutil\n \n-# this code was used to create the JSON file as per vn-ki on Riot:\n+# this code was used to create the JSON file as per vn-ki on Matrix:\n # https://matrix.to/#/!hkWCiyhQyxiYJlUtKF:matrix.org/\n # $1554667515670438wIKlP:matrix.org?via=matrix.org&via=cadair.com\n #\n", "issue": "Change name of chat room from Matrix/Riot to Element\nThe name of the chat platform we are using has [changed from Matrix/Riot to Element](https://element.io/blog/the-world-is-changing/). We should change the name of our chat room accordingly, including in our README file and possibly some places in our documentation.\n", "before_files": [{"content": "\"\"\"\nModule for loading isotope data from :file:`plasmapy/particles/data/isotopes.json`.\n\n.. attention::\n This module only contains non-public functionality. To learn more about the\n package functionality, then examine the code itself.\n\"\"\"\n__all__ = []\n\nimport astropy.units as u\nimport json\nimport pkgutil\n\n# this code was used to create the JSON file as per vn-ki on Riot:\n# https://matrix.to/#/!hkWCiyhQyxiYJlUtKF:matrix.org/\n# $1554667515670438wIKlP:matrix.org?via=matrix.org&via=cadair.com\n#\n# def _isotope_default(obj):\n# if isinstance(obj, u.Quantity):\n# return {\n# \"unit\": obj.unit.name,\n# \"value\": obj.value,\n# }\n# with open(\"isotopes.json\", \"w\") as f:\n# json.dump(_Isotopes, f, default=plasma_default, indent=2)\n\n\ndef _isotope_obj_hook(obj):\n \"\"\"An `object_hook` designed for `json.load` and `json.loads`.\"\"\"\n if \"unit\" in obj:\n return obj[\"value\"] * u.Unit(obj[\"unit\"])\n return obj\n\n\n#: Dictionary of isotope data.\n_Isotopes = json.loads(\n pkgutil.get_data(\"plasmapy\", \"particles/data/isotopes.json\"),\n object_hook=_isotope_obj_hook,\n)\n", "path": "plasmapy/particles/isotopes.py"}, {"content": "\"\"\"\nModule for loading atomic data for elements from\n:file:`plasmapy/particles/data/elements.json`.\n\nThe periodic tabla data is from: http://periodic.lanl.gov/index.shtml\n\n.. attention::\n This module only contains non-public functionality. 
To learn more about the\n package functionality, examine the code itself.\n\"\"\"\n__all__ = []\n\nimport astropy.units as u\nimport collections\nimport json\nimport pkgutil\n\n_PeriodicTable = collections.namedtuple(\n \"periodic_table\", [\"group\", \"category\", \"block\", \"period\"]\n)\n\n\ndef _element_obj_hook(obj):\n if \"unit\" in obj:\n return obj[\"value\"] * u.Unit(obj[\"unit\"])\n return obj\n\n\n# this code was used to create the JSON file as per vn-ki on Riot:\n# https://matrix.to/#/!hkWCiyhQyxiYJlUtKF:matrix.org/\n# $1554667515670438wIKlP:matrix.org?via=matrix.org&via=cadair.com\n#\n# def plasma_default(obj):\n# if isinstance(obj, u.Quantity):\n# return {\n# \"unit\": obj.unit.name,\n# \"value\": obj.value,\n# }\n#\n# with open(\"elements.json\", \"w\") as f:\n# json.dump(_Elements, f, default=plasma_default, indent=2)\n\n\n_Elements = json.loads(\n pkgutil.get_data(\"plasmapy\", \"particles/data/elements.json\"),\n object_hook=_element_obj_hook,\n)\n\n\n_atomic_numbers_to_symbols = {\n elemdict[\"atomic number\"]: symb for (symb, elemdict) in _Elements.items()\n}\n\n_element_names_to_symbols = {\n elemdict[\"element name\"]: symb for (symb, elemdict) in _Elements.items()\n}\n", "path": "plasmapy/particles/elements.py"}], "after_files": [{"content": "\"\"\"\nModule for loading isotope data from :file:`plasmapy/particles/data/isotopes.json`.\n\n.. attention::\n This module only contains non-public functionality. To learn more about the\n package functionality, then examine the code itself.\n\"\"\"\n__all__ = []\n\nimport astropy.units as u\nimport json\nimport pkgutil\n\n# this code was used to create the JSON file as per vn-ki on Matrix:\n# https://matrix.to/#/!hkWCiyhQyxiYJlUtKF:matrix.org/\n# $1554667515670438wIKlP:matrix.org?via=matrix.org&via=cadair.com\n#\n# def _isotope_default(obj):\n# if isinstance(obj, u.Quantity):\n# return {\n# \"unit\": obj.unit.name,\n# \"value\": obj.value,\n# }\n# with open(\"isotopes.json\", \"w\") as f:\n# json.dump(_Isotopes, f, default=plasma_default, indent=2)\n\n\ndef _isotope_obj_hook(obj):\n \"\"\"An `object_hook` designed for `json.load` and `json.loads`.\"\"\"\n if \"unit\" in obj:\n return obj[\"value\"] * u.Unit(obj[\"unit\"])\n return obj\n\n\n#: Dictionary of isotope data.\n_Isotopes = json.loads(\n pkgutil.get_data(\"plasmapy\", \"particles/data/isotopes.json\"),\n object_hook=_isotope_obj_hook,\n)\n", "path": "plasmapy/particles/isotopes.py"}, {"content": "\"\"\"\nModule for loading atomic data for elements from\n:file:`plasmapy/particles/data/elements.json`.\n\nThe periodic tabla data is from: http://periodic.lanl.gov/index.shtml\n\n.. attention::\n This module only contains non-public functionality. 
To learn more about the\n package functionality, examine the code itself.\n\"\"\"\n__all__ = []\n\nimport astropy.units as u\nimport collections\nimport json\nimport pkgutil\n\n_PeriodicTable = collections.namedtuple(\n \"periodic_table\", [\"group\", \"category\", \"block\", \"period\"]\n)\n\n\ndef _element_obj_hook(obj):\n if \"unit\" in obj:\n return obj[\"value\"] * u.Unit(obj[\"unit\"])\n return obj\n\n\n# this code was used to create the JSON file as per vn-ki on Matrix:\n# https://matrix.to/#/!hkWCiyhQyxiYJlUtKF:matrix.org/\n# $1554667515670438wIKlP:matrix.org?via=matrix.org&via=cadair.com\n#\n# def plasma_default(obj):\n# if isinstance(obj, u.Quantity):\n# return {\n# \"unit\": obj.unit.name,\n# \"value\": obj.value,\n# }\n#\n# with open(\"elements.json\", \"w\") as f:\n# json.dump(_Elements, f, default=plasma_default, indent=2)\n\n\n_Elements = json.loads(\n pkgutil.get_data(\"plasmapy\", \"particles/data/elements.json\"),\n object_hook=_element_obj_hook,\n)\n\n\n_atomic_numbers_to_symbols = {\n elemdict[\"atomic number\"]: symb for (symb, elemdict) in _Elements.items()\n}\n\n_element_names_to_symbols = {\n elemdict[\"element name\"]: symb for (symb, elemdict) in _Elements.items()\n}\n", "path": "plasmapy/particles/elements.py"}]}
| 1,274 | 336 |
gh_patches_debug_2500
|
rasdani/github-patches
|
git_diff
|
rlworkgroup__garage-692
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add Intel-optimized version of the package
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import find_packages
2 from setuptools import setup
3
4 # Required dependencies
5 required = [
6 # Please keep alphabetized
7 'akro',
8 'boto3',
9 'cached_property',
10 'click',
11 'cloudpickle',
12 'cma==1.1.06',
13 # dm_control throws an error during install about not being able to
14 # find a build dependency (absl-py). Later pip executes the `install`
15 # command again and the install succeeds because absl-py has been
16 # installed. This is stupid, but harmless.
17 'dm_control @ https://api.github.com/repos/deepmind/dm_control/tarball/92f9913013face0468442cd0964d5973ea2089ea', # noqa: E501
18 'dowel==0.0.2',
19 'gym[all]==0.10.11',
20 'joblib<0.13,>=0.12',
21 'matplotlib',
22 'mujoco-py<1.50.2,>=1.50.1',
23 'numpy==1.14.5',
24 'psutil',
25 'pyprind',
26 'python-dateutil',
27 'scikit-image',
28 'scipy',
29 'tensorflow<1.13,>=1.12.0',
30 'tensorflow-probability<0.6.0,>=0.5.0', # for tensorflow 1.12
31 ]
32
33 # Dependencies for optional features
34 extras = {}
35 extras['all'] = list(set(sum(extras.values(), [])))
36
37 # Development dependencies (*not* included in "all")
38 extras['dev'] = [
39 # Please keep alphabetized
40 'baselines @ https://api.github.com/repos/openai/baselines/tarball/f2729693253c0ef4d4086231d36e0a4307ec1cb3', # noqa: E501
41 'coverage',
42 'flake8',
43 'flake8-docstrings==1.3.0',
44 'flake8-import-order',
45 'nose2',
46 'pandas',
47 'pep8-naming==0.7.0',
48 'pre-commit',
49 'pylint==1.9.2',
50 'sphinx',
51 'sphinx_rtd_theme',
52 'yapf',
53 ]
54
55 with open('README.md') as f:
56 readme = f.read()
57
58 # Get the package version dynamically
59 with open('VERSION') as v:
60 version = v.read().strip()
61
62 setup(
63 name='garage',
64 version=version,
65 author='Reinforcement Learning Working Group',
66 description='A framework for reproducible reinforcement learning research',
67 url='https://github.com/rlworkgroup/garage',
68 packages=find_packages(where='src'),
69 package_dir={'': 'src'},
70 scripts=['scripts/garage'],
71 install_requires=required,
72 extras_require=extras,
73 license='MIT',
74 long_description=readme,
75 long_description_content_type='text/markdown',
76 classifiers=[
77 'Development Status :: 4 - Beta',
78 'Intended Audience :: Developers',
79 'Intended Audience :: Education',
80 'Intended Audience :: Science/Research',
81 'License :: OSI Approved :: MIT License',
82 'Programming Language :: Python :: 3.5',
83 'Programming Language :: Python :: 3.6',
84 'Programming Language :: Python :: 3.7',
85 'Programming Language :: Python :: 3 :: Only',
86 'Topic :: Scientific/Engineering :: Artificial Intelligence',
87 'Topic :: Scientific/Engineering :: Mathematics',
88 'Topic :: Software Development :: Libraries',
89 ],
90 )
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -34,6 +34,9 @@
extras = {}
extras['all'] = list(set(sum(extras.values(), [])))
+# Intel dependencies not included in all
+extras['intel'] = ['intel-tensorflow<1.13,>=1.12.0']
+
# Development dependencies (*not* included in "all")
extras['dev'] = [
# Please keep alphabetized
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -34,6 +34,9 @@\n extras = {}\n extras['all'] = list(set(sum(extras.values(), [])))\n \n+# Intel dependencies not included in all\n+extras['intel'] = ['intel-tensorflow<1.13,>=1.12.0']\n+\n # Development dependencies (*not* included in \"all\")\n extras['dev'] = [\n # Please keep alphabetized\n", "issue": "Add Intel-optimized version of the package\n\n", "before_files": [{"content": "from setuptools import find_packages\nfrom setuptools import setup\n\n# Required dependencies\nrequired = [\n # Please keep alphabetized\n 'akro',\n 'boto3',\n 'cached_property',\n 'click',\n 'cloudpickle',\n 'cma==1.1.06',\n # dm_control throws an error during install about not being able to\n # find a build dependency (absl-py). Later pip executes the `install`\n # command again and the install succeeds because absl-py has been\n # installed. This is stupid, but harmless.\n 'dm_control @ https://api.github.com/repos/deepmind/dm_control/tarball/92f9913013face0468442cd0964d5973ea2089ea', # noqa: E501\n 'dowel==0.0.2',\n 'gym[all]==0.10.11',\n 'joblib<0.13,>=0.12',\n 'matplotlib',\n 'mujoco-py<1.50.2,>=1.50.1',\n 'numpy==1.14.5',\n 'psutil',\n 'pyprind',\n 'python-dateutil',\n 'scikit-image',\n 'scipy',\n 'tensorflow<1.13,>=1.12.0',\n 'tensorflow-probability<0.6.0,>=0.5.0', # for tensorflow 1.12\n]\n\n# Dependencies for optional features\nextras = {}\nextras['all'] = list(set(sum(extras.values(), [])))\n\n# Development dependencies (*not* included in \"all\")\nextras['dev'] = [\n # Please keep alphabetized\n 'baselines @ https://api.github.com/repos/openai/baselines/tarball/f2729693253c0ef4d4086231d36e0a4307ec1cb3', # noqa: E501\n 'coverage',\n 'flake8',\n 'flake8-docstrings==1.3.0',\n 'flake8-import-order',\n 'nose2',\n 'pandas',\n 'pep8-naming==0.7.0',\n 'pre-commit',\n 'pylint==1.9.2',\n 'sphinx',\n 'sphinx_rtd_theme',\n 'yapf',\n]\n\nwith open('README.md') as f:\n readme = f.read()\n\n# Get the package version dynamically\nwith open('VERSION') as v:\n version = v.read().strip()\n\nsetup(\n name='garage',\n version=version,\n author='Reinforcement Learning Working Group',\n description='A framework for reproducible reinforcement learning research',\n url='https://github.com/rlworkgroup/garage',\n packages=find_packages(where='src'),\n package_dir={'': 'src'},\n scripts=['scripts/garage'],\n install_requires=required,\n extras_require=extras,\n license='MIT',\n long_description=readme,\n long_description_content_type='text/markdown',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries',\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import find_packages\nfrom setuptools import setup\n\n# Required dependencies\nrequired = [\n # Please keep alphabetized\n 'akro',\n 'boto3',\n 'cached_property',\n 'click',\n 'cloudpickle',\n 'cma==1.1.06',\n # dm_control throws an error during install about not being able to\n # find a build dependency (absl-py). 
Later pip executes the `install`\n # command again and the install succeeds because absl-py has been\n # installed. This is stupid, but harmless.\n 'dm_control @ https://api.github.com/repos/deepmind/dm_control/tarball/92f9913013face0468442cd0964d5973ea2089ea', # noqa: E501\n 'dowel==0.0.2',\n 'gym[all]==0.10.11',\n 'joblib<0.13,>=0.12',\n 'matplotlib',\n 'mujoco-py<1.50.2,>=1.50.1',\n 'numpy==1.14.5',\n 'psutil',\n 'pyprind',\n 'python-dateutil',\n 'scikit-image',\n 'scipy',\n 'tensorflow<1.13,>=1.12.0',\n 'tensorflow-probability<0.6.0,>=0.5.0', # for tensorflow 1.12\n]\n\n# Dependencies for optional features\nextras = {}\nextras['all'] = list(set(sum(extras.values(), [])))\n\n# Intel dependencies not included in all\nextras['intel'] = ['intel-tensorflow<1.13,>=1.12.0']\n\n# Development dependencies (*not* included in \"all\")\nextras['dev'] = [\n # Please keep alphabetized\n 'baselines @ https://api.github.com/repos/openai/baselines/tarball/f2729693253c0ef4d4086231d36e0a4307ec1cb3', # noqa: E501\n 'coverage',\n 'flake8',\n 'flake8-docstrings==1.3.0',\n 'flake8-import-order',\n 'nose2',\n 'pandas',\n 'pep8-naming==0.7.0',\n 'pre-commit',\n 'pylint==1.9.2',\n 'sphinx',\n 'sphinx_rtd_theme',\n 'yapf',\n]\n\nwith open('README.md') as f:\n readme = f.read()\n\n# Get the package version dynamically\nwith open('VERSION') as v:\n version = v.read().strip()\n\nsetup(\n name='garage',\n version=version,\n author='Reinforcement Learning Working Group',\n description='A framework for reproducible reinforcement learning research',\n url='https://github.com/rlworkgroup/garage',\n packages=find_packages(where='src'),\n package_dir={'': 'src'},\n scripts=['scripts/garage'],\n install_requires=required,\n extras_require=extras,\n license='MIT',\n long_description=readme,\n long_description_content_type='text/markdown',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries',\n ],\n)\n", "path": "setup.py"}]}
| 1,265 | 108 |
gh_patches_debug_16985
|
rasdani/github-patches
|
git_diff
|
searxng__searxng-2369
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Double bangs without query redirect to search URL instead of main page
<!-- PLEASE FILL THESE FIELDS, IT REALLY HELPS THE MAINTAINERS OF SearXNG -->
**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**
`2023.4.21+98387e29`
**How did you install SearXNG?**
`searxng-docker`
**What happened?**
Double bangs without query redirect to search URL instead of main page
**How To Reproduce**
Use external bang without any search items e.g. `!!gh` and we land on `https://github.com/search?utf8=%E2%9C%93&q=`
**Expected behavior**
`!!gh` should redirect to `https://github.com` if no search items
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searx/external_bang.py`
Content:
```
1 # SPDX-License-Identifier: AGPL-3.0-or-later
2
3 from urllib.parse import quote_plus
4 from searx.data import EXTERNAL_BANGS
5
6 LEAF_KEY = chr(16)
7
8
9 def get_node(external_bangs_db, bang):
10 node = external_bangs_db['trie']
11 after = ''
12 before = ''
13 for bang_letter in bang:
14 after += bang_letter
15 if after in node and isinstance(node, dict):
16 node = node[after]
17 before += after
18 after = ''
19 return node, before, after
20
21
22 def get_bang_definition_and_ac(external_bangs_db, bang):
23 node, before, after = get_node(external_bangs_db, bang)
24
25 bang_definition = None
26 bang_ac_list = []
27 if after != '':
28 for k in node:
29 if k.startswith(after):
30 bang_ac_list.append(before + k)
31 elif isinstance(node, dict):
32 bang_definition = node.get(LEAF_KEY)
33 bang_ac_list = [before + k for k in node.keys() if k != LEAF_KEY]
34 elif isinstance(node, str):
35 bang_definition = node
36 bang_ac_list = []
37
38 return bang_definition, bang_ac_list
39
40
41 def resolve_bang_definition(bang_definition, query):
42 url, rank = bang_definition.split(chr(1))
43 url = url.replace(chr(2), quote_plus(query))
44 if url.startswith('//'):
45 url = 'https:' + url
46 rank = int(rank) if len(rank) > 0 else 0
47 return (url, rank)
48
49
50 def get_bang_definition_and_autocomplete(bang, external_bangs_db=None):
51 if external_bangs_db is None:
52 external_bangs_db = EXTERNAL_BANGS
53
54 bang_definition, bang_ac_list = get_bang_definition_and_ac(external_bangs_db, bang)
55
56 new_autocomplete = []
57 current = [*bang_ac_list]
58 done = set()
59 while len(current) > 0:
60 bang_ac = current.pop(0)
61 done.add(bang_ac)
62
63 current_bang_definition, current_bang_ac_list = get_bang_definition_and_ac(external_bangs_db, bang_ac)
64 if current_bang_definition:
65 _, order = resolve_bang_definition(current_bang_definition, '')
66 new_autocomplete.append((bang_ac, order))
67 for new_bang in current_bang_ac_list:
68 if new_bang not in done and new_bang not in current:
69 current.append(new_bang)
70
71 new_autocomplete.sort(key=lambda t: (-t[1], t[0]))
72 new_autocomplete = list(map(lambda t: t[0], new_autocomplete))
73
74 return bang_definition, new_autocomplete
75
76
77 def get_bang_url(search_query, external_bangs_db=None):
78 """
79 Redirects if the user supplied a correct bang search.
80 :param search_query: This is a search_query object which contains preferences and the submitted queries.
81 :return: None if the bang was invalid, else a string of the redirect url.
82 """
83 ret_val = None
84
85 if external_bangs_db is None:
86 external_bangs_db = EXTERNAL_BANGS
87
88 if search_query.external_bang:
89 bang_definition, _ = get_bang_definition_and_ac(external_bangs_db, search_query.external_bang)
90 if bang_definition and isinstance(bang_definition, str):
91 ret_val = resolve_bang_definition(bang_definition, search_query.query)[0]
92
93 return ret_val
94
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/searx/external_bang.py b/searx/external_bang.py
--- a/searx/external_bang.py
+++ b/searx/external_bang.py
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-from urllib.parse import quote_plus
+from urllib.parse import quote_plus, urlparse
from searx.data import EXTERNAL_BANGS
LEAF_KEY = chr(16)
@@ -40,9 +40,15 @@
def resolve_bang_definition(bang_definition, query):
url, rank = bang_definition.split(chr(1))
- url = url.replace(chr(2), quote_plus(query))
if url.startswith('//'):
url = 'https:' + url
+ if query:
+ url = url.replace(chr(2), quote_plus(query))
+ else:
+ # go to main instead of search page
+ o = urlparse(url)
+ url = o.scheme + '://' + o.netloc
+
rank = int(rank) if len(rank) > 0 else 0
return (url, rank)
|
{"golden_diff": "diff --git a/searx/external_bang.py b/searx/external_bang.py\n--- a/searx/external_bang.py\n+++ b/searx/external_bang.py\n@@ -1,6 +1,6 @@\n # SPDX-License-Identifier: AGPL-3.0-or-later\n \n-from urllib.parse import quote_plus\n+from urllib.parse import quote_plus, urlparse\n from searx.data import EXTERNAL_BANGS\n \n LEAF_KEY = chr(16)\n@@ -40,9 +40,15 @@\n \n def resolve_bang_definition(bang_definition, query):\n url, rank = bang_definition.split(chr(1))\n- url = url.replace(chr(2), quote_plus(query))\n if url.startswith('//'):\n url = 'https:' + url\n+ if query:\n+ url = url.replace(chr(2), quote_plus(query))\n+ else:\n+ # go to main instead of search page\n+ o = urlparse(url)\n+ url = o.scheme + '://' + o.netloc\n+\n rank = int(rank) if len(rank) > 0 else 0\n return (url, rank)\n", "issue": "Double bangs without query redirect to search URL instead of main page\n<!-- PLEASE FILL THESE FIELDS, IT REALLY HELPS THE MAINTAINERS OF SearXNG -->\r\n\r\n**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**\r\n`2023.4.21+98387e29`\r\n\r\n**How did you install SearXNG?**\r\n`searxng-docker`\r\n\r\n**What happened?**\r\nDouble bangs without query redirect to search URL instead of main page\r\n\r\n**How To Reproduce**\r\nUse external bang without any search items e.g. `!!gh` and we land on `https://github.com/search?utf8=%E2%9C%93&q=` \r\n\r\n**Expected behavior**\r\n`!!gh` should redirect to `https://github.com` if no search items\n", "before_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n\nfrom urllib.parse import quote_plus\nfrom searx.data import EXTERNAL_BANGS\n\nLEAF_KEY = chr(16)\n\n\ndef get_node(external_bangs_db, bang):\n node = external_bangs_db['trie']\n after = ''\n before = ''\n for bang_letter in bang:\n after += bang_letter\n if after in node and isinstance(node, dict):\n node = node[after]\n before += after\n after = ''\n return node, before, after\n\n\ndef get_bang_definition_and_ac(external_bangs_db, bang):\n node, before, after = get_node(external_bangs_db, bang)\n\n bang_definition = None\n bang_ac_list = []\n if after != '':\n for k in node:\n if k.startswith(after):\n bang_ac_list.append(before + k)\n elif isinstance(node, dict):\n bang_definition = node.get(LEAF_KEY)\n bang_ac_list = [before + k for k in node.keys() if k != LEAF_KEY]\n elif isinstance(node, str):\n bang_definition = node\n bang_ac_list = []\n\n return bang_definition, bang_ac_list\n\n\ndef resolve_bang_definition(bang_definition, query):\n url, rank = bang_definition.split(chr(1))\n url = url.replace(chr(2), quote_plus(query))\n if url.startswith('//'):\n url = 'https:' + url\n rank = int(rank) if len(rank) > 0 else 0\n return (url, rank)\n\n\ndef get_bang_definition_and_autocomplete(bang, external_bangs_db=None):\n if external_bangs_db is None:\n external_bangs_db = EXTERNAL_BANGS\n\n bang_definition, bang_ac_list = get_bang_definition_and_ac(external_bangs_db, bang)\n\n new_autocomplete = []\n current = [*bang_ac_list]\n done = set()\n while len(current) > 0:\n bang_ac = current.pop(0)\n done.add(bang_ac)\n\n current_bang_definition, current_bang_ac_list = get_bang_definition_and_ac(external_bangs_db, bang_ac)\n if current_bang_definition:\n _, order = resolve_bang_definition(current_bang_definition, '')\n new_autocomplete.append((bang_ac, order))\n for new_bang in current_bang_ac_list:\n if new_bang not in done and new_bang not in current:\n current.append(new_bang)\n\n new_autocomplete.sort(key=lambda t: (-t[1], 
t[0]))\n new_autocomplete = list(map(lambda t: t[0], new_autocomplete))\n\n return bang_definition, new_autocomplete\n\n\ndef get_bang_url(search_query, external_bangs_db=None):\n \"\"\"\n Redirects if the user supplied a correct bang search.\n :param search_query: This is a search_query object which contains preferences and the submitted queries.\n :return: None if the bang was invalid, else a string of the redirect url.\n \"\"\"\n ret_val = None\n\n if external_bangs_db is None:\n external_bangs_db = EXTERNAL_BANGS\n\n if search_query.external_bang:\n bang_definition, _ = get_bang_definition_and_ac(external_bangs_db, search_query.external_bang)\n if bang_definition and isinstance(bang_definition, str):\n ret_val = resolve_bang_definition(bang_definition, search_query.query)[0]\n\n return ret_val\n", "path": "searx/external_bang.py"}], "after_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n\nfrom urllib.parse import quote_plus, urlparse\nfrom searx.data import EXTERNAL_BANGS\n\nLEAF_KEY = chr(16)\n\n\ndef get_node(external_bangs_db, bang):\n node = external_bangs_db['trie']\n after = ''\n before = ''\n for bang_letter in bang:\n after += bang_letter\n if after in node and isinstance(node, dict):\n node = node[after]\n before += after\n after = ''\n return node, before, after\n\n\ndef get_bang_definition_and_ac(external_bangs_db, bang):\n node, before, after = get_node(external_bangs_db, bang)\n\n bang_definition = None\n bang_ac_list = []\n if after != '':\n for k in node:\n if k.startswith(after):\n bang_ac_list.append(before + k)\n elif isinstance(node, dict):\n bang_definition = node.get(LEAF_KEY)\n bang_ac_list = [before + k for k in node.keys() if k != LEAF_KEY]\n elif isinstance(node, str):\n bang_definition = node\n bang_ac_list = []\n\n return bang_definition, bang_ac_list\n\n\ndef resolve_bang_definition(bang_definition, query):\n url, rank = bang_definition.split(chr(1))\n if url.startswith('//'):\n url = 'https:' + url\n if query:\n url = url.replace(chr(2), quote_plus(query))\n else:\n # go to main instead of search page\n o = urlparse(url)\n url = o.scheme + '://' + o.netloc\n\n rank = int(rank) if len(rank) > 0 else 0\n return (url, rank)\n\n\ndef get_bang_definition_and_autocomplete(bang, external_bangs_db=None):\n if external_bangs_db is None:\n external_bangs_db = EXTERNAL_BANGS\n\n bang_definition, bang_ac_list = get_bang_definition_and_ac(external_bangs_db, bang)\n\n new_autocomplete = []\n current = [*bang_ac_list]\n done = set()\n while len(current) > 0:\n bang_ac = current.pop(0)\n done.add(bang_ac)\n\n current_bang_definition, current_bang_ac_list = get_bang_definition_and_ac(external_bangs_db, bang_ac)\n if current_bang_definition:\n _, order = resolve_bang_definition(current_bang_definition, '')\n new_autocomplete.append((bang_ac, order))\n for new_bang in current_bang_ac_list:\n if new_bang not in done and new_bang not in current:\n current.append(new_bang)\n\n new_autocomplete.sort(key=lambda t: (-t[1], t[0]))\n new_autocomplete = list(map(lambda t: t[0], new_autocomplete))\n\n return bang_definition, new_autocomplete\n\n\ndef get_bang_url(search_query, external_bangs_db=None):\n \"\"\"\n Redirects if the user supplied a correct bang search.\n :param search_query: This is a search_query object which contains preferences and the submitted queries.\n :return: None if the bang was invalid, else a string of the redirect url.\n \"\"\"\n ret_val = None\n\n if external_bangs_db is None:\n external_bangs_db = EXTERNAL_BANGS\n\n if 
search_query.external_bang:\n bang_definition, _ = get_bang_definition_and_ac(external_bangs_db, search_query.external_bang)\n if bang_definition and isinstance(bang_definition, str):\n ret_val = resolve_bang_definition(bang_definition, search_query.query)[0]\n\n return ret_val\n", "path": "searx/external_bang.py"}]}
| 1,395 | 253 |
gh_patches_debug_2243
|
rasdani/github-patches
|
git_diff
|
streamlink__streamlink-5023
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
plugins.vtvgo: '403 Client Error: Forbidden for url: ...'
### Checklist
- [X] This is a plugin issue and not a different kind of issue
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
Latest build from the master branch
### Description
Last month VtvGo added cookie requirements for the stream playlist, and now it seems that they added another security layer. The request to the website returns error 403.
### Debug log
```text
streamlink https://vtvgo.vn/xem-truc-tuyen-kenh-vtv3-3.html --loglevel=debug
[cli][debug] OS: Linux-5.15.0-53-generic-x86_64-with-glibc2.35
[cli][debug] Python: 3.10.6
[cli][debug] Streamlink: 5.1.2+4.g68dad105
[cli][debug] Dependencies:
[cli][debug] certifi: 2022.9.24
[cli][debug] isodate: 0.6.1
[cli][debug] lxml: 4.9.1
[cli][debug] pycountry: 22.3.5
[cli][debug] pycryptodome: 3.15.0
[cli][debug] PySocks: 1.7.1
[cli][debug] requests: 2.28.1
[cli][debug] urllib3: 1.26.12
[cli][debug] websocket-client: 1.4.1
[cli][debug] importlib-metadata: 4.6.4
[cli][debug] Arguments:
[cli][debug] url=https://vtvgo.vn/xem-truc-tuyen-kenh-vtv3-3.html
[cli][debug] --loglevel=debug
[cli][info] Found matching plugin vtvgo for URL https://vtvgo.vn/xem-truc-tuyen-kenh-vtv3-3.html
error: Unable to open URL: https://vtvgo.vn/ajax-get-stream (403 Client Error: Forbidden for url: https://vtvgo.vn/ajax-get-stream)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/vtvgo.py`
Content:
```
1 """
2 $description Live TV channels from VTV, a Vietnamese public, state-owned broadcaster.
3 $url vtvgo.vn
4 $type live
5 """
6
7 import logging
8 import re
9
10 from streamlink.plugin import Plugin, pluginmatcher
11 from streamlink.plugin.api import validate
12 from streamlink.stream.hls import HLSStream
13
14 log = logging.getLogger(__name__)
15
16
17 @pluginmatcher(re.compile(
18 r"https?://vtvgo\.vn/xem-truc-tuyen-kenh-"
19 ))
20 class VTVgo(Plugin):
21 AJAX_URL = "https://vtvgo.vn/ajax-get-stream"
22
23 def _get_streams(self):
24 # get cookies
25 self.session.http.get("https://vtvgo.vn/")
26
27 self.session.http.headers.update({
28 "Origin": "https://vtvgo.vn",
29 "Referer": self.url,
30 "X-Requested-With": "XMLHttpRequest",
31 })
32
33 params = self.session.http.get(self.url, schema=validate.Schema(
34 validate.parse_html(),
35 validate.xml_xpath_string(".//script[contains(text(),'setplayer(')][1]/text()"),
36 validate.none_or_all(
37 validate.regex(
38 re.compile(r"""var\s+(?P<key>(?:type_)?id|time|token)\s*=\s*["']?(?P<value>[^"']+)["']?;"""),
39 method="findall",
40 ),
41 [
42 ("id", int),
43 ("type_id", str),
44 ("time", str),
45 ("token", str),
46 ],
47 ),
48 ))
49 if not params:
50 return
51
52 log.trace(f"{params!r}")
53 hls_url = self.session.http.post(
54 self.AJAX_URL,
55 data=dict(params),
56 schema=validate.Schema(
57 validate.parse_json(),
58 {"stream_url": [validate.url()]},
59 validate.get(("stream_url", 0)),
60 ),
61 )
62
63 return HLSStream.parse_variant_playlist(self.session, hls_url)
64
65
66 __plugin__ = VTVgo
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/streamlink/plugins/vtvgo.py b/src/streamlink/plugins/vtvgo.py
--- a/src/streamlink/plugins/vtvgo.py
+++ b/src/streamlink/plugins/vtvgo.py
@@ -27,6 +27,7 @@
self.session.http.headers.update({
"Origin": "https://vtvgo.vn",
"Referer": self.url,
+ "Sec-Fetch-Site": "same-origin",
"X-Requested-With": "XMLHttpRequest",
})
|
{"golden_diff": "diff --git a/src/streamlink/plugins/vtvgo.py b/src/streamlink/plugins/vtvgo.py\n--- a/src/streamlink/plugins/vtvgo.py\n+++ b/src/streamlink/plugins/vtvgo.py\n@@ -27,6 +27,7 @@\n self.session.http.headers.update({\n \"Origin\": \"https://vtvgo.vn\",\n \"Referer\": self.url,\n+ \"Sec-Fetch-Site\": \"same-origin\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n })\n", "issue": "plugins.vtvgo: '403 Client Error: Forbidden for url: ...'\n### Checklist\n\n- [X] This is a plugin issue and not a different kind of issue\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nLatest build from the master branch\n\n### Description\n\nLast month VtvGo added cookie requirements for the stream playlist, and now it seems that they added another security layer. The request to the website returns error 403.\n\n### Debug log\n\n```text\nstreamlink https://vtvgo.vn/xem-truc-tuyen-kenh-vtv3-3.html --loglevel=debug\r\n[cli][debug] OS: Linux-5.15.0-53-generic-x86_64-with-glibc2.35\r\n[cli][debug] Python: 3.10.6\r\n[cli][debug] Streamlink: 5.1.2+4.g68dad105\r\n[cli][debug] Dependencies:\r\n[cli][debug] certifi: 2022.9.24\r\n[cli][debug] isodate: 0.6.1\r\n[cli][debug] lxml: 4.9.1\r\n[cli][debug] pycountry: 22.3.5\r\n[cli][debug] pycryptodome: 3.15.0\r\n[cli][debug] PySocks: 1.7.1\r\n[cli][debug] requests: 2.28.1\r\n[cli][debug] urllib3: 1.26.12\r\n[cli][debug] websocket-client: 1.4.1\r\n[cli][debug] importlib-metadata: 4.6.4\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://vtvgo.vn/xem-truc-tuyen-kenh-vtv3-3.html\r\n[cli][debug] --loglevel=debug\r\n[cli][info] Found matching plugin vtvgo for URL https://vtvgo.vn/xem-truc-tuyen-kenh-vtv3-3.html\r\nerror: Unable to open URL: https://vtvgo.vn/ajax-get-stream (403 Client Error: Forbidden for url: https://vtvgo.vn/ajax-get-stream)\n```\n\n", "before_files": [{"content": "\"\"\"\n$description Live TV channels from VTV, a Vietnamese public, state-owned broadcaster.\n$url vtvgo.vn\n$type live\n\"\"\"\n\nimport logging\nimport re\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.hls import HLSStream\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://vtvgo\\.vn/xem-truc-tuyen-kenh-\"\n))\nclass VTVgo(Plugin):\n AJAX_URL = \"https://vtvgo.vn/ajax-get-stream\"\n\n def _get_streams(self):\n # get cookies\n self.session.http.get(\"https://vtvgo.vn/\")\n\n self.session.http.headers.update({\n \"Origin\": \"https://vtvgo.vn\",\n \"Referer\": self.url,\n \"X-Requested-With\": \"XMLHttpRequest\",\n })\n\n params = self.session.http.get(self.url, schema=validate.Schema(\n validate.parse_html(),\n validate.xml_xpath_string(\".//script[contains(text(),'setplayer(')][1]/text()\"),\n validate.none_or_all(\n validate.regex(\n re.compile(r\"\"\"var\\s+(?P<key>(?:type_)?id|time|token)\\s*=\\s*[\"']?(?P<value>[^\"']+)[\"']?;\"\"\"),\n method=\"findall\",\n ),\n [\n (\"id\", int),\n (\"type_id\", str),\n (\"time\", str),\n (\"token\", str),\n ],\n ),\n ))\n if not params:\n return\n\n log.trace(f\"{params!r}\")\n hls_url = self.session.http.post(\n self.AJAX_URL,\n 
data=dict(params),\n schema=validate.Schema(\n validate.parse_json(),\n {\"stream_url\": [validate.url()]},\n validate.get((\"stream_url\", 0)),\n ),\n )\n\n return HLSStream.parse_variant_playlist(self.session, hls_url)\n\n\n__plugin__ = VTVgo\n", "path": "src/streamlink/plugins/vtvgo.py"}], "after_files": [{"content": "\"\"\"\n$description Live TV channels from VTV, a Vietnamese public, state-owned broadcaster.\n$url vtvgo.vn\n$type live\n\"\"\"\n\nimport logging\nimport re\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.hls import HLSStream\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://vtvgo\\.vn/xem-truc-tuyen-kenh-\"\n))\nclass VTVgo(Plugin):\n AJAX_URL = \"https://vtvgo.vn/ajax-get-stream\"\n\n def _get_streams(self):\n # get cookies\n self.session.http.get(\"https://vtvgo.vn/\")\n\n self.session.http.headers.update({\n \"Origin\": \"https://vtvgo.vn\",\n \"Referer\": self.url,\n \"Sec-Fetch-Site\": \"same-origin\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n })\n\n params = self.session.http.get(self.url, schema=validate.Schema(\n validate.parse_html(),\n validate.xml_xpath_string(\".//script[contains(text(),'setplayer(')][1]/text()\"),\n validate.none_or_all(\n validate.regex(\n re.compile(r\"\"\"var\\s+(?P<key>(?:type_)?id|time|token)\\s*=\\s*[\"']?(?P<value>[^\"']+)[\"']?;\"\"\"),\n method=\"findall\",\n ),\n [\n (\"id\", int),\n (\"type_id\", str),\n (\"time\", str),\n (\"token\", str),\n ],\n ),\n ))\n if not params:\n return\n\n log.trace(f\"{params!r}\")\n hls_url = self.session.http.post(\n self.AJAX_URL,\n data=dict(params),\n schema=validate.Schema(\n validate.parse_json(),\n {\"stream_url\": [validate.url()]},\n validate.get((\"stream_url\", 0)),\n ),\n )\n\n return HLSStream.parse_variant_playlist(self.session, hls_url)\n\n\n__plugin__ = VTVgo\n", "path": "src/streamlink/plugins/vtvgo.py"}]}
| 1,429 | 108 |
gh_patches_debug_7931
|
rasdani/github-patches
|
git_diff
|
conan-io__conan-center-index-14099
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[service] msbuild package template has import incompatible with Conan 2.0
At https://github.com/conan-io/conan-center-index/blob/53ef0688c436c6697d8312d47fcdef68fe7e6955/docs/package_templates/msbuild_package/all/conanfile.py#L4, `vs_layout` is imported from `conan.tools.layout`. However, when using this import in [my PR](https://github.com/conan-io/conan-center-index/pull/13646), the CI informed me that the recipe was not ready for Conan v2, citing this import error:
```
ERROR: Error loading conanfile at '/home/conan/w/prod_cci_PR-13646/recipes/lzham/all/conanfile.py': Unable to load conanfile in /home/conan/w/prod_cci_PR-13646/recipes/lzham/all/conanfile.py
File "<frozen importlib._bootstrap_external>", line 728, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/home/conan/w/prod_cci_PR-13646/recipes/lzham/all/conanfile.py", line 8, in <module>
from conan.tools.layout import vs_layout
ImportError: cannot import name 'vs_layout' from 'conan.tools.layout' (/opt/pyenv/versions/3.7.13/lib/python3.7/site-packages/conan/tools/layout/__init__.py)
```
Therefore, it seems the msbuild template should import `vs_layout` from `conan.tools.microsoft` instead.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/package_templates/msbuild_package/all/conanfile.py`
Content:
```
1 from conan import ConanFile
2 from conan.errors import ConanInvalidConfiguration
3 from conan.tools.microsoft import is_msvc, MSBuildDeps, MSBuildToolchain, MSBuild, VCVars
4 from conan.tools.layout import vs_layout
5 from conan.tools.files import apply_conandata_patches, export_conandata_patches, get, copy, rm, replace_in_file
6 import os
7
8
9 required_conan_version = ">=1.53.0"
10
11
12 class PackageConan(ConanFile):
13 name = "package"
14 description = "short description"
15 # Use short name only, conform to SPDX License List: https://spdx.org/licenses/
16 # In case not listed there, use "LicenseRef-<license-file-name>"
17 license = ""
18 url = "https://github.com/conan-io/conan-center-index"
19 homepage = "https://github.com/project/package"
20 # no "conan" and project name in topics. Use topics from the upstream listed on GH
21 topics = ("topic1", "topic2", "topic3")
22 settings = "os", "arch", "compiler", "build_type"
23 options = {
24 "shared": [True, False],
25 "fPIC": [True, False],
26 }
27 default_options = {
28 "shared": False,
29 "fPIC": True,
30 }
31
32 # no exports_sources attribute, but export_sources(self) method instead
33 # this allows finer grain exportation of patches per version
34 def export_sources(self):
35 export_conandata_patches(self)
36
37 def config_options(self):
38 if self.settings.os == "Windows":
39 del self.options.fPIC
40
41 def configure(self):
42 if self.options.shared:
43 self.options.rm_safe("fPIC")
44 # for plain C projects only
45 self.settings.rm_safe("compiler.libcxx")
46 self.settings.rm_safe("compiler.cppstd")
47
48 def layout(self):
49 vs_layout(self)
50
51 def requirements(self):
52 # prefer self.requires method instead of requires attribute
53 self.requires("dependency/0.8.1")
54
55 def validate(self):
56 # in case it does not work in another configuration, it should validated here too
57 if not is_msvc(self):
58 raise ConanInvalidConfiguration(f"{self.ref} can be built only by Visual Studio and msvc.")
59
60 # if another tool than the compiler or CMake is required to build the project (pkgconf, bison, flex etc)
61 def build_requirements(self):
62 self.tool_requires("tool/x.y.z")
63
64 def source(self):
65 get(self, **self.conan_data["sources"][self.version], destination=self.source_folder, strip_root=True)
66
67 def generate(self):
68 tc = MSBuildToolchain(self)
69 tc.generate()
70 tc = MSBuildDeps(self)
71 tc.generate()
72 tc = VCVars(self)
73 tc.generate()
74
75 def _patch_sources(self):
76 apply_conandata_patches(self)
77 # remove bundled xxhash
78 rm(self, "whateer.*", os.path.join(self.source_folder, "lib"))
79 replace_in_file(self, os.path.join(self.source_folder, "CMakeLists.txt"), "...", "")
80
81 def build(self):
82 self._patch_sources() # It can be apply_conandata_patches(self) only in case no more patches are needed
83 msbuild = MSBuild(self)
84 # customize to Release when RelWithDebInfo
85 msbuild.build_type = "Debug" if self.settings.build_type == "Debug" else "Release"
86 # use Win32 instead of the default value when building x86
87 msbuild.platform = "Win32" if self.settings.arch == "x86" else msbuild.platform
88 # customize according the solution file and compiler version
89 msbuild.build(sln="project_2017.sln")
90
91 def package(self):
92 copy(self, pattern="LICENSE", dst=os.path.join(self.package_folder, "licenses"), src=self.source_folder)
93 copy(
94 self, pattern="*.lib", dst=os.path.join(self.package_folder, "lib"), src=self.build_folder, keep_path=False
95 )
96 copy(
97 self, pattern="*.dll", dst=os.path.join(self.package_folder, "bin"), src=self.build_folder, keep_path=False
98 )
99 copy(
100 self,
101 pattern="*.h",
102 dst=os.path.join(self.package_folder, "include"),
103 src=os.path.join(self.source_folder, "include"),
104 )
105
106 def package_info(self):
107 self.cpp_info.libs = ["package_lib"]
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/package_templates/msbuild_package/all/conanfile.py b/docs/package_templates/msbuild_package/all/conanfile.py
--- a/docs/package_templates/msbuild_package/all/conanfile.py
+++ b/docs/package_templates/msbuild_package/all/conanfile.py
@@ -1,7 +1,6 @@
from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
-from conan.tools.microsoft import is_msvc, MSBuildDeps, MSBuildToolchain, MSBuild, VCVars
-from conan.tools.layout import vs_layout
+from conan.tools.microsoft import is_msvc, vs_layout, MSBuildDeps, MSBuildToolchain, MSBuild, VCVars
from conan.tools.files import apply_conandata_patches, export_conandata_patches, get, copy, rm, replace_in_file
import os
|
{"golden_diff": "diff --git a/docs/package_templates/msbuild_package/all/conanfile.py b/docs/package_templates/msbuild_package/all/conanfile.py\n--- a/docs/package_templates/msbuild_package/all/conanfile.py\n+++ b/docs/package_templates/msbuild_package/all/conanfile.py\n@@ -1,7 +1,6 @@\n from conan import ConanFile\n from conan.errors import ConanInvalidConfiguration\n-from conan.tools.microsoft import is_msvc, MSBuildDeps, MSBuildToolchain, MSBuild, VCVars\n-from conan.tools.layout import vs_layout\n+from conan.tools.microsoft import is_msvc, vs_layout, MSBuildDeps, MSBuildToolchain, MSBuild, VCVars\n from conan.tools.files import apply_conandata_patches, export_conandata_patches, get, copy, rm, replace_in_file\n import os\n", "issue": "[service] msbuild package template has import incompatible with Conan 2.0\nAt https://github.com/conan-io/conan-center-index/blob/53ef0688c436c6697d8312d47fcdef68fe7e6955/docs/package_templates/msbuild_package/all/conanfile.py#L4, `vs_layout` is imported from `conan.tools.layout`. However, when using this import in [my PR](https://github.com/conan-io/conan-center-index/pull/13646), the CI informed me that the recipe was not ready for Conan v2, citing this import error:\r\n\r\n```\r\nERROR: Error loading conanfile at '/home/conan/w/prod_cci_PR-13646/recipes/lzham/all/conanfile.py': Unable to load conanfile in /home/conan/w/prod_cci_PR-13646/recipes/lzham/all/conanfile.py\r\n File \"<frozen importlib._bootstrap_external>\", line 728, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 219, in _call_with_frames_removed\r\n File \"/home/conan/w/prod_cci_PR-13646/recipes/lzham/all/conanfile.py\", line 8, in <module>\r\n from conan.tools.layout import vs_layout\r\nImportError: cannot import name 'vs_layout' from 'conan.tools.layout' (/opt/pyenv/versions/3.7.13/lib/python3.7/site-packages/conan/tools/layout/__init__.py)\r\n```\r\nTherefore, it seems the msbuild template should import `vs_layout` from `conan.tools.microsoft` instead.\r\n\n", "before_files": [{"content": "from conan import ConanFile\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.microsoft import is_msvc, MSBuildDeps, MSBuildToolchain, MSBuild, VCVars\nfrom conan.tools.layout import vs_layout\nfrom conan.tools.files import apply_conandata_patches, export_conandata_patches, get, copy, rm, replace_in_file\nimport os\n\n\nrequired_conan_version = \">=1.53.0\"\n\n\nclass PackageConan(ConanFile):\n name = \"package\"\n description = \"short description\"\n # Use short name only, conform to SPDX License List: https://spdx.org/licenses/\n # In case not listed there, use \"LicenseRef-<license-file-name>\"\n license = \"\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/project/package\"\n # no \"conan\" and project name in topics. 
Use topics from the upstream listed on GH\n topics = (\"topic1\", \"topic2\", \"topic3\")\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n }\n\n # no exports_sources attribute, but export_sources(self) method instead\n # this allows finer grain exportation of patches per version\n def export_sources(self):\n export_conandata_patches(self)\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n self.options.rm_safe(\"fPIC\")\n # for plain C projects only\n self.settings.rm_safe(\"compiler.libcxx\")\n self.settings.rm_safe(\"compiler.cppstd\")\n\n def layout(self):\n vs_layout(self)\n\n def requirements(self):\n # prefer self.requires method instead of requires attribute\n self.requires(\"dependency/0.8.1\")\n\n def validate(self):\n # in case it does not work in another configuration, it should validated here too\n if not is_msvc(self):\n raise ConanInvalidConfiguration(f\"{self.ref} can be built only by Visual Studio and msvc.\")\n\n # if another tool than the compiler or CMake is required to build the project (pkgconf, bison, flex etc)\n def build_requirements(self):\n self.tool_requires(\"tool/x.y.z\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version], destination=self.source_folder, strip_root=True)\n\n def generate(self):\n tc = MSBuildToolchain(self)\n tc.generate()\n tc = MSBuildDeps(self)\n tc.generate()\n tc = VCVars(self)\n tc.generate()\n\n def _patch_sources(self):\n apply_conandata_patches(self)\n # remove bundled xxhash\n rm(self, \"whateer.*\", os.path.join(self.source_folder, \"lib\"))\n replace_in_file(self, os.path.join(self.source_folder, \"CMakeLists.txt\"), \"...\", \"\")\n\n def build(self):\n self._patch_sources() # It can be apply_conandata_patches(self) only in case no more patches are needed\n msbuild = MSBuild(self)\n # customize to Release when RelWithDebInfo\n msbuild.build_type = \"Debug\" if self.settings.build_type == \"Debug\" else \"Release\"\n # use Win32 instead of the default value when building x86\n msbuild.platform = \"Win32\" if self.settings.arch == \"x86\" else msbuild.platform\n # customize according the solution file and compiler version\n msbuild.build(sln=\"project_2017.sln\")\n\n def package(self):\n copy(self, pattern=\"LICENSE\", dst=os.path.join(self.package_folder, \"licenses\"), src=self.source_folder)\n copy(\n self, pattern=\"*.lib\", dst=os.path.join(self.package_folder, \"lib\"), src=self.build_folder, keep_path=False\n )\n copy(\n self, pattern=\"*.dll\", dst=os.path.join(self.package_folder, \"bin\"), src=self.build_folder, keep_path=False\n )\n copy(\n self,\n pattern=\"*.h\",\n dst=os.path.join(self.package_folder, \"include\"),\n src=os.path.join(self.source_folder, \"include\"),\n )\n\n def package_info(self):\n self.cpp_info.libs = [\"package_lib\"]\n", "path": "docs/package_templates/msbuild_package/all/conanfile.py"}], "after_files": [{"content": "from conan import ConanFile\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.microsoft import is_msvc, vs_layout, MSBuildDeps, MSBuildToolchain, MSBuild, VCVars\nfrom conan.tools.files import apply_conandata_patches, export_conandata_patches, get, copy, rm, replace_in_file\nimport os\n\n\nrequired_conan_version = \">=1.53.0\"\n\n\nclass PackageConan(ConanFile):\n name = \"package\"\n description = 
\"short description\"\n # Use short name only, conform to SPDX License List: https://spdx.org/licenses/\n # In case not listed there, use \"LicenseRef-<license-file-name>\"\n license = \"\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/project/package\"\n # no \"conan\" and project name in topics. Use topics from the upstream listed on GH\n topics = (\"topic1\", \"topic2\", \"topic3\")\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n }\n\n # no exports_sources attribute, but export_sources(self) method instead\n # this allows finer grain exportation of patches per version\n def export_sources(self):\n export_conandata_patches(self)\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n self.options.rm_safe(\"fPIC\")\n # for plain C projects only\n self.settings.rm_safe(\"compiler.libcxx\")\n self.settings.rm_safe(\"compiler.cppstd\")\n\n def layout(self):\n # src_folder must use the same source folder name the project\n vs_layout(self, src_folder=\"src\")\n\n def requirements(self):\n # prefer self.requires method instead of requires attribute\n self.requires(\"dependency/0.8.1\")\n\n def validate(self):\n # in case it does not work in another configuration, it should validated here too\n if not is_msvc(self):\n raise ConanInvalidConfiguration(f\"{self.ref} can be built only by Visual Studio and msvc.\")\n\n # if another tool than the compiler or CMake is required to build the project (pkgconf, bison, flex etc)\n def build_requirements(self):\n self.tool_requires(\"tool/x.y.z\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version], destination=self.source_folder, strip_root=True)\n\n def generate(self):\n tc = MSBuildToolchain(self)\n tc.generate()\n tc = MSBuildDeps(self)\n tc.generate()\n tc = VCVars(self)\n tc.generate()\n\n def _patch_sources(self):\n apply_conandata_patches(self)\n # remove bundled xxhash\n rm(self, \"whateer.*\", os.path.join(self.source_folder, \"lib\"))\n replace_in_file(self, os.path.join(self.source_folder, \"CMakeLists.txt\"), \"...\", \"\")\n\n def build(self):\n self._patch_sources() # It can be apply_conandata_patches(self) only in case no more patches are needed\n msbuild = MSBuild(self)\n # customize to Release when RelWithDebInfo\n msbuild.build_type = \"Debug\" if self.settings.build_type == \"Debug\" else \"Release\"\n # use Win32 instead of the default value when building x86\n msbuild.platform = \"Win32\" if self.settings.arch == \"x86\" else msbuild.platform\n # customize according the solution file and compiler version\n msbuild.build(sln=\"project_2017.sln\")\n\n def package(self):\n copy(self, pattern=\"LICENSE\", dst=os.path.join(self.package_folder, \"licenses\"), src=self.source_folder)\n copy(\n self, pattern=\"*.lib\", dst=os.path.join(self.package_folder, \"lib\"), src=self.build_folder, keep_path=False\n )\n copy(\n self, pattern=\"*.dll\", dst=os.path.join(self.package_folder, \"bin\"), src=self.build_folder, keep_path=False\n )\n copy(\n self,\n pattern=\"*.h\",\n dst=os.path.join(self.package_folder, \"include\"),\n src=os.path.join(self.source_folder, \"include\"),\n )\n\n def package_info(self):\n self.cpp_info.libs = [\"package_lib\"]\n", "path": "docs/package_templates/msbuild_package/all/conanfile.py"}]}
| 1,837 | 180 |
gh_patches_debug_29824
|
rasdani/github-patches
|
git_diff
|
microsoft__Qcodes-531
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Keithley2600 driver broken after commit 460c93c71506213102e56d3ea0c518723666d56a
### Steps to reproduce
1. Connect Keithley 2600
2. ask it for output status or mode
3. doh
### Expected behaviour
tell me what i asked for
### Actual behaviour
error because parsing fails
issue introduced in commit 460c93c71506213102e56d3ea0c518723666d56a (@alexcjohnson)
### System
**operating system**
W10
**qcodes branch**
master
**qcodes commit**
177f8201b3778c0071c60d4e4c658449284e1a6c
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qcodes/instrument_drivers/tektronix/Keithley_2600.py`
Content:
```
1 from qcodes import VisaInstrument
2
3
4 class Keithley_2600(VisaInstrument):
5 """
6 channel: use channel 'a' or 'b'
7
8 This is the qcodes driver for the Keithley_2600 Source-Meter series,
9 tested with Keithley_2614B
10
11 Status: beta-version.
12 TODO:
13 - Add all parameters that are in the manual
14 - range and limit should be set according to mode
15 - add ramping and such stuff
16
17 """
18 def __init__(self, name, address, channel, **kwargs):
19 super().__init__(name, address, terminator='\n', **kwargs)
20 self._channel = channel
21
22 self.add_parameter('volt', get_cmd='measure.v()',
23 get_parser=float, set_cmd='source.levelv={:.12f}',
24 label='Voltage',
25 unit='V')
26 self.add_parameter('curr', get_cmd='measure.i()',
27 get_parser=float, set_cmd='source.leveli={:.12f}',
28 label='Current',
29 unit='A')
30 self.add_parameter('mode',
31 get_cmd='source.func',
32 set_cmd='source.func={:d}',
33 val_mapping={'current': 0, 'voltage': 1})
34 self.add_parameter('output',
35 get_cmd='source.output',
36 set_cmd='source.output={:d}',
37 val_mapping={'on': 1, 'off': 0})
38 # Source range
39 # needs get after set
40 self.add_parameter('rangev',
41 get_cmd='source.rangev',
42 get_parser=float,
43 set_cmd='source.rangev={:.4f}',
44 unit='V')
45 # Measure range
46 # needs get after set
47 self.add_parameter('rangei',
48 get_cmd='source.rangei',
49 get_parser=float,
50 set_cmd='source.rangei={:.4f}',
51 unit='A')
52 # Compliance limit
53 self.add_parameter('limitv',
54 get_cmd='source.limitv',
55 get_parser=float,
56 set_cmd='source.limitv={:.4f}',
57 unit='V')
58 # Compliance limit
59 self.add_parameter('limiti',
60 get_cmd='source.limiti',
61 get_parser=float,
62 set_cmd='source.limiti={:.4f}',
63 unit='A')
64
65 self.connect_message()
66
67 def get_idn(self):
68 IDN = self.ask_raw('*IDN?')
69 vendor, model, serial, firmware = map(str.strip, IDN.split(','))
70 model = model[6:]
71
72 IDN = {'vendor': vendor, 'model': model,
73 'serial': serial, 'firmware': firmware}
74 return IDN
75
76 def reset(self):
77 self.write('reset()')
78
79 def ask(self, cmd):
80 return super().ask('print(smu{:s}.{:s})'.format(self._channel, cmd))
81
82 def write(self, cmd):
83 super().write('smu{:s}.{:s}'.format(self._channel, cmd))
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/qcodes/instrument_drivers/tektronix/Keithley_2600.py b/qcodes/instrument_drivers/tektronix/Keithley_2600.py
--- a/qcodes/instrument_drivers/tektronix/Keithley_2600.py
+++ b/qcodes/instrument_drivers/tektronix/Keithley_2600.py
@@ -19,20 +19,26 @@
super().__init__(name, address, terminator='\n', **kwargs)
self._channel = channel
- self.add_parameter('volt', get_cmd='measure.v()',
- get_parser=float, set_cmd='source.levelv={:.12f}',
+ self.add_parameter('volt',
+ get_cmd='measure.v()',
+ get_parser=float,
+ set_cmd='source.levelv={:.12f}',
label='Voltage',
unit='V')
- self.add_parameter('curr', get_cmd='measure.i()',
- get_parser=float, set_cmd='source.leveli={:.12f}',
+ self.add_parameter('curr',
+ get_cmd='measure.i()',
+ get_parser=float,
+ set_cmd='source.leveli={:.12f}',
label='Current',
unit='A')
self.add_parameter('mode',
get_cmd='source.func',
+ get_parser=float,
set_cmd='source.func={:d}',
val_mapping={'current': 0, 'voltage': 1})
self.add_parameter('output',
get_cmd='source.output',
+ get_parser=float,
set_cmd='source.output={:d}',
val_mapping={'on': 1, 'off': 0})
# Source range
|
{"golden_diff": "diff --git a/qcodes/instrument_drivers/tektronix/Keithley_2600.py b/qcodes/instrument_drivers/tektronix/Keithley_2600.py\n--- a/qcodes/instrument_drivers/tektronix/Keithley_2600.py\n+++ b/qcodes/instrument_drivers/tektronix/Keithley_2600.py\n@@ -19,20 +19,26 @@\n super().__init__(name, address, terminator='\\n', **kwargs)\n self._channel = channel\n \n- self.add_parameter('volt', get_cmd='measure.v()',\n- get_parser=float, set_cmd='source.levelv={:.12f}',\n+ self.add_parameter('volt',\n+ get_cmd='measure.v()',\n+ get_parser=float,\n+ set_cmd='source.levelv={:.12f}',\n label='Voltage',\n unit='V')\n- self.add_parameter('curr', get_cmd='measure.i()',\n- get_parser=float, set_cmd='source.leveli={:.12f}',\n+ self.add_parameter('curr',\n+ get_cmd='measure.i()',\n+ get_parser=float,\n+ set_cmd='source.leveli={:.12f}',\n label='Current',\n unit='A')\n self.add_parameter('mode',\n get_cmd='source.func',\n+ get_parser=float,\n set_cmd='source.func={:d}',\n val_mapping={'current': 0, 'voltage': 1})\n self.add_parameter('output',\n get_cmd='source.output',\n+ get_parser=float,\n set_cmd='source.output={:d}',\n val_mapping={'on': 1, 'off': 0})\n # Source range\n", "issue": "Keithley2600 driver broken after commit 460c93c71506213102e56d3ea0c518723666d56a\n\r\n### Steps to reproduce\r\n1. Connect Keithley 2600\r\n2. ask it for output status or mode\r\n3. doh\r\n\r\n### Expected behaviour\r\ntell me what i asked for\r\n\r\n### Actual behaviour\r\nerror because parsing fails\r\nissue introduced in commit 460c93c71506213102e56d3ea0c518723666d56a (@alexcjohnson)\r\n\r\n\r\n### System\r\n**operating system**\r\nW10\r\n**qcodes branch**\r\nmaster\r\n**qcodes commit**\r\n177f8201b3778c0071c60d4e4c658449284e1a6c \n", "before_files": [{"content": "from qcodes import VisaInstrument\n\n\nclass Keithley_2600(VisaInstrument):\n \"\"\"\n channel: use channel 'a' or 'b'\n\n This is the qcodes driver for the Keithley_2600 Source-Meter series,\n tested with Keithley_2614B\n\n Status: beta-version.\n TODO:\n - Add all parameters that are in the manual\n - range and limit should be set according to mode\n - add ramping and such stuff\n\n \"\"\"\n def __init__(self, name, address, channel, **kwargs):\n super().__init__(name, address, terminator='\\n', **kwargs)\n self._channel = channel\n\n self.add_parameter('volt', get_cmd='measure.v()',\n get_parser=float, set_cmd='source.levelv={:.12f}',\n label='Voltage',\n unit='V')\n self.add_parameter('curr', get_cmd='measure.i()',\n get_parser=float, set_cmd='source.leveli={:.12f}',\n label='Current',\n unit='A')\n self.add_parameter('mode',\n get_cmd='source.func',\n set_cmd='source.func={:d}',\n val_mapping={'current': 0, 'voltage': 1})\n self.add_parameter('output',\n get_cmd='source.output',\n set_cmd='source.output={:d}',\n val_mapping={'on': 1, 'off': 0})\n # Source range\n # needs get after set\n self.add_parameter('rangev',\n get_cmd='source.rangev',\n get_parser=float,\n set_cmd='source.rangev={:.4f}',\n unit='V')\n # Measure range\n # needs get after set\n self.add_parameter('rangei',\n get_cmd='source.rangei',\n get_parser=float,\n set_cmd='source.rangei={:.4f}',\n unit='A')\n # Compliance limit\n self.add_parameter('limitv',\n get_cmd='source.limitv',\n get_parser=float,\n set_cmd='source.limitv={:.4f}',\n unit='V')\n # Compliance limit\n self.add_parameter('limiti',\n get_cmd='source.limiti',\n get_parser=float,\n set_cmd='source.limiti={:.4f}',\n unit='A')\n\n self.connect_message()\n\n def get_idn(self):\n IDN = self.ask_raw('*IDN?')\n vendor, 
model, serial, firmware = map(str.strip, IDN.split(','))\n model = model[6:]\n\n IDN = {'vendor': vendor, 'model': model,\n 'serial': serial, 'firmware': firmware}\n return IDN\n\n def reset(self):\n self.write('reset()')\n\n def ask(self, cmd):\n return super().ask('print(smu{:s}.{:s})'.format(self._channel, cmd))\n\n def write(self, cmd):\n super().write('smu{:s}.{:s}'.format(self._channel, cmd))\n", "path": "qcodes/instrument_drivers/tektronix/Keithley_2600.py"}], "after_files": [{"content": "from qcodes import VisaInstrument\n\n\nclass Keithley_2600(VisaInstrument):\n \"\"\"\n channel: use channel 'a' or 'b'\n\n This is the qcodes driver for the Keithley_2600 Source-Meter series,\n tested with Keithley_2614B\n\n Status: beta-version.\n TODO:\n - Add all parameters that are in the manual\n - range and limit should be set according to mode\n - add ramping and such stuff\n\n \"\"\"\n def __init__(self, name, address, channel, **kwargs):\n super().__init__(name, address, terminator='\\n', **kwargs)\n self._channel = channel\n\n self.add_parameter('volt',\n get_cmd='measure.v()',\n get_parser=float,\n set_cmd='source.levelv={:.12f}',\n label='Voltage',\n unit='V')\n self.add_parameter('curr',\n get_cmd='measure.i()',\n get_parser=float,\n set_cmd='source.leveli={:.12f}',\n label='Current',\n unit='A')\n self.add_parameter('mode',\n get_cmd='source.func',\n get_parser=float,\n set_cmd='source.func={:d}',\n val_mapping={'current': 0, 'voltage': 1})\n self.add_parameter('output',\n get_cmd='source.output',\n get_parser=float,\n set_cmd='source.output={:d}',\n val_mapping={'on': 1, 'off': 0})\n # Source range\n # needs get after set\n self.add_parameter('rangev',\n get_cmd='source.rangev',\n get_parser=float,\n set_cmd='source.rangev={:.4f}',\n unit='V')\n # Measure range\n # needs get after set\n self.add_parameter('rangei',\n get_cmd='source.rangei',\n get_parser=float,\n set_cmd='source.rangei={:.4f}',\n unit='A')\n # Compliance limit\n self.add_parameter('limitv',\n get_cmd='source.limitv',\n get_parser=float,\n set_cmd='source.limitv={:.4f}',\n unit='V')\n # Compliance limit\n self.add_parameter('limiti',\n get_cmd='source.limiti',\n get_parser=float,\n set_cmd='source.limiti={:.4f}',\n unit='A')\n\n self.connect_message()\n\n def get_idn(self):\n IDN = self.ask_raw('*IDN?')\n vendor, model, serial, firmware = map(str.strip, IDN.split(','))\n model = model[6:]\n\n IDN = {'vendor': vendor, 'model': model,\n 'serial': serial, 'firmware': firmware}\n return IDN\n\n def reset(self):\n self.write('reset()')\n\n def ask(self, cmd):\n return super().ask('print(smu{:s}.{:s})'.format(self._channel, cmd))\n\n def write(self, cmd):\n super().write('smu{:s}.{:s}'.format(self._channel, cmd))\n", "path": "qcodes/instrument_drivers/tektronix/Keithley_2600.py"}]}
| 1,309 | 382 |
gh_patches_debug_26054
|
rasdani/github-patches
|
git_diff
|
encode__uvicorn-636
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
--proxy-headers with supervisor get client ip failed
#### uvicorn version
`Running uvicorn 0.11.3 with CPython 3.8.2 on Linux`
#### supervisor config
```
[fcgi-program:uvicorn]
socket=tcp://0.0.0.0:5000
directory=/var/www/webapp/
command=/usr/local/python3/bin/uvicorn --fd 0 --proxy-headers main:app
numprocs=2
process_name=uvicorn-%(process_num)d
stdout_logfile_maxbytes=0
stdout_logfile=/var/log/uvicorn.log
```
### fastapi
version 0.52.0
```
# cat /var/www/webapp/main.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from fastapi import FastAPI, Request
app = FastAPI(redoc_url=None)
@app.get('/')
def index(request: Request):
return 'hello {}!'.format(request.client.host)
```
### run on supervisor
supervisord version 4.1.0
```
supervisord -n
curl http://127.0.0.1:5000
```
why client.host is None?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `uvicorn/protocols/utils.py`
Content:
```
1 import socket
2
3
4 def get_remote_addr(transport):
5 socket_info = transport.get_extra_info("socket")
6 if socket_info is not None:
7 try:
8 info = socket_info.getpeername()
9 except OSError:
10 # This case appears to inconsistently occur with uvloop
11 # bound to a unix domain socket.
12 family = None
13 info = None
14 else:
15 family = socket_info.family
16
17 if family in (socket.AF_INET, socket.AF_INET6):
18 return (str(info[0]), int(info[1]))
19 return None
20 info = transport.get_extra_info("peername")
21 if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:
22 return (str(info[0]), int(info[1]))
23 return None
24
25
26 def get_local_addr(transport):
27 socket_info = transport.get_extra_info("socket")
28 if socket_info is not None:
29 info = socket_info.getsockname()
30 family = socket_info.family
31 if family in (socket.AF_INET, socket.AF_INET6):
32 return (str(info[0]), int(info[1]))
33 return None
34 info = transport.get_extra_info("sockname")
35 if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:
36 return (str(info[0]), int(info[1]))
37 return None
38
39
40 def is_ssl(transport):
41 return bool(transport.get_extra_info("sslcontext"))
42
43
44 def get_client_addr(scope):
45 client = scope.get("client")
46 if not client:
47 return ""
48 return "%s:%d" % client
49
50
51 def get_path_with_query_string(scope):
52 path_with_query_string = scope.get("root_path", "") + scope["path"]
53 if scope["query_string"]:
54 path_with_query_string = "{}?{}".format(
55 path_with_query_string, scope["query_string"].decode("ascii")
56 )
57 return path_with_query_string
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/uvicorn/protocols/utils.py b/uvicorn/protocols/utils.py
--- a/uvicorn/protocols/utils.py
+++ b/uvicorn/protocols/utils.py
@@ -1,5 +1,10 @@
import socket
+if hasattr(socket, "AF_UNIX"):
+ SUPPORTED_SOCKET_FAMILIES = (socket.AF_INET, socket.AF_INET6, socket.AF_UNIX)
+else:
+ SUPPORTED_SOCKET_FAMILIES = (socket.AF_INET, socket.AF_INET6)
+
def get_remote_addr(transport):
socket_info = transport.get_extra_info("socket")
@@ -14,8 +19,9 @@
else:
family = socket_info.family
- if family in (socket.AF_INET, socket.AF_INET6):
+ if family in SUPPORTED_SOCKET_FAMILIES:
return (str(info[0]), int(info[1]))
+
return None
info = transport.get_extra_info("peername")
if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:
@@ -28,7 +34,7 @@
if socket_info is not None:
info = socket_info.getsockname()
family = socket_info.family
- if family in (socket.AF_INET, socket.AF_INET6):
+ if family in SUPPORTED_SOCKET_FAMILIES:
return (str(info[0]), int(info[1]))
return None
info = transport.get_extra_info("sockname")
|
{"golden_diff": "diff --git a/uvicorn/protocols/utils.py b/uvicorn/protocols/utils.py\n--- a/uvicorn/protocols/utils.py\n+++ b/uvicorn/protocols/utils.py\n@@ -1,5 +1,10 @@\n import socket\n \n+if hasattr(socket, \"AF_UNIX\"):\n+ SUPPORTED_SOCKET_FAMILIES = (socket.AF_INET, socket.AF_INET6, socket.AF_UNIX)\n+else:\n+ SUPPORTED_SOCKET_FAMILIES = (socket.AF_INET, socket.AF_INET6)\n+\n \n def get_remote_addr(transport):\n socket_info = transport.get_extra_info(\"socket\")\n@@ -14,8 +19,9 @@\n else:\n family = socket_info.family\n \n- if family in (socket.AF_INET, socket.AF_INET6):\n+ if family in SUPPORTED_SOCKET_FAMILIES:\n return (str(info[0]), int(info[1]))\n+\n return None\n info = transport.get_extra_info(\"peername\")\n if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:\n@@ -28,7 +34,7 @@\n if socket_info is not None:\n info = socket_info.getsockname()\n family = socket_info.family\n- if family in (socket.AF_INET, socket.AF_INET6):\n+ if family in SUPPORTED_SOCKET_FAMILIES:\n return (str(info[0]), int(info[1]))\n return None\n info = transport.get_extra_info(\"sockname\")\n", "issue": "--proxy-headers with supervisor get client ip failed\n#### uvicorn version\r\n`Running uvicorn 0.11.3 with CPython 3.8.2 on Linux` \r\n#### supervisor config\r\n```\r\n[fcgi-program:uvicorn]\r\nsocket=tcp://0.0.0.0:5000 \r\ndirectory=/var/www/webapp/ \r\ncommand=/usr/local/python3/bin/uvicorn --fd 0 --proxy-headers main:app \r\nnumprocs=2 \r\nprocess_name=uvicorn-%(process_num)d \r\nstdout_logfile_maxbytes=0 \r\nstdout_logfile=/var/log/uvicorn.log \r\n```\r\n\r\n### fastapi \r\nversion 0.52.0\r\n\r\n```\r\n# cat /var/www/webapp/main.py\r\n\r\n#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\nfrom fastapi import FastAPI, Request\r\n\r\napp = FastAPI(redoc_url=None)\r\n\r\n\r\[email protected]('/')\r\ndef index(request: Request):\r\n return 'hello {}!'.format(request.client.host)\r\n\r\n```\r\n\r\n### run on supervisor\r\nsupervisord version 4.1.0\r\n```\r\nsupervisord -n\r\ncurl http://127.0.0.1:5000\r\n\r\n```\r\nwhy client.host is None?\r\n\n", "before_files": [{"content": "import socket\n\n\ndef get_remote_addr(transport):\n socket_info = transport.get_extra_info(\"socket\")\n if socket_info is not None:\n try:\n info = socket_info.getpeername()\n except OSError:\n # This case appears to inconsistently occur with uvloop\n # bound to a unix domain socket.\n family = None\n info = None\n else:\n family = socket_info.family\n\n if family in (socket.AF_INET, socket.AF_INET6):\n return (str(info[0]), int(info[1]))\n return None\n info = transport.get_extra_info(\"peername\")\n if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:\n return (str(info[0]), int(info[1]))\n return None\n\n\ndef get_local_addr(transport):\n socket_info = transport.get_extra_info(\"socket\")\n if socket_info is not None:\n info = socket_info.getsockname()\n family = socket_info.family\n if family in (socket.AF_INET, socket.AF_INET6):\n return (str(info[0]), int(info[1]))\n return None\n info = transport.get_extra_info(\"sockname\")\n if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:\n return (str(info[0]), int(info[1]))\n return None\n\n\ndef is_ssl(transport):\n return bool(transport.get_extra_info(\"sslcontext\"))\n\n\ndef get_client_addr(scope):\n client = scope.get(\"client\")\n if not client:\n return \"\"\n return \"%s:%d\" % client\n\n\ndef get_path_with_query_string(scope):\n path_with_query_string = scope.get(\"root_path\", \"\") + scope[\"path\"]\n 
if scope[\"query_string\"]:\n path_with_query_string = \"{}?{}\".format(\n path_with_query_string, scope[\"query_string\"].decode(\"ascii\")\n )\n return path_with_query_string\n", "path": "uvicorn/protocols/utils.py"}], "after_files": [{"content": "import socket\n\nif hasattr(socket, \"AF_UNIX\"):\n SUPPORTED_SOCKET_FAMILIES = (socket.AF_INET, socket.AF_INET6, socket.AF_UNIX)\nelse:\n SUPPORTED_SOCKET_FAMILIES = (socket.AF_INET, socket.AF_INET6)\n\n\ndef get_remote_addr(transport):\n socket_info = transport.get_extra_info(\"socket\")\n if socket_info is not None:\n try:\n info = socket_info.getpeername()\n except OSError:\n # This case appears to inconsistently occur with uvloop\n # bound to a unix domain socket.\n family = None\n info = None\n else:\n family = socket_info.family\n\n if family in SUPPORTED_SOCKET_FAMILIES:\n return (str(info[0]), int(info[1]))\n\n return None\n info = transport.get_extra_info(\"peername\")\n if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:\n return (str(info[0]), int(info[1]))\n return None\n\n\ndef get_local_addr(transport):\n socket_info = transport.get_extra_info(\"socket\")\n if socket_info is not None:\n info = socket_info.getsockname()\n family = socket_info.family\n if family in SUPPORTED_SOCKET_FAMILIES:\n return (str(info[0]), int(info[1]))\n return None\n info = transport.get_extra_info(\"sockname\")\n if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:\n return (str(info[0]), int(info[1]))\n return None\n\n\ndef is_ssl(transport):\n return bool(transport.get_extra_info(\"sslcontext\"))\n\n\ndef get_client_addr(scope):\n client = scope.get(\"client\")\n if not client:\n return \"\"\n return \"%s:%d\" % client\n\n\ndef get_path_with_query_string(scope):\n path_with_query_string = scope.get(\"root_path\", \"\") + scope[\"path\"]\n if scope[\"query_string\"]:\n path_with_query_string = \"{}?{}\".format(\n path_with_query_string, scope[\"query_string\"].decode(\"ascii\")\n )\n return path_with_query_string\n", "path": "uvicorn/protocols/utils.py"}]}
| 1,063 | 320 |
gh_patches_debug_38005
|
rasdani/github-patches
|
git_diff
|
beeware__toga-1610
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
toga-winforms v0.3.0.dev38 gives Error on Windows app
Toga-winforms v0.3.0.dev38 gives AttributeError: 'TextInput' object has not attribute '_on_change'
**To Reproduce**
1. Create an app that uses toga.TextInput(),
2. Set toga-winforms==0.3.0.dev38 under Windows requirements in pyproject.toml file.
3. Update dependencies: `update -d`
4. Build and run app on Windows
**Screenshots**

**Environment:**
- Operating System: Windows 10
- Python version: 3.8.10
- Software versions:
- Briefcase: 0.3.9
- Toga: 0.3.0.dev38
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/textinput/textinput/app.py`
Content:
```
1 from string import ascii_lowercase, ascii_uppercase, digits
2
3 import toga
4 from toga.constants import COLUMN
5 from toga.style import Pack
6 from toga import validators
7
8 EMPTY_PASSWORD = 'Empty password'
9
10
11 class TextInputApp(toga.App):
12 # Button callback functions
13 def do_extract_values(self, widget, **kwargs):
14 # Disable all the text inputs
15 self.text_input.enabled = False
16 self.password_input.enabled = False
17 self.number_input.enabled = False
18
19 # Update the labels with the extracted values
20 self.text_label.text = "Text content: {}".format(
21 self.text_input.value
22 )
23
24 self.password_label.text = "Your password is {}: {}".format(
25 "valid" if self.password_input.is_valid else "invalid",
26 self.password_input.value
27 )
28
29 number = self.number_input.value
30 if number:
31 self.number_label.text = "Double the number is: {}".format(number * 2)
32 else:
33 self.number_label.text = "You didn't enter a number"
34
35 # Wait 5 seconds
36 for i in range(5, 0, -1):
37 self.label.text = 'Counting down from {}...'.format(i)
38 yield 1
39 self.label.text = 'Enter some values and press extract.'
40
41 # Renable the inputs again.
42 self.text_input.enabled = True
43 self.password_input.enabled = True
44 self.number_input.enabled = True
45
46 def startup(self):
47 # Set up main window
48 self.main_window = toga.MainWindow(title=self.name)
49
50 # Labels to show responses.
51 self.label = toga.Label(
52 'Enter some values and press extract.', style=Pack(padding=10)
53 )
54 self.text_label = toga.Label('Ready.', style=Pack(padding=10))
55 self.password_label = toga.Label('Ready.', style=Pack(padding=10))
56 self.password_content_label = toga.Label(
57 EMPTY_PASSWORD, style=Pack(padding_bottom=10, font_size=9)
58 )
59 self.number_label = toga.Label('Ready.', style=Pack(padding=10))
60
61 # Text inputs and a button
62 self.text_input = toga.TextInput(
63 placeholder='Type something...', style=Pack(padding=10)
64 )
65 self.password_input = toga.PasswordInput(
66 placeholder='Password...',
67 style=Pack(padding=10),
68 on_change=self.on_password_change,
69 validators=[
70 validators.MinLength(10),
71 validators.ContainsUppercase(),
72 validators.ContainsLowercase(),
73 validators.ContainsSpecial(),
74 validators.ContainsDigit()
75 ]
76 )
77 self.email_input = toga.TextInput(
78 placeholder='Email...',
79 style=Pack(padding=10),
80 validators=[validators.Email()]
81 )
82 self.number_input = toga.NumberInput(style=Pack(padding=10))
83 btn_extract = toga.Button(
84 'Extract values',
85 on_press=self.do_extract_values,
86 style=Pack(flex=1),
87 )
88
89 # Outermost box
90 box = toga.Box(
91 children=[
92 self.label,
93 self.text_input,
94 self.password_input,
95 self.password_content_label,
96 self.email_input,
97 self.number_input,
98 self.text_label,
99 self.password_label,
100 self.number_label,
101 btn_extract,
102 ],
103 style=Pack(
104 flex=1,
105 direction=COLUMN,
106 padding=10,
107 )
108 )
109
110 # Add the content on the main window
111 self.main_window.content = box
112
113 # Show the main window
114 self.main_window.show()
115
116 def on_password_change(self, widget):
117 content = widget.value
118 self.password_content_label.text = self.get_password_content_label(content)
119
120 def get_password_content_label(self, content):
121 if content.strip() == "":
122 return EMPTY_PASSWORD
123 contains = set()
124 for letter in content:
125 if letter in ascii_uppercase:
126 contains.add("uppercase letters")
127 elif letter in ascii_lowercase:
128 contains.add("lowercase letters")
129 elif letter in digits:
130 contains.add("digits")
131 else:
132 contains.add("special characters")
133 return "Password contains: {}".format(', '.join(contains))
134
135
136 def main():
137 return TextInputApp('TextInput', 'org.beeware.widgets.textinput')
138
139
140 if __name__ == '__main__':
141 app = main()
142 app.main_loop()
143
```
Path: `src/core/toga/widgets/textinput.py`
Content:
```
1 import warnings
2
3 from toga.handlers import wrapped_handler
4
5 from .base import Widget
6
7
8 class TextInput(Widget):
9 """ A widget get user input.
10
11 Args:
12 id (str): An identifier for this widget.
13 style (:obj:`Style`): An optional style object. If no style is provided then
14 a new one will be created for the widget.
15 factory (:obj:`module`): A python module that is capable to return a
16 implementation of this class with the same name. (optional & normally not needed)
17 value (str): The initial text for the input.
18 placeholder (str): If no input is present this text is shown.
19 readonly (bool): Whether a user can write into the text input, defaults to `False`.
20 on_change (Callable): Method to be called when text is changed in text box
21 validators (list): list of validators to run on the value of the text box. Should
22 return None is value is valid and an error message if not.
23 on_change (``callable``): The handler to invoke when the text changes.
24 on_gain_focus (:obj:`callable`): Function to execute when get focused.
25 on_lose_focus (:obj:`callable`): Function to execute when lose focus.
26 """
27 MIN_WIDTH = 100
28
29 def __init__(
30 self,
31 id=None,
32 style=None,
33 factory=None,
34 value=None,
35 placeholder=None,
36 readonly=False,
37 on_change=None,
38 on_gain_focus=None,
39 on_lose_focus=None,
40 validators=None,
41 initial=None, # DEPRECATED!
42 ):
43 super().__init__(id=id, style=style, factory=factory)
44
45 # Create a platform specific implementation of the widget
46 self._create()
47
48 ##################################################################
49 # 2022-07: Backwards compatibility
50 ##################################################################
51
52 # initial replaced with value
53 if initial is not None:
54 if value is not None:
55 raise ValueError(
56 "Cannot specify both `initial` and `value`; "
57 "`initial` has been deprecated, use `value`"
58 )
59 else:
60 warnings.warn(
61 "`initial` has been renamed `value`", DeprecationWarning
62 )
63 value = initial
64
65 ##################################################################
66 # End backwards compatibility.
67 ##################################################################
68
69 self.placeholder = placeholder
70 self.readonly = readonly
71
72 # Set the actual value before on_change, because we do not want on_change triggered by it
73 self.value = value
74 self.on_change = on_change
75 self.validators = validators
76 self.on_lose_focus = on_lose_focus
77 self.on_gain_focus = on_gain_focus
78
79 def _create(self):
80 self._impl = self.factory.TextInput(interface=self)
81
82 @property
83 def readonly(self):
84 """ Whether a user can write into the text input
85
86 Returns:
87 ``True`` if only read is possible.
88 ``False`` if read and write is possible.
89 """
90 return self._readonly
91
92 @readonly.setter
93 def readonly(self, value):
94 self._readonly = value
95 self._impl.set_readonly(value)
96
97 @property
98 def placeholder(self):
99 """ The placeholder text.
100
101 Returns:
102 The placeholder text as a ``str``.
103 """
104 return self._placeholder
105
106 @placeholder.setter
107 def placeholder(self, value):
108 if value is None:
109 self._placeholder = ''
110 else:
111 self._placeholder = str(value)
112 self._impl.set_placeholder(value)
113
114 @property
115 def value(self):
116 """ The value of the text input field
117
118 Returns:
119 The current text of the widget as a ``str``.
120 """
121 return self._impl.get_value()
122
123 @value.setter
124 def value(self, value):
125 if value is None:
126 v = ''
127 else:
128 v = str(value)
129 self._impl.set_value(v)
130
131 @property
132 def is_valid(self):
133 return self._impl.is_valid()
134
135 def clear(self):
136 """ Clears the text of the widget """
137 self.value = ''
138
139 @property
140 def on_change(self):
141 """The handler to invoke when the value changes
142
143 Returns:
144 The function ``callable`` that is called on a content change.
145 """
146 return self._on_change
147
148 @on_change.setter
149 def on_change(self, handler):
150 """Set the handler to invoke when the value is changed.
151
152 Args:
153 handler (:obj:`callable`): The handler to invoke when the value is changed.
154 """
155 self._on_change = wrapped_handler(self, handler)
156 self._impl.set_on_change(self._on_change)
157
158 @property
159 def validators(self):
160 return self._validators
161
162 @validators.setter
163 def validators(self, validators):
164 if validators is None:
165 self._validators = []
166 else:
167 self._validators = validators
168 self.validate()
169
170 @property
171 def on_gain_focus(self):
172 """The handler to invoke when the widget get focus.
173
174 Returns:
175 The function ``callable`` that is called on widget focus gain.
176 """
177 return self._on_gain_focus
178
179 @on_gain_focus.setter
180 def on_gain_focus(self, handler):
181 self._on_gain_focus = wrapped_handler(self, handler)
182 self._impl.set_on_gain_focus(self._on_gain_focus)
183
184 @property
185 def on_lose_focus(self):
186 """The handler to invoke when the widget lose focus.
187
188 Returns:
189 The function ``callable`` that is called on widget focus loss.
190 """
191 return self._on_lose_focus
192
193 @on_lose_focus.setter
194 def on_lose_focus(self, handler):
195 self._on_lose_focus = wrapped_handler(self, handler)
196 self._impl.set_on_lose_focus(self._on_lose_focus)
197
198 def validate(self):
199 error_message = None
200 for validator in self.validators:
201 if error_message is None:
202 error_message = validator(self.value)
203
204 if error_message is None:
205 self._impl.clear_error()
206 return True
207 else:
208 self._impl.set_error(error_message)
209 return False
210
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/examples/textinput/textinput/app.py b/examples/textinput/textinput/app.py
--- a/examples/textinput/textinput/app.py
+++ b/examples/textinput/textinput/app.py
@@ -13,12 +13,14 @@
def do_extract_values(self, widget, **kwargs):
# Disable all the text inputs
self.text_input.enabled = False
+ self.text_input_placeholder.enabled = False
self.password_input.enabled = False
self.number_input.enabled = False
# Update the labels with the extracted values
- self.text_label.text = "Text content: {}".format(
- self.text_input.value
+ self.text_label.text = "Text content: {}; {}".format(
+ self.text_input.value,
+ self.text_input_placeholder.value,
)
self.password_label.text = "Your password is {}: {}".format(
@@ -40,6 +42,7 @@
# Renable the inputs again.
self.text_input.enabled = True
+ self.text_input_placeholder.enabled = True
self.password_input.enabled = True
self.number_input.enabled = True
@@ -60,6 +63,10 @@
# Text inputs and a button
self.text_input = toga.TextInput(
+ value='Initial value',
+ placeholder='Type something...', style=Pack(padding=10)
+ )
+ self.text_input_placeholder = toga.TextInput(
placeholder='Type something...', style=Pack(padding=10)
)
self.password_input = toga.PasswordInput(
@@ -91,6 +98,7 @@
children=[
self.label,
self.text_input,
+ self.text_input_placeholder,
self.password_input,
self.password_content_label,
self.email_input,
diff --git a/src/core/toga/widgets/textinput.py b/src/core/toga/widgets/textinput.py
--- a/src/core/toga/widgets/textinput.py
+++ b/src/core/toga/widgets/textinput.py
@@ -70,7 +70,10 @@
self.readonly = readonly
# Set the actual value before on_change, because we do not want on_change triggered by it
+ # However, we need to prime the handler property in case it is accessed.
+ self._on_change = None
self.value = value
+
self.on_change = on_change
self.validators = validators
self.on_lose_focus = on_lose_focus
|
{"golden_diff": "diff --git a/examples/textinput/textinput/app.py b/examples/textinput/textinput/app.py\n--- a/examples/textinput/textinput/app.py\n+++ b/examples/textinput/textinput/app.py\n@@ -13,12 +13,14 @@\n def do_extract_values(self, widget, **kwargs):\n # Disable all the text inputs\n self.text_input.enabled = False\n+ self.text_input_placeholder.enabled = False\n self.password_input.enabled = False\n self.number_input.enabled = False\n \n # Update the labels with the extracted values\n- self.text_label.text = \"Text content: {}\".format(\n- self.text_input.value\n+ self.text_label.text = \"Text content: {}; {}\".format(\n+ self.text_input.value,\n+ self.text_input_placeholder.value,\n )\n \n self.password_label.text = \"Your password is {}: {}\".format(\n@@ -40,6 +42,7 @@\n \n # Renable the inputs again.\n self.text_input.enabled = True\n+ self.text_input_placeholder.enabled = True\n self.password_input.enabled = True\n self.number_input.enabled = True\n \n@@ -60,6 +63,10 @@\n \n # Text inputs and a button\n self.text_input = toga.TextInput(\n+ value='Initial value',\n+ placeholder='Type something...', style=Pack(padding=10)\n+ )\n+ self.text_input_placeholder = toga.TextInput(\n placeholder='Type something...', style=Pack(padding=10)\n )\n self.password_input = toga.PasswordInput(\n@@ -91,6 +98,7 @@\n children=[\n self.label,\n self.text_input,\n+ self.text_input_placeholder,\n self.password_input,\n self.password_content_label,\n self.email_input,\ndiff --git a/src/core/toga/widgets/textinput.py b/src/core/toga/widgets/textinput.py\n--- a/src/core/toga/widgets/textinput.py\n+++ b/src/core/toga/widgets/textinput.py\n@@ -70,7 +70,10 @@\n self.readonly = readonly\n \n # Set the actual value before on_change, because we do not want on_change triggered by it\n+ # However, we need to prime the handler property in case it is accessed.\n+ self._on_change = None\n self.value = value\n+\n self.on_change = on_change\n self.validators = validators\n self.on_lose_focus = on_lose_focus\n", "issue": "toga-winforms v0.3.0.dev38 gives Error on Windows app\nToga-winforms v0.3.0.dev38 gives AttributeError: 'TextInput' object has not attribute '_on_change'\r\n\r\n**To Reproduce**\r\n1. Create an app that uses toga.TextInput(),\r\n2. Set toga-winforms==0.3.0.dev38 under Windows requirements in pyproject.toml file.\r\n3. Update dependencies: `update -d`\r\n4. 
Build and run app on Windows\r\n\r\n**Screenshots**\r\n\r\n\r\n**Environment:**\r\n - Operating System: Windows 10\r\n - Python version: 3.8.10\r\n - Software versions:\r\n - Briefcase: 0.3.9\r\n - Toga: 0.3.0.dev38\r\n\n", "before_files": [{"content": "from string import ascii_lowercase, ascii_uppercase, digits\n\nimport toga\nfrom toga.constants import COLUMN\nfrom toga.style import Pack\nfrom toga import validators\n\nEMPTY_PASSWORD = 'Empty password'\n\n\nclass TextInputApp(toga.App):\n # Button callback functions\n def do_extract_values(self, widget, **kwargs):\n # Disable all the text inputs\n self.text_input.enabled = False\n self.password_input.enabled = False\n self.number_input.enabled = False\n\n # Update the labels with the extracted values\n self.text_label.text = \"Text content: {}\".format(\n self.text_input.value\n )\n\n self.password_label.text = \"Your password is {}: {}\".format(\n \"valid\" if self.password_input.is_valid else \"invalid\",\n self.password_input.value\n )\n\n number = self.number_input.value\n if number:\n self.number_label.text = \"Double the number is: {}\".format(number * 2)\n else:\n self.number_label.text = \"You didn't enter a number\"\n\n # Wait 5 seconds\n for i in range(5, 0, -1):\n self.label.text = 'Counting down from {}...'.format(i)\n yield 1\n self.label.text = 'Enter some values and press extract.'\n\n # Renable the inputs again.\n self.text_input.enabled = True\n self.password_input.enabled = True\n self.number_input.enabled = True\n\n def startup(self):\n # Set up main window\n self.main_window = toga.MainWindow(title=self.name)\n\n # Labels to show responses.\n self.label = toga.Label(\n 'Enter some values and press extract.', style=Pack(padding=10)\n )\n self.text_label = toga.Label('Ready.', style=Pack(padding=10))\n self.password_label = toga.Label('Ready.', style=Pack(padding=10))\n self.password_content_label = toga.Label(\n EMPTY_PASSWORD, style=Pack(padding_bottom=10, font_size=9)\n )\n self.number_label = toga.Label('Ready.', style=Pack(padding=10))\n\n # Text inputs and a button\n self.text_input = toga.TextInput(\n placeholder='Type something...', style=Pack(padding=10)\n )\n self.password_input = toga.PasswordInput(\n placeholder='Password...',\n style=Pack(padding=10),\n on_change=self.on_password_change,\n validators=[\n validators.MinLength(10),\n validators.ContainsUppercase(),\n validators.ContainsLowercase(),\n validators.ContainsSpecial(),\n validators.ContainsDigit()\n ]\n )\n self.email_input = toga.TextInput(\n placeholder='Email...',\n style=Pack(padding=10),\n validators=[validators.Email()]\n )\n self.number_input = toga.NumberInput(style=Pack(padding=10))\n btn_extract = toga.Button(\n 'Extract values',\n on_press=self.do_extract_values,\n style=Pack(flex=1),\n )\n\n # Outermost box\n box = toga.Box(\n children=[\n self.label,\n self.text_input,\n self.password_input,\n self.password_content_label,\n self.email_input,\n self.number_input,\n self.text_label,\n self.password_label,\n self.number_label,\n btn_extract,\n ],\n style=Pack(\n flex=1,\n direction=COLUMN,\n padding=10,\n )\n )\n\n # Add the content on the main window\n self.main_window.content = box\n\n # Show the main window\n self.main_window.show()\n\n def on_password_change(self, widget):\n content = widget.value\n self.password_content_label.text = self.get_password_content_label(content)\n\n def get_password_content_label(self, content):\n if content.strip() == \"\":\n return EMPTY_PASSWORD\n contains = set()\n for letter in content:\n if letter in 
ascii_uppercase:\n contains.add(\"uppercase letters\")\n elif letter in ascii_lowercase:\n contains.add(\"lowercase letters\")\n elif letter in digits:\n contains.add(\"digits\")\n else:\n contains.add(\"special characters\")\n return \"Password contains: {}\".format(', '.join(contains))\n\n\ndef main():\n return TextInputApp('TextInput', 'org.beeware.widgets.textinput')\n\n\nif __name__ == '__main__':\n app = main()\n app.main_loop()\n", "path": "examples/textinput/textinput/app.py"}, {"content": "import warnings\n\nfrom toga.handlers import wrapped_handler\n\nfrom .base import Widget\n\n\nclass TextInput(Widget):\n \"\"\" A widget get user input.\n\n Args:\n id (str): An identifier for this widget.\n style (:obj:`Style`): An optional style object. If no style is provided then\n a new one will be created for the widget.\n factory (:obj:`module`): A python module that is capable to return a\n implementation of this class with the same name. (optional & normally not needed)\n value (str): The initial text for the input.\n placeholder (str): If no input is present this text is shown.\n readonly (bool): Whether a user can write into the text input, defaults to `False`.\n on_change (Callable): Method to be called when text is changed in text box\n validators (list): list of validators to run on the value of the text box. Should\n return None is value is valid and an error message if not.\n on_change (``callable``): The handler to invoke when the text changes.\n on_gain_focus (:obj:`callable`): Function to execute when get focused.\n on_lose_focus (:obj:`callable`): Function to execute when lose focus.\n \"\"\"\n MIN_WIDTH = 100\n\n def __init__(\n self,\n id=None,\n style=None,\n factory=None,\n value=None,\n placeholder=None,\n readonly=False,\n on_change=None,\n on_gain_focus=None,\n on_lose_focus=None,\n validators=None,\n initial=None, # DEPRECATED!\n ):\n super().__init__(id=id, style=style, factory=factory)\n\n # Create a platform specific implementation of the widget\n self._create()\n\n ##################################################################\n # 2022-07: Backwards compatibility\n ##################################################################\n\n # initial replaced with value\n if initial is not None:\n if value is not None:\n raise ValueError(\n \"Cannot specify both `initial` and `value`; \"\n \"`initial` has been deprecated, use `value`\"\n )\n else:\n warnings.warn(\n \"`initial` has been renamed `value`\", DeprecationWarning\n )\n value = initial\n\n ##################################################################\n # End backwards compatibility.\n ##################################################################\n\n self.placeholder = placeholder\n self.readonly = readonly\n\n # Set the actual value before on_change, because we do not want on_change triggered by it\n self.value = value\n self.on_change = on_change\n self.validators = validators\n self.on_lose_focus = on_lose_focus\n self.on_gain_focus = on_gain_focus\n\n def _create(self):\n self._impl = self.factory.TextInput(interface=self)\n\n @property\n def readonly(self):\n \"\"\" Whether a user can write into the text input\n\n Returns:\n ``True`` if only read is possible.\n ``False`` if read and write is possible.\n \"\"\"\n return self._readonly\n\n @readonly.setter\n def readonly(self, value):\n self._readonly = value\n self._impl.set_readonly(value)\n\n @property\n def placeholder(self):\n \"\"\" The placeholder text.\n\n Returns:\n The placeholder text as a ``str``.\n \"\"\"\n return 
self._placeholder\n\n @placeholder.setter\n def placeholder(self, value):\n if value is None:\n self._placeholder = ''\n else:\n self._placeholder = str(value)\n self._impl.set_placeholder(value)\n\n @property\n def value(self):\n \"\"\" The value of the text input field\n\n Returns:\n The current text of the widget as a ``str``.\n \"\"\"\n return self._impl.get_value()\n\n @value.setter\n def value(self, value):\n if value is None:\n v = ''\n else:\n v = str(value)\n self._impl.set_value(v)\n\n @property\n def is_valid(self):\n return self._impl.is_valid()\n\n def clear(self):\n \"\"\" Clears the text of the widget \"\"\"\n self.value = ''\n\n @property\n def on_change(self):\n \"\"\"The handler to invoke when the value changes\n\n Returns:\n The function ``callable`` that is called on a content change.\n \"\"\"\n return self._on_change\n\n @on_change.setter\n def on_change(self, handler):\n \"\"\"Set the handler to invoke when the value is changed.\n\n Args:\n handler (:obj:`callable`): The handler to invoke when the value is changed.\n \"\"\"\n self._on_change = wrapped_handler(self, handler)\n self._impl.set_on_change(self._on_change)\n\n @property\n def validators(self):\n return self._validators\n\n @validators.setter\n def validators(self, validators):\n if validators is None:\n self._validators = []\n else:\n self._validators = validators\n self.validate()\n\n @property\n def on_gain_focus(self):\n \"\"\"The handler to invoke when the widget get focus.\n\n Returns:\n The function ``callable`` that is called on widget focus gain.\n \"\"\"\n return self._on_gain_focus\n\n @on_gain_focus.setter\n def on_gain_focus(self, handler):\n self._on_gain_focus = wrapped_handler(self, handler)\n self._impl.set_on_gain_focus(self._on_gain_focus)\n\n @property\n def on_lose_focus(self):\n \"\"\"The handler to invoke when the widget lose focus.\n\n Returns:\n The function ``callable`` that is called on widget focus loss.\n \"\"\"\n return self._on_lose_focus\n\n @on_lose_focus.setter\n def on_lose_focus(self, handler):\n self._on_lose_focus = wrapped_handler(self, handler)\n self._impl.set_on_lose_focus(self._on_lose_focus)\n\n def validate(self):\n error_message = None\n for validator in self.validators:\n if error_message is None:\n error_message = validator(self.value)\n\n if error_message is None:\n self._impl.clear_error()\n return True\n else:\n self._impl.set_error(error_message)\n return False\n", "path": "src/core/toga/widgets/textinput.py"}], "after_files": [{"content": "from string import ascii_lowercase, ascii_uppercase, digits\n\nimport toga\nfrom toga.constants import COLUMN\nfrom toga.style import Pack\nfrom toga import validators\n\nEMPTY_PASSWORD = 'Empty password'\n\n\nclass TextInputApp(toga.App):\n # Button callback functions\n def do_extract_values(self, widget, **kwargs):\n # Disable all the text inputs\n self.text_input.enabled = False\n self.text_input_placeholder.enabled = False\n self.password_input.enabled = False\n self.number_input.enabled = False\n\n # Update the labels with the extracted values\n self.text_label.text = \"Text content: {}; {}\".format(\n self.text_input.value,\n self.text_input_placeholder.value,\n )\n\n self.password_label.text = \"Your password is {}: {}\".format(\n \"valid\" if self.password_input.is_valid else \"invalid\",\n self.password_input.value\n )\n\n number = self.number_input.value\n if number:\n self.number_label.text = \"Double the number is: {}\".format(number * 2)\n else:\n self.number_label.text = \"You didn't enter a number\"\n\n # 
Wait 5 seconds\n for i in range(5, 0, -1):\n self.label.text = 'Counting down from {}...'.format(i)\n yield 1\n self.label.text = 'Enter some values and press extract.'\n\n # Renable the inputs again.\n self.text_input.enabled = True\n self.text_input_placeholder.enabled = True\n self.password_input.enabled = True\n self.number_input.enabled = True\n\n def startup(self):\n # Set up main window\n self.main_window = toga.MainWindow(title=self.name)\n\n # Labels to show responses.\n self.label = toga.Label(\n 'Enter some values and press extract.', style=Pack(padding=10)\n )\n self.text_label = toga.Label('Ready.', style=Pack(padding=10))\n self.password_label = toga.Label('Ready.', style=Pack(padding=10))\n self.password_content_label = toga.Label(\n EMPTY_PASSWORD, style=Pack(padding_bottom=10, font_size=9)\n )\n self.number_label = toga.Label('Ready.', style=Pack(padding=10))\n\n # Text inputs and a button\n self.text_input = toga.TextInput(\n value='Initial value',\n placeholder='Type something...', style=Pack(padding=10)\n )\n self.text_input_placeholder = toga.TextInput(\n placeholder='Type something...', style=Pack(padding=10)\n )\n self.password_input = toga.PasswordInput(\n placeholder='Password...',\n style=Pack(padding=10),\n on_change=self.on_password_change,\n validators=[\n validators.MinLength(10),\n validators.ContainsUppercase(),\n validators.ContainsLowercase(),\n validators.ContainsSpecial(),\n validators.ContainsDigit()\n ]\n )\n self.email_input = toga.TextInput(\n placeholder='Email...',\n style=Pack(padding=10),\n validators=[validators.Email()]\n )\n self.number_input = toga.NumberInput(style=Pack(padding=10))\n btn_extract = toga.Button(\n 'Extract values',\n on_press=self.do_extract_values,\n style=Pack(flex=1),\n )\n\n # Outermost box\n box = toga.Box(\n children=[\n self.label,\n self.text_input,\n self.text_input_placeholder,\n self.password_input,\n self.password_content_label,\n self.email_input,\n self.number_input,\n self.text_label,\n self.password_label,\n self.number_label,\n btn_extract,\n ],\n style=Pack(\n flex=1,\n direction=COLUMN,\n padding=10,\n )\n )\n\n # Add the content on the main window\n self.main_window.content = box\n\n # Show the main window\n self.main_window.show()\n\n def on_password_change(self, widget):\n content = widget.value\n self.password_content_label.text = self.get_password_content_label(content)\n\n def get_password_content_label(self, content):\n if content.strip() == \"\":\n return EMPTY_PASSWORD\n contains = set()\n for letter in content:\n if letter in ascii_uppercase:\n contains.add(\"uppercase letters\")\n elif letter in ascii_lowercase:\n contains.add(\"lowercase letters\")\n elif letter in digits:\n contains.add(\"digits\")\n else:\n contains.add(\"special characters\")\n return \"Password contains: {}\".format(', '.join(contains))\n\n\ndef main():\n return TextInputApp('TextInput', 'org.beeware.widgets.textinput')\n\n\nif __name__ == '__main__':\n app = main()\n app.main_loop()\n", "path": "examples/textinput/textinput/app.py"}, {"content": "import warnings\n\nfrom toga.handlers import wrapped_handler\n\nfrom .base import Widget\n\n\nclass TextInput(Widget):\n \"\"\" A widget get user input.\n\n Args:\n id (str): An identifier for this widget.\n style (:obj:`Style`): An optional style object. If no style is provided then\n a new one will be created for the widget.\n factory (:obj:`module`): A python module that is capable to return a\n implementation of this class with the same name. 
(optional & normally not needed)\n value (str): The initial text for the input.\n placeholder (str): If no input is present this text is shown.\n readonly (bool): Whether a user can write into the text input, defaults to `False`.\n on_change (Callable): Method to be called when text is changed in text box\n validators (list): list of validators to run on the value of the text box. Should\n return None is value is valid and an error message if not.\n on_change (``callable``): The handler to invoke when the text changes.\n on_gain_focus (:obj:`callable`): Function to execute when get focused.\n on_lose_focus (:obj:`callable`): Function to execute when lose focus.\n \"\"\"\n MIN_WIDTH = 100\n\n def __init__(\n self,\n id=None,\n style=None,\n factory=None,\n value=None,\n placeholder=None,\n readonly=False,\n on_change=None,\n on_gain_focus=None,\n on_lose_focus=None,\n validators=None,\n initial=None, # DEPRECATED!\n ):\n super().__init__(id=id, style=style, factory=factory)\n\n # Create a platform specific implementation of the widget\n self._create()\n\n ##################################################################\n # 2022-07: Backwards compatibility\n ##################################################################\n\n # initial replaced with value\n if initial is not None:\n if value is not None:\n raise ValueError(\n \"Cannot specify both `initial` and `value`; \"\n \"`initial` has been deprecated, use `value`\"\n )\n else:\n warnings.warn(\n \"`initial` has been renamed `value`\", DeprecationWarning\n )\n value = initial\n\n ##################################################################\n # End backwards compatibility.\n ##################################################################\n\n self.placeholder = placeholder\n self.readonly = readonly\n\n # Set the actual value before on_change, because we do not want on_change triggered by it\n # However, we need to prime the handler property in case it is accessed.\n self._on_change = None\n self.value = value\n\n self.on_change = on_change\n self.validators = validators\n self.on_lose_focus = on_lose_focus\n self.on_gain_focus = on_gain_focus\n\n def _create(self):\n self._impl = self.factory.TextInput(interface=self)\n\n @property\n def readonly(self):\n \"\"\" Whether a user can write into the text input\n\n Returns:\n ``True`` if only read is possible.\n ``False`` if read and write is possible.\n \"\"\"\n return self._readonly\n\n @readonly.setter\n def readonly(self, value):\n self._readonly = value\n self._impl.set_readonly(value)\n\n @property\n def placeholder(self):\n \"\"\" The placeholder text.\n\n Returns:\n The placeholder text as a ``str``.\n \"\"\"\n return self._placeholder\n\n @placeholder.setter\n def placeholder(self, value):\n if value is None:\n self._placeholder = ''\n else:\n self._placeholder = str(value)\n self._impl.set_placeholder(value)\n\n @property\n def value(self):\n \"\"\" The value of the text input field\n\n Returns:\n The current text of the widget as a ``str``.\n \"\"\"\n return self._impl.get_value()\n\n @value.setter\n def value(self, value):\n if value is None:\n v = ''\n else:\n v = str(value)\n self._impl.set_value(v)\n\n @property\n def is_valid(self):\n return self._impl.is_valid()\n\n def clear(self):\n \"\"\" Clears the text of the widget \"\"\"\n self.value = ''\n\n @property\n def on_change(self):\n \"\"\"The handler to invoke when the value changes\n\n Returns:\n The function ``callable`` that is called on a content change.\n \"\"\"\n return self._on_change\n\n 
@on_change.setter\n def on_change(self, handler):\n \"\"\"Set the handler to invoke when the value is changed.\n\n Args:\n handler (:obj:`callable`): The handler to invoke when the value is changed.\n \"\"\"\n self._on_change = wrapped_handler(self, handler)\n self._impl.set_on_change(self._on_change)\n\n @property\n def validators(self):\n return self._validators\n\n @validators.setter\n def validators(self, validators):\n if validators is None:\n self._validators = []\n else:\n self._validators = validators\n self.validate()\n\n @property\n def on_gain_focus(self):\n \"\"\"The handler to invoke when the widget get focus.\n\n Returns:\n The function ``callable`` that is called on widget focus gain.\n \"\"\"\n return self._on_gain_focus\n\n @on_gain_focus.setter\n def on_gain_focus(self, handler):\n self._on_gain_focus = wrapped_handler(self, handler)\n self._impl.set_on_gain_focus(self._on_gain_focus)\n\n @property\n def on_lose_focus(self):\n \"\"\"The handler to invoke when the widget lose focus.\n\n Returns:\n The function ``callable`` that is called on widget focus loss.\n \"\"\"\n return self._on_lose_focus\n\n @on_lose_focus.setter\n def on_lose_focus(self, handler):\n self._on_lose_focus = wrapped_handler(self, handler)\n self._impl.set_on_lose_focus(self._on_lose_focus)\n\n def validate(self):\n error_message = None\n for validator in self.validators:\n if error_message is None:\n error_message = validator(self.value)\n\n if error_message is None:\n self._impl.clear_error()\n return True\n else:\n self._impl.set_error(error_message)\n return False\n", "path": "src/core/toga/widgets/textinput.py"}]}
| 3,625 | 526 |
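Editorial aside on the record above: the toga golden diff boils down to an initialization-order hazard. Assigning `self.value` inside `__init__` can make the backend widget fire its change notification before `self._on_change` has ever been assigned, which is exactly the `AttributeError: 'TextInput' object has no attribute '_on_change'` reported in the issue, and the one-line fix is to prime the attribute first. The snippet below is a minimal, self-contained sketch of that hazard and the fix; `FakeImpl` and `SketchTextInput` are invented names for illustration only and are not part of toga.

```python
class FakeImpl:
    # Stand-in for a backend widget whose set_value() immediately reports a change.
    def __init__(self, interface):
        self.interface = interface
        self._value = ""

    def set_value(self, value):
        self._value = value
        # Reading interface.on_change here raises AttributeError unless the
        # interface primed self._on_change before assigning its initial value.
        handler = self.interface.on_change
        if handler:
            handler(self.interface)


class SketchTextInput:
    def __init__(self, value=None, on_change=None):
        self._impl = FakeImpl(self)
        self._on_change = None      # the priming line from the golden diff
        self.value = value
        self.on_change = on_change

    @property
    def on_change(self):
        return self._on_change

    @on_change.setter
    def on_change(self, handler):
        self._on_change = handler

    @property
    def value(self):
        return self._impl._value

    @value.setter
    def value(self, value):
        self._impl.set_value("" if value is None else str(value))


widget = SketchTextInput(value="Initial value")  # works; remove the priming line and it raises
```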
gh_patches_debug_13697
|
rasdani/github-patches
|
git_diff
|
litestar-org__litestar-1857
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
StaticFilesConfig and virtual directories
I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem.
This is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.
https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `litestar/middleware/exceptions/middleware.py`
Content:
```
1 from __future__ import annotations
2
3 import pdb # noqa: T100
4 from dataclasses import asdict, dataclass, field
5 from inspect import getmro
6 from sys import exc_info
7 from traceback import format_exception
8 from typing import TYPE_CHECKING, Any, Type, cast
9
10 from litestar.connection import Request
11 from litestar.datastructures import Headers
12 from litestar.enums import MediaType, ScopeType
13 from litestar.exceptions import WebSocketException
14 from litestar.middleware.cors import CORSMiddleware
15 from litestar.middleware.exceptions._debug_response import create_debug_response
16 from litestar.status_codes import HTTP_500_INTERNAL_SERVER_ERROR
17
18 __all__ = ("ExceptionHandlerMiddleware", "ExceptionResponseContent", "create_exception_response")
19
20
21 if TYPE_CHECKING:
22 from litestar import Response
23 from litestar.app import Litestar
24 from litestar.logging import BaseLoggingConfig
25 from litestar.types import (
26 ASGIApp,
27 ExceptionHandler,
28 ExceptionHandlersMap,
29 Logger,
30 Receive,
31 Scope,
32 Send,
33 )
34 from litestar.types.asgi_types import WebSocketCloseEvent
35
36
37 def get_exception_handler(exception_handlers: ExceptionHandlersMap, exc: Exception) -> ExceptionHandler | None:
38 """Given a dictionary that maps exceptions and status codes to handler functions, and an exception, returns the
39 appropriate handler if existing.
40
41 Status codes are given preference over exception type.
42
43 If no status code match exists, each class in the MRO of the exception type is checked and
44 the first matching handler is returned.
45
46 Finally, if a ``500`` handler is registered, it will be returned for any exception that isn't a
47 subclass of :class:`HTTPException <litestar.exceptions.HTTPException>`.
48
49 Args:
50 exception_handlers: Mapping of status codes and exception types to handlers.
51 exc: Exception Instance to be resolved to a handler.
52
53 Returns:
54 Optional exception handler callable.
55 """
56 if not exception_handlers:
57 return None
58 status_code: int | None = getattr(exc, "status_code", None)
59 if status_code and (exception_handler := exception_handlers.get(status_code)):
60 return exception_handler
61 for cls in getmro(type(exc)):
62 if cls in exception_handlers:
63 return exception_handlers[cast("Type[Exception]", cls)]
64 if not hasattr(exc, "status_code") and HTTP_500_INTERNAL_SERVER_ERROR in exception_handlers:
65 return exception_handlers[HTTP_500_INTERNAL_SERVER_ERROR]
66 return None
67
68
69 @dataclass
70 class ExceptionResponseContent:
71 """Represent the contents of an exception-response."""
72
73 status_code: int
74 """Exception status code."""
75 detail: str
76 """Exception details or message."""
77 headers: dict[str, str] | None = field(default=None)
78 """Headers to attach to the response."""
79 extra: dict[str, Any] | list[Any] | None = field(default=None)
80 """An extra mapping to attach to the exception."""
81
82 def to_response(self) -> Response:
83 """Create a response from the model attributes.
84
85 Returns:
86 A response instance.
87 """
88 from litestar.response import Response
89
90 return Response(
91 content={k: v for k, v in asdict(self).items() if k != "headers" and v is not None},
92 headers=self.headers,
93 status_code=self.status_code,
94 media_type=MediaType.JSON,
95 )
96
97
98 def create_exception_response(exc: Exception) -> Response:
99 """Construct a response from an exception.
100
101 Notes:
102 - For instances of :class:`HTTPException <litestar.exceptions.HTTPException>` or other exception classes that have a
103 ``status_code`` attribute (e.g. Starlette exceptions), the status code is drawn from the exception, otherwise
104 response status is ``HTTP_500_INTERNAL_SERVER_ERROR``.
105
106 Args:
107 exc: An exception.
108
109 Returns:
110 Response: HTTP response constructed from exception details.
111 """
112 content = ExceptionResponseContent(
113 status_code=getattr(exc, "status_code", HTTP_500_INTERNAL_SERVER_ERROR),
114 detail=getattr(exc, "detail", repr(exc)),
115 headers=getattr(exc, "headers", None),
116 extra=getattr(exc, "extra", None),
117 )
118 return content.to_response()
119
120
121 class ExceptionHandlerMiddleware:
122 """Middleware used to wrap an ASGIApp inside a try catch block and handle any exceptions raised.
123
124 This used in multiple layers of Litestar.
125 """
126
127 def __init__(self, app: ASGIApp, debug: bool, exception_handlers: ExceptionHandlersMap) -> None:
128 """Initialize ``ExceptionHandlerMiddleware``.
129
130 Args:
131 app: The ``next`` ASGI app to call.
132 debug: Whether ``debug`` mode is enabled
133 exception_handlers: A dictionary mapping status codes and/or exception types to handler functions.
134 """
135 self.app = app
136 self.exception_handlers = exception_handlers
137 self.debug = debug
138
139 async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
140 """ASGI-callable.
141
142 Args:
143 scope: The ASGI connection scope.
144 receive: The ASGI receive function.
145 send: The ASGI send function.
146
147 Returns:
148 None
149 """
150 try:
151 await self.app(scope, receive, send)
152 except Exception as e: # noqa: BLE001
153 litestar_app = scope["app"]
154
155 if litestar_app.logging_config and (logger := litestar_app.logger):
156 self.handle_exception_logging(logger=logger, logging_config=litestar_app.logging_config, scope=scope)
157
158 for hook in litestar_app.after_exception:
159 await hook(e, scope)
160
161 if litestar_app.pdb_on_exception:
162 pdb.post_mortem()
163
164 if scope["type"] == ScopeType.HTTP:
165 await self.handle_request_exception(
166 litestar_app=litestar_app, scope=scope, receive=receive, send=send, exc=e
167 )
168 else:
169 await self.handle_websocket_exception(send=send, exc=e)
170
171 async def handle_request_exception(
172 self, litestar_app: Litestar, scope: Scope, receive: Receive, send: Send, exc: Exception
173 ) -> None:
174 """Handle exception raised inside 'http' scope routes.
175
176 Args:
177 litestar_app: The litestar app instance.
178 scope: The ASGI connection scope.
179 receive: The ASGI receive function.
180 send: The ASGI send function.
181 exc: The caught exception.
182
183 Returns:
184 None.
185 """
186
187 headers = Headers.from_scope(scope=scope)
188 if litestar_app.cors_config and (origin := headers.get("origin")):
189 cors_middleware = CORSMiddleware(app=self.app, config=litestar_app.cors_config)
190 send = cors_middleware.send_wrapper(send=send, origin=origin, has_cookie="cookie" in headers)
191
192 exception_handler = get_exception_handler(self.exception_handlers, exc) or self.default_http_exception_handler
193 request = Request[Any, Any, Any](scope=scope, receive=receive, send=send)
194 response = exception_handler(request, exc)
195 await response.to_asgi_response(app=litestar_app, request=request)(scope=scope, receive=receive, send=send)
196
197 @staticmethod
198 async def handle_websocket_exception(send: Send, exc: Exception) -> None:
199 """Handle exception raised inside 'websocket' scope routes.
200
201 Args:
202 send: The ASGI send function.
203 exc: The caught exception.
204
205 Returns:
206 None.
207 """
208 if isinstance(exc, WebSocketException):
209 code = exc.code
210 reason = exc.detail
211 else:
212 code = 4000 + getattr(exc, "status_code", HTTP_500_INTERNAL_SERVER_ERROR)
213 reason = getattr(exc, "detail", repr(exc))
214 event: WebSocketCloseEvent = {"type": "websocket.close", "code": code, "reason": reason}
215 await send(event)
216
217 def default_http_exception_handler(self, request: Request, exc: Exception) -> Response[Any]:
218 """Handle an HTTP exception by returning the appropriate response.
219
220 Args:
221 request: An HTTP Request instance.
222 exc: The caught exception.
223
224 Returns:
225 An HTTP response.
226 """
227 status_code = getattr(exc, "status_code", HTTP_500_INTERNAL_SERVER_ERROR)
228 if status_code == HTTP_500_INTERNAL_SERVER_ERROR and self.debug:
229 return create_debug_response(request=request, exc=exc)
230 return create_exception_response(exc)
231
232 def handle_exception_logging(self, logger: Logger, logging_config: BaseLoggingConfig, scope: Scope) -> None:
233 """Handle logging - if the litestar app has a logging config in place.
234
235 Args:
236 logger: A logger instance.
237 logging_config: Logging Config instance.
238 scope: The ASGI connection scope.
239
240 Returns:
241 None
242 """
243 if (
244 logging_config.log_exceptions == "always" or (logging_config.log_exceptions == "debug" and self.debug)
245 ) and logging_config.exception_logging_handler:
246 logging_config.exception_logging_handler(logger, scope, format_exception(*exc_info()))
247
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/litestar/middleware/exceptions/middleware.py b/litestar/middleware/exceptions/middleware.py
--- a/litestar/middleware/exceptions/middleware.py
+++ b/litestar/middleware/exceptions/middleware.py
@@ -109,9 +109,15 @@
Returns:
Response: HTTP response constructed from exception details.
"""
+ status_code = getattr(exc, "status_code", HTTP_500_INTERNAL_SERVER_ERROR)
+ if status_code == HTTP_500_INTERNAL_SERVER_ERROR:
+ detail = "Internal Server Error"
+ else:
+ detail = getattr(exc, "detail", repr(exc))
+
content = ExceptionResponseContent(
- status_code=getattr(exc, "status_code", HTTP_500_INTERNAL_SERVER_ERROR),
- detail=getattr(exc, "detail", repr(exc)),
+ status_code=status_code,
+ detail=detail,
headers=getattr(exc, "headers", None),
extra=getattr(exc, "extra", None),
)
|
{"golden_diff": "diff --git a/litestar/middleware/exceptions/middleware.py b/litestar/middleware/exceptions/middleware.py\n--- a/litestar/middleware/exceptions/middleware.py\n+++ b/litestar/middleware/exceptions/middleware.py\n@@ -109,9 +109,15 @@\n Returns:\n Response: HTTP response constructed from exception details.\n \"\"\"\n+ status_code = getattr(exc, \"status_code\", HTTP_500_INTERNAL_SERVER_ERROR)\n+ if status_code == HTTP_500_INTERNAL_SERVER_ERROR:\n+ detail = \"Internal Server Error\"\n+ else:\n+ detail = getattr(exc, \"detail\", repr(exc))\n+\n content = ExceptionResponseContent(\n- status_code=getattr(exc, \"status_code\", HTTP_500_INTERNAL_SERVER_ERROR),\n- detail=getattr(exc, \"detail\", repr(exc)),\n+ status_code=status_code,\n+ detail=detail,\n headers=getattr(exc, \"headers\", None),\n extra=getattr(exc, \"extra\", None),\n )\n", "issue": "StaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "from __future__ import annotations\n\nimport pdb # noqa: T100\nfrom dataclasses import asdict, dataclass, field\nfrom inspect import getmro\nfrom sys import exc_info\nfrom traceback import format_exception\nfrom typing import TYPE_CHECKING, Any, Type, cast\n\nfrom litestar.connection import Request\nfrom litestar.datastructures import Headers\nfrom litestar.enums import MediaType, ScopeType\nfrom litestar.exceptions import WebSocketException\nfrom litestar.middleware.cors import CORSMiddleware\nfrom litestar.middleware.exceptions._debug_response import create_debug_response\nfrom litestar.status_codes import HTTP_500_INTERNAL_SERVER_ERROR\n\n__all__ = (\"ExceptionHandlerMiddleware\", \"ExceptionResponseContent\", \"create_exception_response\")\n\n\nif TYPE_CHECKING:\n from litestar import Response\n from litestar.app import Litestar\n from litestar.logging import BaseLoggingConfig\n from litestar.types import (\n ASGIApp,\n ExceptionHandler,\n ExceptionHandlersMap,\n Logger,\n Receive,\n Scope,\n Send,\n )\n from litestar.types.asgi_types import WebSocketCloseEvent\n\n\ndef get_exception_handler(exception_handlers: ExceptionHandlersMap, exc: Exception) -> ExceptionHandler | None:\n \"\"\"Given a dictionary that maps exceptions and status codes to handler functions, and an exception, returns the\n appropriate handler if existing.\n\n Status codes are given preference over exception type.\n\n If no status code match exists, each class in the MRO of the exception type is checked and\n the first matching handler is returned.\n\n Finally, if a ``500`` handler is registered, it will be returned for any exception that isn't a\n subclass of :class:`HTTPException <litestar.exceptions.HTTPException>`.\n\n Args:\n exception_handlers: Mapping of status codes and exception types to handlers.\n exc: Exception Instance to be resolved to a handler.\n\n Returns:\n Optional exception handler callable.\n \"\"\"\n if not exception_handlers:\n return None\n status_code: int | 
None = getattr(exc, \"status_code\", None)\n if status_code and (exception_handler := exception_handlers.get(status_code)):\n return exception_handler\n for cls in getmro(type(exc)):\n if cls in exception_handlers:\n return exception_handlers[cast(\"Type[Exception]\", cls)]\n if not hasattr(exc, \"status_code\") and HTTP_500_INTERNAL_SERVER_ERROR in exception_handlers:\n return exception_handlers[HTTP_500_INTERNAL_SERVER_ERROR]\n return None\n\n\n@dataclass\nclass ExceptionResponseContent:\n \"\"\"Represent the contents of an exception-response.\"\"\"\n\n status_code: int\n \"\"\"Exception status code.\"\"\"\n detail: str\n \"\"\"Exception details or message.\"\"\"\n headers: dict[str, str] | None = field(default=None)\n \"\"\"Headers to attach to the response.\"\"\"\n extra: dict[str, Any] | list[Any] | None = field(default=None)\n \"\"\"An extra mapping to attach to the exception.\"\"\"\n\n def to_response(self) -> Response:\n \"\"\"Create a response from the model attributes.\n\n Returns:\n A response instance.\n \"\"\"\n from litestar.response import Response\n\n return Response(\n content={k: v for k, v in asdict(self).items() if k != \"headers\" and v is not None},\n headers=self.headers,\n status_code=self.status_code,\n media_type=MediaType.JSON,\n )\n\n\ndef create_exception_response(exc: Exception) -> Response:\n \"\"\"Construct a response from an exception.\n\n Notes:\n - For instances of :class:`HTTPException <litestar.exceptions.HTTPException>` or other exception classes that have a\n ``status_code`` attribute (e.g. Starlette exceptions), the status code is drawn from the exception, otherwise\n response status is ``HTTP_500_INTERNAL_SERVER_ERROR``.\n\n Args:\n exc: An exception.\n\n Returns:\n Response: HTTP response constructed from exception details.\n \"\"\"\n content = ExceptionResponseContent(\n status_code=getattr(exc, \"status_code\", HTTP_500_INTERNAL_SERVER_ERROR),\n detail=getattr(exc, \"detail\", repr(exc)),\n headers=getattr(exc, \"headers\", None),\n extra=getattr(exc, \"extra\", None),\n )\n return content.to_response()\n\n\nclass ExceptionHandlerMiddleware:\n \"\"\"Middleware used to wrap an ASGIApp inside a try catch block and handle any exceptions raised.\n\n This used in multiple layers of Litestar.\n \"\"\"\n\n def __init__(self, app: ASGIApp, debug: bool, exception_handlers: ExceptionHandlersMap) -> None:\n \"\"\"Initialize ``ExceptionHandlerMiddleware``.\n\n Args:\n app: The ``next`` ASGI app to call.\n debug: Whether ``debug`` mode is enabled\n exception_handlers: A dictionary mapping status codes and/or exception types to handler functions.\n \"\"\"\n self.app = app\n self.exception_handlers = exception_handlers\n self.debug = debug\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n \"\"\"ASGI-callable.\n\n Args:\n scope: The ASGI connection scope.\n receive: The ASGI receive function.\n send: The ASGI send function.\n\n Returns:\n None\n \"\"\"\n try:\n await self.app(scope, receive, send)\n except Exception as e: # noqa: BLE001\n litestar_app = scope[\"app\"]\n\n if litestar_app.logging_config and (logger := litestar_app.logger):\n self.handle_exception_logging(logger=logger, logging_config=litestar_app.logging_config, scope=scope)\n\n for hook in litestar_app.after_exception:\n await hook(e, scope)\n\n if litestar_app.pdb_on_exception:\n pdb.post_mortem()\n\n if scope[\"type\"] == ScopeType.HTTP:\n await self.handle_request_exception(\n litestar_app=litestar_app, scope=scope, receive=receive, send=send, exc=e\n )\n 
else:\n await self.handle_websocket_exception(send=send, exc=e)\n\n async def handle_request_exception(\n self, litestar_app: Litestar, scope: Scope, receive: Receive, send: Send, exc: Exception\n ) -> None:\n \"\"\"Handle exception raised inside 'http' scope routes.\n\n Args:\n litestar_app: The litestar app instance.\n scope: The ASGI connection scope.\n receive: The ASGI receive function.\n send: The ASGI send function.\n exc: The caught exception.\n\n Returns:\n None.\n \"\"\"\n\n headers = Headers.from_scope(scope=scope)\n if litestar_app.cors_config and (origin := headers.get(\"origin\")):\n cors_middleware = CORSMiddleware(app=self.app, config=litestar_app.cors_config)\n send = cors_middleware.send_wrapper(send=send, origin=origin, has_cookie=\"cookie\" in headers)\n\n exception_handler = get_exception_handler(self.exception_handlers, exc) or self.default_http_exception_handler\n request = Request[Any, Any, Any](scope=scope, receive=receive, send=send)\n response = exception_handler(request, exc)\n await response.to_asgi_response(app=litestar_app, request=request)(scope=scope, receive=receive, send=send)\n\n @staticmethod\n async def handle_websocket_exception(send: Send, exc: Exception) -> None:\n \"\"\"Handle exception raised inside 'websocket' scope routes.\n\n Args:\n send: The ASGI send function.\n exc: The caught exception.\n\n Returns:\n None.\n \"\"\"\n if isinstance(exc, WebSocketException):\n code = exc.code\n reason = exc.detail\n else:\n code = 4000 + getattr(exc, \"status_code\", HTTP_500_INTERNAL_SERVER_ERROR)\n reason = getattr(exc, \"detail\", repr(exc))\n event: WebSocketCloseEvent = {\"type\": \"websocket.close\", \"code\": code, \"reason\": reason}\n await send(event)\n\n def default_http_exception_handler(self, request: Request, exc: Exception) -> Response[Any]:\n \"\"\"Handle an HTTP exception by returning the appropriate response.\n\n Args:\n request: An HTTP Request instance.\n exc: The caught exception.\n\n Returns:\n An HTTP response.\n \"\"\"\n status_code = getattr(exc, \"status_code\", HTTP_500_INTERNAL_SERVER_ERROR)\n if status_code == HTTP_500_INTERNAL_SERVER_ERROR and self.debug:\n return create_debug_response(request=request, exc=exc)\n return create_exception_response(exc)\n\n def handle_exception_logging(self, logger: Logger, logging_config: BaseLoggingConfig, scope: Scope) -> None:\n \"\"\"Handle logging - if the litestar app has a logging config in place.\n\n Args:\n logger: A logger instance.\n logging_config: Logging Config instance.\n scope: The ASGI connection scope.\n\n Returns:\n None\n \"\"\"\n if (\n logging_config.log_exceptions == \"always\" or (logging_config.log_exceptions == \"debug\" and self.debug)\n ) and logging_config.exception_logging_handler:\n logging_config.exception_logging_handler(logger, scope, format_exception(*exc_info()))\n", "path": "litestar/middleware/exceptions/middleware.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport pdb # noqa: T100\nfrom dataclasses import asdict, dataclass, field\nfrom inspect import getmro\nfrom sys import exc_info\nfrom traceback import format_exception\nfrom typing import TYPE_CHECKING, Any, Type, cast\n\nfrom litestar.connection import Request\nfrom litestar.datastructures import Headers\nfrom litestar.enums import MediaType, ScopeType\nfrom litestar.exceptions import WebSocketException\nfrom litestar.middleware.cors import CORSMiddleware\nfrom litestar.middleware.exceptions._debug_response import create_debug_response\nfrom litestar.status_codes import 
HTTP_500_INTERNAL_SERVER_ERROR\n\n__all__ = (\"ExceptionHandlerMiddleware\", \"ExceptionResponseContent\", \"create_exception_response\")\n\n\nif TYPE_CHECKING:\n from litestar import Response\n from litestar.app import Litestar\n from litestar.logging import BaseLoggingConfig\n from litestar.types import (\n ASGIApp,\n ExceptionHandler,\n ExceptionHandlersMap,\n Logger,\n Receive,\n Scope,\n Send,\n )\n from litestar.types.asgi_types import WebSocketCloseEvent\n\n\ndef get_exception_handler(exception_handlers: ExceptionHandlersMap, exc: Exception) -> ExceptionHandler | None:\n \"\"\"Given a dictionary that maps exceptions and status codes to handler functions, and an exception, returns the\n appropriate handler if existing.\n\n Status codes are given preference over exception type.\n\n If no status code match exists, each class in the MRO of the exception type is checked and\n the first matching handler is returned.\n\n Finally, if a ``500`` handler is registered, it will be returned for any exception that isn't a\n subclass of :class:`HTTPException <litestar.exceptions.HTTPException>`.\n\n Args:\n exception_handlers: Mapping of status codes and exception types to handlers.\n exc: Exception Instance to be resolved to a handler.\n\n Returns:\n Optional exception handler callable.\n \"\"\"\n if not exception_handlers:\n return None\n status_code: int | None = getattr(exc, \"status_code\", None)\n if status_code and (exception_handler := exception_handlers.get(status_code)):\n return exception_handler\n for cls in getmro(type(exc)):\n if cls in exception_handlers:\n return exception_handlers[cast(\"Type[Exception]\", cls)]\n if not hasattr(exc, \"status_code\") and HTTP_500_INTERNAL_SERVER_ERROR in exception_handlers:\n return exception_handlers[HTTP_500_INTERNAL_SERVER_ERROR]\n return None\n\n\n@dataclass\nclass ExceptionResponseContent:\n \"\"\"Represent the contents of an exception-response.\"\"\"\n\n status_code: int\n \"\"\"Exception status code.\"\"\"\n detail: str\n \"\"\"Exception details or message.\"\"\"\n headers: dict[str, str] | None = field(default=None)\n \"\"\"Headers to attach to the response.\"\"\"\n extra: dict[str, Any] | list[Any] | None = field(default=None)\n \"\"\"An extra mapping to attach to the exception.\"\"\"\n\n def to_response(self) -> Response:\n \"\"\"Create a response from the model attributes.\n\n Returns:\n A response instance.\n \"\"\"\n from litestar.response import Response\n\n return Response(\n content={k: v for k, v in asdict(self).items() if k != \"headers\" and v is not None},\n headers=self.headers,\n status_code=self.status_code,\n media_type=MediaType.JSON,\n )\n\n\ndef create_exception_response(exc: Exception) -> Response:\n \"\"\"Construct a response from an exception.\n\n Notes:\n - For instances of :class:`HTTPException <litestar.exceptions.HTTPException>` or other exception classes that have a\n ``status_code`` attribute (e.g. 
Starlette exceptions), the status code is drawn from the exception, otherwise\n response status is ``HTTP_500_INTERNAL_SERVER_ERROR``.\n\n Args:\n exc: An exception.\n\n Returns:\n Response: HTTP response constructed from exception details.\n \"\"\"\n status_code = getattr(exc, \"status_code\", HTTP_500_INTERNAL_SERVER_ERROR)\n if status_code == HTTP_500_INTERNAL_SERVER_ERROR:\n detail = \"Internal Server Error\"\n else:\n detail = getattr(exc, \"detail\", repr(exc))\n\n content = ExceptionResponseContent(\n status_code=status_code,\n detail=detail,\n headers=getattr(exc, \"headers\", None),\n extra=getattr(exc, \"extra\", None),\n )\n return content.to_response()\n\n\nclass ExceptionHandlerMiddleware:\n \"\"\"Middleware used to wrap an ASGIApp inside a try catch block and handle any exceptions raised.\n\n This used in multiple layers of Litestar.\n \"\"\"\n\n def __init__(self, app: ASGIApp, debug: bool, exception_handlers: ExceptionHandlersMap) -> None:\n \"\"\"Initialize ``ExceptionHandlerMiddleware``.\n\n Args:\n app: The ``next`` ASGI app to call.\n debug: Whether ``debug`` mode is enabled\n exception_handlers: A dictionary mapping status codes and/or exception types to handler functions.\n \"\"\"\n self.app = app\n self.exception_handlers = exception_handlers\n self.debug = debug\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n \"\"\"ASGI-callable.\n\n Args:\n scope: The ASGI connection scope.\n receive: The ASGI receive function.\n send: The ASGI send function.\n\n Returns:\n None\n \"\"\"\n try:\n await self.app(scope, receive, send)\n except Exception as e: # noqa: BLE001\n litestar_app = scope[\"app\"]\n\n if litestar_app.logging_config and (logger := litestar_app.logger):\n self.handle_exception_logging(logger=logger, logging_config=litestar_app.logging_config, scope=scope)\n\n for hook in litestar_app.after_exception:\n await hook(e, scope)\n\n if litestar_app.pdb_on_exception:\n pdb.post_mortem()\n\n if scope[\"type\"] == ScopeType.HTTP:\n await self.handle_request_exception(\n litestar_app=litestar_app, scope=scope, receive=receive, send=send, exc=e\n )\n else:\n await self.handle_websocket_exception(send=send, exc=e)\n\n async def handle_request_exception(\n self, litestar_app: Litestar, scope: Scope, receive: Receive, send: Send, exc: Exception\n ) -> None:\n \"\"\"Handle exception raised inside 'http' scope routes.\n\n Args:\n litestar_app: The litestar app instance.\n scope: The ASGI connection scope.\n receive: The ASGI receive function.\n send: The ASGI send function.\n exc: The caught exception.\n\n Returns:\n None.\n \"\"\"\n\n headers = Headers.from_scope(scope=scope)\n if litestar_app.cors_config and (origin := headers.get(\"origin\")):\n cors_middleware = CORSMiddleware(app=self.app, config=litestar_app.cors_config)\n send = cors_middleware.send_wrapper(send=send, origin=origin, has_cookie=\"cookie\" in headers)\n\n exception_handler = get_exception_handler(self.exception_handlers, exc) or self.default_http_exception_handler\n request = Request[Any, Any, Any](scope=scope, receive=receive, send=send)\n response = exception_handler(request, exc)\n await response.to_asgi_response(app=litestar_app, request=request)(scope=scope, receive=receive, send=send)\n\n @staticmethod\n async def handle_websocket_exception(send: Send, exc: Exception) -> None:\n \"\"\"Handle exception raised inside 'websocket' scope routes.\n\n Args:\n send: The ASGI send function.\n exc: The caught exception.\n\n Returns:\n None.\n \"\"\"\n if isinstance(exc, 
WebSocketException):\n code = exc.code\n reason = exc.detail\n else:\n code = 4000 + getattr(exc, \"status_code\", HTTP_500_INTERNAL_SERVER_ERROR)\n reason = getattr(exc, \"detail\", repr(exc))\n event: WebSocketCloseEvent = {\"type\": \"websocket.close\", \"code\": code, \"reason\": reason}\n await send(event)\n\n def default_http_exception_handler(self, request: Request, exc: Exception) -> Response[Any]:\n \"\"\"Handle an HTTP exception by returning the appropriate response.\n\n Args:\n request: An HTTP Request instance.\n exc: The caught exception.\n\n Returns:\n An HTTP response.\n \"\"\"\n status_code = getattr(exc, \"status_code\", HTTP_500_INTERNAL_SERVER_ERROR)\n if status_code == HTTP_500_INTERNAL_SERVER_ERROR and self.debug:\n return create_debug_response(request=request, exc=exc)\n return create_exception_response(exc)\n\n def handle_exception_logging(self, logger: Logger, logging_config: BaseLoggingConfig, scope: Scope) -> None:\n \"\"\"Handle logging - if the litestar app has a logging config in place.\n\n Args:\n logger: A logger instance.\n logging_config: Logging Config instance.\n scope: The ASGI connection scope.\n\n Returns:\n None\n \"\"\"\n if (\n logging_config.log_exceptions == \"always\" or (logging_config.log_exceptions == \"debug\" and self.debug)\n ) and logging_config.exception_logging_handler:\n logging_config.exception_logging_handler(logger, scope, format_exception(*exc_info()))\n", "path": "litestar/middleware/exceptions/middleware.py"}]}
| 3,030 | 223 |
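The `get_exception_handler` logic visible in the middleware file above resolves handlers in a fixed order: an exact status-code match wins, then the exception's MRO is walked for a type match, and a registered 500 handler catches anything without a `status_code`. A minimal, purely illustrative Litestar app wiring both kinds of handlers (the route and handler names here are invented for the example, not taken from this row) might look like:

```python
from litestar import Litestar, Request, Response, get
from litestar.exceptions import HTTPException


def not_found_handler(request: Request, exc: Exception) -> Response:
    # Keyed on the status code, so it is checked before any type-based handler.
    return Response(content={"detail": "resource missing"}, status_code=404)


def http_exception_handler(request: Request, exc: HTTPException) -> Response:
    # Keyed on the exception type; subclasses also match via the MRO walk.
    return Response(content={"detail": exc.detail}, status_code=exc.status_code)


@get("/boom")
async def boom() -> None:
    raise HTTPException(status_code=400, detail="bad input")


app = Litestar(
    route_handlers=[boom],
    exception_handlers={
        404: not_found_handler,
        HTTPException: http_exception_handler,
    },
)
```

With this registration, the `HTTPException(status_code=400)` raised by `/boom` falls through to `http_exception_handler` (no 400 key exists), while any exception carrying `status_code == 404` would be routed to `not_found_handler` first.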
gh_patches_debug_3256
|
rasdani/github-patches
|
git_diff
|
e-valuation__EvaP-1467
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
OpenID login button is broken in latest firefox
Behavior:
When clicking the "login" button at `https://evap.hpi.de` a request is sent to `https://evap.hpi.de/oidc/authenticate/`, which returns a 302 and redirects to `https://evap.hpi.de/?next=/oidc/authenticate/`, which has no visible difference with `https://evap.hpi.de`. After clicking the login button again I'm redirected to `https://evap.hpi.de/?next=/oidc/authenticate/%3Fnext%3D/oidc/authenticate/`
Expected Behavior:
Display the openID page and allow login
Steps to reproduce
Go to https://evap.hpi.de and click the "login" button. Browser is an up-to-date firefox on linux
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `evap/middleware.py`
Content:
```
1 from django.contrib.auth.views import redirect_to_login
2
3
4 class RequireLoginMiddleware:
5 def __init__(self, get_response):
6 self.get_response = get_response
7
8 def __call__(self, request):
9 return self.get_response(request)
10
11 @staticmethod
12 def process_view(request, view_func, _view_args, _view_kwargs):
13 # Returning None tells django to pass the request on
14 if request.user.is_authenticated:
15 return None
16
17 if "no_login_required" in view_func.__dict__ and view_func.no_login_required:
18 return None
19
20 return redirect_to_login(request.get_full_path())
21
22
23 def no_login_required(func):
24 func.no_login_required = True
25 return func
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/evap/middleware.py b/evap/middleware.py
--- a/evap/middleware.py
+++ b/evap/middleware.py
@@ -17,6 +17,9 @@
if "no_login_required" in view_func.__dict__ and view_func.no_login_required:
return None
+ if view_func.__name__ in ["OIDCAuthenticationRequestView", "OIDCAuthenticationCallbackView"]:
+ return None
+
return redirect_to_login(request.get_full_path())
|
{"golden_diff": "diff --git a/evap/middleware.py b/evap/middleware.py\n--- a/evap/middleware.py\n+++ b/evap/middleware.py\n@@ -17,6 +17,9 @@\n if \"no_login_required\" in view_func.__dict__ and view_func.no_login_required:\n return None\n \n+ if view_func.__name__ in [\"OIDCAuthenticationRequestView\", \"OIDCAuthenticationCallbackView\"]:\n+ return None\n+\n return redirect_to_login(request.get_full_path())\n", "issue": "OpenID login button is broken in latest firefox\nBehavior:\r\nWhen clicking the \"login\" button at `https://evap.hpi.de` a request is sent to `https://evap.hpi.de/oidc/authenticate/`, which returns a 302 and redirects to `https://evap.hpi.de/?next=/oidc/authenticate/`, which has no visible difference with `https://evap.hpi.de`. After clicking the login button again I'm redirected to `https://evap.hpi.de/?next=/oidc/authenticate/%3Fnext%3D/oidc/authenticate/`\r\n\r\nExpected Behavior:\r\nDisplay the openID page and allow login\r\n\r\nSteps to reproduce\r\nGo to https://evap.hpi.de and click the \"login\" button. Browser is an up-to-date firefox on linux\n", "before_files": [{"content": "from django.contrib.auth.views import redirect_to_login\n\n\nclass RequireLoginMiddleware:\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n return self.get_response(request)\n\n @staticmethod\n def process_view(request, view_func, _view_args, _view_kwargs):\n # Returning None tells django to pass the request on\n if request.user.is_authenticated:\n return None\n\n if \"no_login_required\" in view_func.__dict__ and view_func.no_login_required:\n return None\n\n return redirect_to_login(request.get_full_path())\n\n\ndef no_login_required(func):\n func.no_login_required = True\n return func\n", "path": "evap/middleware.py"}], "after_files": [{"content": "from django.contrib.auth.views import redirect_to_login\n\n\nclass RequireLoginMiddleware:\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n return self.get_response(request)\n\n @staticmethod\n def process_view(request, view_func, _view_args, _view_kwargs):\n # Returning None tells django to pass the request on\n if request.user.is_authenticated:\n return None\n\n if \"no_login_required\" in view_func.__dict__ and view_func.no_login_required:\n return None\n\n if view_func.__name__ in [\"OIDCAuthenticationRequestView\", \"OIDCAuthenticationCallbackView\"]:\n return None\n\n return redirect_to_login(request.get_full_path())\n\n\ndef no_login_required(func):\n func.no_login_required = True\n return func\n", "path": "evap/middleware.py"}]}
| 629 | 113 |
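Because the rendered diff above drops indentation, here is how the middleware reads once the patch is applied — the added check lets the (presumably mozilla-django-oidc) authentication views through for anonymous users, which breaks the redirect loop described in the issue:

```python
from django.contrib.auth.views import redirect_to_login


class RequireLoginMiddleware:
    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        return self.get_response(request)

    @staticmethod
    def process_view(request, view_func, _view_args, _view_kwargs):
        # Returning None tells django to pass the request on
        if request.user.is_authenticated:
            return None

        if "no_login_required" in view_func.__dict__ and view_func.no_login_required:
            return None

        # The OIDC login/callback views must stay reachable while anonymous,
        # otherwise /oidc/authenticate/ is itself redirected to the login page.
        if view_func.__name__ in ["OIDCAuthenticationRequestView", "OIDCAuthenticationCallbackView"]:
            return None

        return redirect_to_login(request.get_full_path())
```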
gh_patches_debug_22241
|
rasdani/github-patches
|
git_diff
|
readthedocs__readthedocs.org-9639
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Telemetry: track `extensions` and `html_theme` Sphinx variables
While working on https://github.com/sphinx-doc/sphinx/issues/10070, @AA-Turner asked for the content of `extensions` and `html_theme`, and I think it could be useful to store them in the Telemetry database somehow. Considering these values may be dynamically populated, we should find a way to execute this code and grab their final values.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `readthedocs/telemetry/collectors.py`
Content:
```
1 """Data collectors."""
2
3 import json
4
5 import dparse
6 import structlog
7
8 from readthedocs.config.models import PythonInstallRequirements
9
10 log = structlog.get_logger(__name__)
11
12
13 class BuildDataCollector:
14
15 """
16 Build data collector.
17
18 Collect data from a runnig build.
19 """
20
21 def __init__(self, environment):
22 self.environment = environment
23 self.build = self.environment.build
24 self.project = self.environment.project
25 self.version = self.environment.version
26 self.config = self.environment.config
27 self.checkout_path = self.project.checkout_path(self.version.slug)
28
29 log.bind(
30 build_id=self.build["id"],
31 project_slug=self.project.slug,
32 version_slug=self.version.slug,
33 )
34
35 @staticmethod
36 def _safe_json_loads(content, default=None):
37 def lowercase(d): # pylint: disable=invalid-name
38 """Convert all dictionary keys to lowercase."""
39 return {k.lower(): i for k, i in d.items()}
40
41 # pylint: disable=broad-except
42 try:
43 # Use ``object_hook`` parameter to lowercase all the keys of the dictionary.
44 # This helps us to have our data normalized and improve queries.
45 return json.loads(content, object_hook=lowercase)
46 except Exception:
47 log.info(
48 "Error while loading JSON content.",
49 exc_info=True,
50 )
51 return default
52
53 def run(self, *args, **kwargs):
54 build_cmd = self.environment.run(*args, record=False, demux=True, **kwargs)
55 return build_cmd.exit_code, build_cmd.output, build_cmd.error
56
57 def collect(self):
58 """
59 Collect all relevant data from the runnig build.
60
61 Data that can be extracted from the database (project/organization)
62 isn't collected here.
63 """
64
65 # NOTE: we could run each command inside a try/except block to have a
66 # more granular protection and be able to save data from those commands
67 # that didn't fail. Otherwise, if one command fails, all the data for
68 # this Build is lost.
69
70 data = {}
71 data["config"] = {"user": self.config.source_config}
72 data["os"] = self._get_operating_system()
73 data["python"] = self._get_python_version()
74
75 user_apt_packages, all_apt_packages = self._get_apt_packages()
76 conda_packages = (
77 self._get_all_conda_packages() if self.config.is_using_conda else {}
78 )
79 data["packages"] = {
80 "pip": {
81 "user": self._get_user_pip_packages(),
82 "all": self._get_all_pip_packages(),
83 },
84 "conda": {
85 "all": conda_packages,
86 },
87 "apt": {
88 "user": user_apt_packages,
89 "all": all_apt_packages,
90 },
91 }
92 return data
93
94 def _get_all_conda_packages(self):
95 """
96 Get all the packages installed by the user using conda.
97
98 This includes top level and transitive dependencies.
99 The output of ``conda list`` is in the form of::
100
101 [
102 {
103 "base_url": "https://conda.anaconda.org/conda-forge",
104 "build_number": 0,
105 "build_string": "py_0",
106 "channel": "conda-forge",
107 "dist_name": "alabaster-0.7.12-py_0",
108 "name": "alabaster",
109 "platform": "noarch",
110 "version": "0.7.12"
111 },
112 {
113 "base_url": "https://conda.anaconda.org/conda-forge",
114 "build_number": 0,
115 "build_string": "pyh9f0ad1d_0",
116 "channel": "conda-forge",
117 "dist_name": "asn1crypto-1.4.0-pyh9f0ad1d_0",
118 "name": "asn1crypto",
119 "platform": "noarch",
120 "version": "1.4.0"
121 }
122 ]
123 """
124 code, stdout, _ = self.run(
125 "conda", "list", "--json", "--name", self.version.slug
126 )
127 if code == 0 and stdout:
128 packages = self._safe_json_loads(stdout, [])
129 packages = [
130 {
131 "name": package["name"],
132 "channel": package["channel"],
133 "version": package["version"],
134 }
135 for package in packages
136 ]
137 return packages
138 return []
139
140 def _get_user_pip_packages(self):
141 """
142 Get all the packages to be installed defined by the user.
143
144 It parses all the requirements files specified in the config file by
145 the user (python.install.requirements) using ``dparse`` --a 3rd party
146 package.
147
148 If the version of the package is explicit (==) it saves that particular
149 version. Otherwise, if it's not defined, it saves ``undefined`` and if
150 it's a non deterministic operation (like >=, <= or ~=) it saves
151 ``unknown`` in the version.
152
153 """
154 results = []
155 # pylint: disable=too-many-nested-blocks
156 for install in self.config.python.install:
157 if isinstance(install, PythonInstallRequirements):
158 if install.requirements:
159 cmd = ["cat", install.requirements]
160 _, stdout, _ = self.run(*cmd, cwd=self.checkout_path)
161 # pylint: disable=invalid-name
162 df = dparse.parse(
163 stdout, file_type=dparse.filetypes.requirements_txt
164 ).serialize()
165 dependencies = df.get("dependencies", [])
166 for requirement in dependencies:
167 name = requirement.get("name", "").lower()
168 if not name:
169 continue
170
171 # If the user defines a specific version in the
172 # requirements file, we save it Otherwise, we don't
173 # because we don't know which version will be
174 # installed.
175 version = "undefined"
176 specs = str(requirement.get("specs", ""))
177 if specs:
178 if specs.startswith("=="):
179 version = specs.replace("==", "", 1)
180 else:
181 version = "unknown"
182
183 results.append(
184 {
185 "name": name,
186 "version": version,
187 }
188 )
189 return results
190
191 def _get_all_pip_packages(self):
192 """
193 Get all the packages installed by pip.
194
195 This includes top level and transitive dependencies.
196 The output of ``pip list`` is in the form of::
197
198 [
199 {
200 "name": "requests-mock",
201 "version": "1.8.0"
202 },
203 {
204 "name": "requests-toolbelt",
205 "version": "0.9.1"
206 },
207 {
208 "name": "rstcheck",
209 "version": "3.3.1"
210 },
211 {
212 "name": "selectolax",
213 "version": "0.2.10"
214 },
215 {
216 "name": "slumber",
217 "version": "0.7.1"
218 }
219 ]
220 """
221 cmd = [
222 "python",
223 "-m",
224 "pip",
225 "list",
226 "--pre",
227 "--local",
228 "--format",
229 "json",
230 "--not-required",
231 ]
232 code, stdout, _ = self.run(*cmd)
233 if code == 0 and stdout:
234 return self._safe_json_loads(stdout, [])
235 return []
236
237 def _get_operating_system(self):
238 """
239 Get the current operating system.
240
241 The output of ``lsb_release --description`` is in the form of::
242
243 Description: Ubuntu 20.04.3 LTS
244 """
245 code, stdout, _ = self.run("lsb_release", "--description")
246 stdout = stdout.strip()
247 if code == 0 and stdout:
248 parts = stdout.split("\t")
249 if len(parts) == 2:
250 return parts[1]
251 return ""
252
253 def _get_apt_packages(self):
254 """
255 Get the list of installed apt packages (global and from the user).
256
257 The current source of user installed packages is the config file,
258 but we have only the name, so we take the version from the list of all
259 installed packages.
260 """
261 all_apt_packages = self._get_all_apt_packages()
262 all_apt_packages_dict = {
263 package["name"]: package["version"] for package in all_apt_packages
264 }
265 user_apt_packages = self._get_user_apt_packages()
266 for package in user_apt_packages:
267 package["version"] = all_apt_packages_dict.get(package["name"], "")
268 return user_apt_packages, all_apt_packages
269
270 def _get_all_apt_packages(self):
271 """
272 Get all installed apt packages and their versions.
273
274 The output of ``dpkg-query --show`` is the form of::
275
276 adduser 3.116ubuntu1
277 apt 1.6.14
278 base-files 10.1ubuntu2.11
279 base-passwd 3.5.44
280 bash 4.4.18-2ubuntu1.2
281 bsdutils 1:2.31.1-0.4ubuntu3.7
282 bzip2 1.0.6-8.1ubuntu0.2
283 coreutils 8.28-1ubuntu1
284 dash 0.5.8-2.10
285 debconf 1.5.66ubuntu1
286 debianutils 4.8.4
287 diffutils 1:3.6-1
288 dpkg 1.19.0.5ubuntu2.3
289 e2fsprogs 1.44.1-1ubuntu1.3
290 fdisk 2.31.1-0.4ubuntu3.7
291 findutils 4.6.0+git+20170828-2
292 gcc-8-base 8.4.0-1ubuntu1~18.04
293 gpgv 2.2.4-1ubuntu1.4
294 grep 3.1-2build1
295 gzip 1.6-5ubuntu1.2
296 hostname 3.20
297 """
298 code, stdout, _ = self.run(
299 "dpkg-query", "--showformat", "${package} ${version}\\n", "--show"
300 )
301 stdout = stdout.strip()
302 packages = []
303 if code != 0 or not stdout:
304 return packages
305
306 for line in stdout.split("\n"):
307 parts = line.split()
308 if len(parts) == 2:
309 package, version = parts
310 packages.append(
311 {
312 "name": package.lower(),
313 "version": version,
314 }
315 )
316
317 return packages
318
319 def _get_user_apt_packages(self):
320 return [
321 {"name": package.lower(), "version": ""}
322 for package in self.config.build.apt_packages
323 ]
324
325 def _get_python_version(self):
326 """
327 Get the python version currently used.
328
329 The output of ``python --version`` is in the form of::
330
331 Python 3.8.12
332 """
333 code, stdout, _ = self.run("python", "--version")
334 stdout = stdout.strip()
335 if code == 0 and stdout:
336 parts = stdout.split()
337 if len(parts) == 2:
338 return parts[1]
339 return ""
340
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/readthedocs/telemetry/collectors.py b/readthedocs/telemetry/collectors.py
--- a/readthedocs/telemetry/collectors.py
+++ b/readthedocs/telemetry/collectors.py
@@ -1,6 +1,7 @@
"""Data collectors."""
import json
+import os
import dparse
import structlog
@@ -89,6 +90,37 @@
"all": all_apt_packages,
},
}
+ data["doctool"] = self._get_doctool()
+ return data
+
+ def _get_doctool_name(self):
+ if self.version.is_sphinx_type:
+ return "sphinx"
+
+ if self.version.is_mkdocs_type:
+ return "mkdocs"
+
+ return "generic"
+
+ def _get_doctool(self):
+ data = {
+ "name": self._get_doctool_name(),
+ "extensions": [],
+ "html_theme": "",
+ }
+
+ if self._get_doctool_name() != "sphinx":
+ return data
+
+ conf_py_dir = os.path.join(
+ self.checkout_path,
+ os.path.dirname(self.config.sphinx.configuration),
+ )
+ filepath = os.path.join(conf_py_dir, "_build", "json", "telemetry.json")
+ if os.path.exists(filepath):
+ with open(filepath, "r") as json_file:
+ content = json_file.read()
+ data.update(self._safe_json_loads(content, {}))
return data
def _get_all_conda_packages(self):
|
{"golden_diff": "diff --git a/readthedocs/telemetry/collectors.py b/readthedocs/telemetry/collectors.py\n--- a/readthedocs/telemetry/collectors.py\n+++ b/readthedocs/telemetry/collectors.py\n@@ -1,6 +1,7 @@\n \"\"\"Data collectors.\"\"\"\n \n import json\n+import os\n \n import dparse\n import structlog\n@@ -89,6 +90,37 @@\n \"all\": all_apt_packages,\n },\n }\n+ data[\"doctool\"] = self._get_doctool()\n+ return data\n+\n+ def _get_doctool_name(self):\n+ if self.version.is_sphinx_type:\n+ return \"sphinx\"\n+\n+ if self.version.is_mkdocs_type:\n+ return \"mkdocs\"\n+\n+ return \"generic\"\n+\n+ def _get_doctool(self):\n+ data = {\n+ \"name\": self._get_doctool_name(),\n+ \"extensions\": [],\n+ \"html_theme\": \"\",\n+ }\n+\n+ if self._get_doctool_name() != \"sphinx\":\n+ return data\n+\n+ conf_py_dir = os.path.join(\n+ self.checkout_path,\n+ os.path.dirname(self.config.sphinx.configuration),\n+ )\n+ filepath = os.path.join(conf_py_dir, \"_build\", \"json\", \"telemetry.json\")\n+ if os.path.exists(filepath):\n+ with open(filepath, \"r\") as json_file:\n+ content = json_file.read()\n+ data.update(self._safe_json_loads(content, {}))\n return data\n \n def _get_all_conda_packages(self):\n", "issue": "Telemetry: track `extensions` and `html_theme` Sphinx variables\nWhile working on https://github.com/sphinx-doc/sphinx/issues/10070, @AA-Turner ask for the content of `extensions` and `html_theme` and I think it could be useful to store in the Telemetry database somehow. Considering these values may be dynamically populated, we should find a way to execute this code and grab the final value of them.\n", "before_files": [{"content": "\"\"\"Data collectors.\"\"\"\n\nimport json\n\nimport dparse\nimport structlog\n\nfrom readthedocs.config.models import PythonInstallRequirements\n\nlog = structlog.get_logger(__name__)\n\n\nclass BuildDataCollector:\n\n \"\"\"\n Build data collector.\n\n Collect data from a runnig build.\n \"\"\"\n\n def __init__(self, environment):\n self.environment = environment\n self.build = self.environment.build\n self.project = self.environment.project\n self.version = self.environment.version\n self.config = self.environment.config\n self.checkout_path = self.project.checkout_path(self.version.slug)\n\n log.bind(\n build_id=self.build[\"id\"],\n project_slug=self.project.slug,\n version_slug=self.version.slug,\n )\n\n @staticmethod\n def _safe_json_loads(content, default=None):\n def lowercase(d): # pylint: disable=invalid-name\n \"\"\"Convert all dictionary keys to lowercase.\"\"\"\n return {k.lower(): i for k, i in d.items()}\n\n # pylint: disable=broad-except\n try:\n # Use ``object_hook`` parameter to lowercase all the keys of the dictionary.\n # This helps us to have our data normalized and improve queries.\n return json.loads(content, object_hook=lowercase)\n except Exception:\n log.info(\n \"Error while loading JSON content.\",\n exc_info=True,\n )\n return default\n\n def run(self, *args, **kwargs):\n build_cmd = self.environment.run(*args, record=False, demux=True, **kwargs)\n return build_cmd.exit_code, build_cmd.output, build_cmd.error\n\n def collect(self):\n \"\"\"\n Collect all relevant data from the runnig build.\n\n Data that can be extracted from the database (project/organization)\n isn't collected here.\n \"\"\"\n\n # NOTE: we could run each command inside a try/except block to have a\n # more granular protection and be able to save data from those commands\n # that didn't fail. 
Otherwise, if one command fails, all the data for\n # this Build is lost.\n\n data = {}\n data[\"config\"] = {\"user\": self.config.source_config}\n data[\"os\"] = self._get_operating_system()\n data[\"python\"] = self._get_python_version()\n\n user_apt_packages, all_apt_packages = self._get_apt_packages()\n conda_packages = (\n self._get_all_conda_packages() if self.config.is_using_conda else {}\n )\n data[\"packages\"] = {\n \"pip\": {\n \"user\": self._get_user_pip_packages(),\n \"all\": self._get_all_pip_packages(),\n },\n \"conda\": {\n \"all\": conda_packages,\n },\n \"apt\": {\n \"user\": user_apt_packages,\n \"all\": all_apt_packages,\n },\n }\n return data\n\n def _get_all_conda_packages(self):\n \"\"\"\n Get all the packages installed by the user using conda.\n\n This includes top level and transitive dependencies.\n The output of ``conda list`` is in the form of::\n\n [\n {\n \"base_url\": \"https://conda.anaconda.org/conda-forge\",\n \"build_number\": 0,\n \"build_string\": \"py_0\",\n \"channel\": \"conda-forge\",\n \"dist_name\": \"alabaster-0.7.12-py_0\",\n \"name\": \"alabaster\",\n \"platform\": \"noarch\",\n \"version\": \"0.7.12\"\n },\n {\n \"base_url\": \"https://conda.anaconda.org/conda-forge\",\n \"build_number\": 0,\n \"build_string\": \"pyh9f0ad1d_0\",\n \"channel\": \"conda-forge\",\n \"dist_name\": \"asn1crypto-1.4.0-pyh9f0ad1d_0\",\n \"name\": \"asn1crypto\",\n \"platform\": \"noarch\",\n \"version\": \"1.4.0\"\n }\n ]\n \"\"\"\n code, stdout, _ = self.run(\n \"conda\", \"list\", \"--json\", \"--name\", self.version.slug\n )\n if code == 0 and stdout:\n packages = self._safe_json_loads(stdout, [])\n packages = [\n {\n \"name\": package[\"name\"],\n \"channel\": package[\"channel\"],\n \"version\": package[\"version\"],\n }\n for package in packages\n ]\n return packages\n return []\n\n def _get_user_pip_packages(self):\n \"\"\"\n Get all the packages to be installed defined by the user.\n\n It parses all the requirements files specified in the config file by\n the user (python.install.requirements) using ``dparse`` --a 3rd party\n package.\n\n If the version of the package is explicit (==) it saves that particular\n version. 
Otherwise, if it's not defined, it saves ``undefined`` and if\n it's a non deterministic operation (like >=, <= or ~=) it saves\n ``unknown`` in the version.\n\n \"\"\"\n results = []\n # pylint: disable=too-many-nested-blocks\n for install in self.config.python.install:\n if isinstance(install, PythonInstallRequirements):\n if install.requirements:\n cmd = [\"cat\", install.requirements]\n _, stdout, _ = self.run(*cmd, cwd=self.checkout_path)\n # pylint: disable=invalid-name\n df = dparse.parse(\n stdout, file_type=dparse.filetypes.requirements_txt\n ).serialize()\n dependencies = df.get(\"dependencies\", [])\n for requirement in dependencies:\n name = requirement.get(\"name\", \"\").lower()\n if not name:\n continue\n\n # If the user defines a specific version in the\n # requirements file, we save it Otherwise, we don't\n # because we don't know which version will be\n # installed.\n version = \"undefined\"\n specs = str(requirement.get(\"specs\", \"\"))\n if specs:\n if specs.startswith(\"==\"):\n version = specs.replace(\"==\", \"\", 1)\n else:\n version = \"unknown\"\n\n results.append(\n {\n \"name\": name,\n \"version\": version,\n }\n )\n return results\n\n def _get_all_pip_packages(self):\n \"\"\"\n Get all the packages installed by pip.\n\n This includes top level and transitive dependencies.\n The output of ``pip list`` is in the form of::\n\n [\n {\n \"name\": \"requests-mock\",\n \"version\": \"1.8.0\"\n },\n {\n \"name\": \"requests-toolbelt\",\n \"version\": \"0.9.1\"\n },\n {\n \"name\": \"rstcheck\",\n \"version\": \"3.3.1\"\n },\n {\n \"name\": \"selectolax\",\n \"version\": \"0.2.10\"\n },\n {\n \"name\": \"slumber\",\n \"version\": \"0.7.1\"\n }\n ]\n \"\"\"\n cmd = [\n \"python\",\n \"-m\",\n \"pip\",\n \"list\",\n \"--pre\",\n \"--local\",\n \"--format\",\n \"json\",\n \"--not-required\",\n ]\n code, stdout, _ = self.run(*cmd)\n if code == 0 and stdout:\n return self._safe_json_loads(stdout, [])\n return []\n\n def _get_operating_system(self):\n \"\"\"\n Get the current operating system.\n\n The output of ``lsb_release --description`` is in the form of::\n\n Description:\tUbuntu 20.04.3 LTS\n \"\"\"\n code, stdout, _ = self.run(\"lsb_release\", \"--description\")\n stdout = stdout.strip()\n if code == 0 and stdout:\n parts = stdout.split(\"\\t\")\n if len(parts) == 2:\n return parts[1]\n return \"\"\n\n def _get_apt_packages(self):\n \"\"\"\n Get the list of installed apt packages (global and from the user).\n\n The current source of user installed packages is the config file,\n but we have only the name, so we take the version from the list of all\n installed packages.\n \"\"\"\n all_apt_packages = self._get_all_apt_packages()\n all_apt_packages_dict = {\n package[\"name\"]: package[\"version\"] for package in all_apt_packages\n }\n user_apt_packages = self._get_user_apt_packages()\n for package in user_apt_packages:\n package[\"version\"] = all_apt_packages_dict.get(package[\"name\"], \"\")\n return user_apt_packages, all_apt_packages\n\n def _get_all_apt_packages(self):\n \"\"\"\n Get all installed apt packages and their versions.\n\n The output of ``dpkg-query --show`` is the form of::\n\n adduser 3.116ubuntu1\n apt 1.6.14\n base-files 10.1ubuntu2.11\n base-passwd 3.5.44\n bash 4.4.18-2ubuntu1.2\n bsdutils 1:2.31.1-0.4ubuntu3.7\n bzip2 1.0.6-8.1ubuntu0.2\n coreutils 8.28-1ubuntu1\n dash 0.5.8-2.10\n debconf 1.5.66ubuntu1\n debianutils 4.8.4\n diffutils 1:3.6-1\n dpkg 1.19.0.5ubuntu2.3\n e2fsprogs 1.44.1-1ubuntu1.3\n fdisk 2.31.1-0.4ubuntu3.7\n findutils 
4.6.0+git+20170828-2\n gcc-8-base 8.4.0-1ubuntu1~18.04\n gpgv 2.2.4-1ubuntu1.4\n grep 3.1-2build1\n gzip 1.6-5ubuntu1.2\n hostname 3.20\n \"\"\"\n code, stdout, _ = self.run(\n \"dpkg-query\", \"--showformat\", \"${package} ${version}\\\\n\", \"--show\"\n )\n stdout = stdout.strip()\n packages = []\n if code != 0 or not stdout:\n return packages\n\n for line in stdout.split(\"\\n\"):\n parts = line.split()\n if len(parts) == 2:\n package, version = parts\n packages.append(\n {\n \"name\": package.lower(),\n \"version\": version,\n }\n )\n\n return packages\n\n def _get_user_apt_packages(self):\n return [\n {\"name\": package.lower(), \"version\": \"\"}\n for package in self.config.build.apt_packages\n ]\n\n def _get_python_version(self):\n \"\"\"\n Get the python version currently used.\n\n The output of ``python --version`` is in the form of::\n\n Python 3.8.12\n \"\"\"\n code, stdout, _ = self.run(\"python\", \"--version\")\n stdout = stdout.strip()\n if code == 0 and stdout:\n parts = stdout.split()\n if len(parts) == 2:\n return parts[1]\n return \"\"\n", "path": "readthedocs/telemetry/collectors.py"}], "after_files": [{"content": "\"\"\"Data collectors.\"\"\"\n\nimport json\nimport os\n\nimport dparse\nimport structlog\n\nfrom readthedocs.config.models import PythonInstallRequirements\n\nlog = structlog.get_logger(__name__)\n\n\nclass BuildDataCollector:\n\n \"\"\"\n Build data collector.\n\n Collect data from a runnig build.\n \"\"\"\n\n def __init__(self, environment):\n self.environment = environment\n self.build = self.environment.build\n self.project = self.environment.project\n self.version = self.environment.version\n self.config = self.environment.config\n self.checkout_path = self.project.checkout_path(self.version.slug)\n\n log.bind(\n build_id=self.build[\"id\"],\n project_slug=self.project.slug,\n version_slug=self.version.slug,\n )\n\n @staticmethod\n def _safe_json_loads(content, default=None):\n def lowercase(d): # pylint: disable=invalid-name\n \"\"\"Convert all dictionary keys to lowercase.\"\"\"\n return {k.lower(): i for k, i in d.items()}\n\n # pylint: disable=broad-except\n try:\n # Use ``object_hook`` parameter to lowercase all the keys of the dictionary.\n # This helps us to have our data normalized and improve queries.\n return json.loads(content, object_hook=lowercase)\n except Exception:\n log.info(\n \"Error while loading JSON content.\",\n exc_info=True,\n )\n return default\n\n def run(self, *args, **kwargs):\n build_cmd = self.environment.run(*args, record=False, demux=True, **kwargs)\n return build_cmd.exit_code, build_cmd.output, build_cmd.error\n\n def collect(self):\n \"\"\"\n Collect all relevant data from the runnig build.\n\n Data that can be extracted from the database (project/organization)\n isn't collected here.\n \"\"\"\n\n # NOTE: we could run each command inside a try/except block to have a\n # more granular protection and be able to save data from those commands\n # that didn't fail. 
Otherwise, if one command fails, all the data for\n # this Build is lost.\n\n data = {}\n data[\"config\"] = {\"user\": self.config.source_config}\n data[\"os\"] = self._get_operating_system()\n data[\"python\"] = self._get_python_version()\n\n user_apt_packages, all_apt_packages = self._get_apt_packages()\n conda_packages = (\n self._get_all_conda_packages() if self.config.is_using_conda else {}\n )\n data[\"packages\"] = {\n \"pip\": {\n \"user\": self._get_user_pip_packages(),\n \"all\": self._get_all_pip_packages(),\n },\n \"conda\": {\n \"all\": conda_packages,\n },\n \"apt\": {\n \"user\": user_apt_packages,\n \"all\": all_apt_packages,\n },\n }\n data[\"doctool\"] = self._get_doctool()\n return data\n\n def _get_doctool_name(self):\n if self.version.is_sphinx_type:\n return \"sphinx\"\n\n if self.version.is_mkdocs_type:\n return \"mkdocs\"\n\n return \"generic\"\n\n def _get_doctool(self):\n data = {\n \"name\": self._get_doctool_name(),\n \"extensions\": [],\n \"html_theme\": \"\",\n }\n\n if self._get_doctool_name() != \"sphinx\":\n return data\n\n conf_py_dir = os.path.join(\n self.checkout_path,\n os.path.dirname(self.config.sphinx.configuration),\n )\n filepath = os.path.join(conf_py_dir, \"_build\", \"json\", \"telemetry.json\")\n if os.path.exists(filepath):\n with open(filepath, \"r\") as json_file:\n content = json_file.read()\n data.update(self._safe_json_loads(content, {}))\n return data\n\n def _get_all_conda_packages(self):\n \"\"\"\n Get all the packages installed by the user using conda.\n\n This includes top level and transitive dependencies.\n The output of ``conda list`` is in the form of::\n\n [\n {\n \"base_url\": \"https://conda.anaconda.org/conda-forge\",\n \"build_number\": 0,\n \"build_string\": \"py_0\",\n \"channel\": \"conda-forge\",\n \"dist_name\": \"alabaster-0.7.12-py_0\",\n \"name\": \"alabaster\",\n \"platform\": \"noarch\",\n \"version\": \"0.7.12\"\n },\n {\n \"base_url\": \"https://conda.anaconda.org/conda-forge\",\n \"build_number\": 0,\n \"build_string\": \"pyh9f0ad1d_0\",\n \"channel\": \"conda-forge\",\n \"dist_name\": \"asn1crypto-1.4.0-pyh9f0ad1d_0\",\n \"name\": \"asn1crypto\",\n \"platform\": \"noarch\",\n \"version\": \"1.4.0\"\n }\n ]\n \"\"\"\n code, stdout, _ = self.run(\n \"conda\", \"list\", \"--json\", \"--name\", self.version.slug\n )\n if code == 0 and stdout:\n packages = self._safe_json_loads(stdout, [])\n packages = [\n {\n \"name\": package[\"name\"],\n \"channel\": package[\"channel\"],\n \"version\": package[\"version\"],\n }\n for package in packages\n ]\n return packages\n return []\n\n def _get_user_pip_packages(self):\n \"\"\"\n Get all the packages to be installed defined by the user.\n\n It parses all the requirements files specified in the config file by\n the user (python.install.requirements) using ``dparse`` --a 3rd party\n package.\n\n If the version of the package is explicit (==) it saves that particular\n version. 
Otherwise, if it's not defined, it saves ``undefined`` and if\n it's a non deterministic operation (like >=, <= or ~=) it saves\n ``unknown`` in the version.\n\n \"\"\"\n results = []\n # pylint: disable=too-many-nested-blocks\n for install in self.config.python.install:\n if isinstance(install, PythonInstallRequirements):\n if install.requirements:\n cmd = [\"cat\", install.requirements]\n _, stdout, _ = self.run(*cmd, cwd=self.checkout_path)\n # pylint: disable=invalid-name\n df = dparse.parse(\n stdout, file_type=dparse.filetypes.requirements_txt\n ).serialize()\n dependencies = df.get(\"dependencies\", [])\n for requirement in dependencies:\n name = requirement.get(\"name\", \"\").lower()\n if not name:\n continue\n\n # If the user defines a specific version in the\n # requirements file, we save it Otherwise, we don't\n # because we don't know which version will be\n # installed.\n version = \"undefined\"\n specs = str(requirement.get(\"specs\", \"\"))\n if specs:\n if specs.startswith(\"==\"):\n version = specs.replace(\"==\", \"\", 1)\n else:\n version = \"unknown\"\n\n results.append(\n {\n \"name\": name,\n \"version\": version,\n }\n )\n return results\n\n def _get_all_pip_packages(self):\n \"\"\"\n Get all the packages installed by pip.\n\n This includes top level and transitive dependencies.\n The output of ``pip list`` is in the form of::\n\n [\n {\n \"name\": \"requests-mock\",\n \"version\": \"1.8.0\"\n },\n {\n \"name\": \"requests-toolbelt\",\n \"version\": \"0.9.1\"\n },\n {\n \"name\": \"rstcheck\",\n \"version\": \"3.3.1\"\n },\n {\n \"name\": \"selectolax\",\n \"version\": \"0.2.10\"\n },\n {\n \"name\": \"slumber\",\n \"version\": \"0.7.1\"\n }\n ]\n \"\"\"\n cmd = [\n \"python\",\n \"-m\",\n \"pip\",\n \"list\",\n \"--pre\",\n \"--local\",\n \"--format\",\n \"json\",\n \"--not-required\",\n ]\n code, stdout, _ = self.run(*cmd)\n if code == 0 and stdout:\n return self._safe_json_loads(stdout, [])\n return []\n\n def _get_operating_system(self):\n \"\"\"\n Get the current operating system.\n\n The output of ``lsb_release --description`` is in the form of::\n\n Description:\tUbuntu 20.04.3 LTS\n \"\"\"\n code, stdout, _ = self.run(\"lsb_release\", \"--description\")\n stdout = stdout.strip()\n if code == 0 and stdout:\n parts = stdout.split(\"\\t\")\n if len(parts) == 2:\n return parts[1]\n return \"\"\n\n def _get_apt_packages(self):\n \"\"\"\n Get the list of installed apt packages (global and from the user).\n\n The current source of user installed packages is the config file,\n but we have only the name, so we take the version from the list of all\n installed packages.\n \"\"\"\n all_apt_packages = self._get_all_apt_packages()\n all_apt_packages_dict = {\n package[\"name\"]: package[\"version\"] for package in all_apt_packages\n }\n user_apt_packages = self._get_user_apt_packages()\n for package in user_apt_packages:\n package[\"version\"] = all_apt_packages_dict.get(package[\"name\"], \"\")\n return user_apt_packages, all_apt_packages\n\n def _get_all_apt_packages(self):\n \"\"\"\n Get all installed apt packages and their versions.\n\n The output of ``dpkg-query --show`` is the form of::\n\n adduser 3.116ubuntu1\n apt 1.6.14\n base-files 10.1ubuntu2.11\n base-passwd 3.5.44\n bash 4.4.18-2ubuntu1.2\n bsdutils 1:2.31.1-0.4ubuntu3.7\n bzip2 1.0.6-8.1ubuntu0.2\n coreutils 8.28-1ubuntu1\n dash 0.5.8-2.10\n debconf 1.5.66ubuntu1\n debianutils 4.8.4\n diffutils 1:3.6-1\n dpkg 1.19.0.5ubuntu2.3\n e2fsprogs 1.44.1-1ubuntu1.3\n fdisk 2.31.1-0.4ubuntu3.7\n findutils 
4.6.0+git+20170828-2\n gcc-8-base 8.4.0-1ubuntu1~18.04\n gpgv 2.2.4-1ubuntu1.4\n grep 3.1-2build1\n gzip 1.6-5ubuntu1.2\n hostname 3.20\n \"\"\"\n code, stdout, _ = self.run(\n \"dpkg-query\", \"--showformat\", \"${package} ${version}\\\\n\", \"--show\"\n )\n stdout = stdout.strip()\n packages = []\n if code != 0 or not stdout:\n return packages\n\n for line in stdout.split(\"\\n\"):\n parts = line.split()\n if len(parts) == 2:\n package, version = parts\n packages.append(\n {\n \"name\": package.lower(),\n \"version\": version,\n }\n )\n\n return packages\n\n def _get_user_apt_packages(self):\n return [\n {\"name\": package.lower(), \"version\": \"\"}\n for package in self.config.build.apt_packages\n ]\n\n def _get_python_version(self):\n \"\"\"\n Get the python version currently used.\n\n The output of ``python --version`` is in the form of::\n\n Python 3.8.12\n \"\"\"\n code, stdout, _ = self.run(\"python\", \"--version\")\n stdout = stdout.strip()\n if code == 0 and stdout:\n parts = stdout.split()\n if len(parts) == 2:\n return parts[1]\n return \"\"\n", "path": "readthedocs/telemetry/collectors.py"}]}
| 3,784 | 361 |
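The `_get_doctool` method added by the patch only reads `_build/json/telemetry.json`; nothing in this row shows the code that writes that file. A sketch of how it could be produced — assuming a small Sphinx `build-finished` hook whose output location is arranged to match what the collector expects — is:

```python
import json
import os


def dump_telemetry(app, exception):
    # Record the final, fully-populated values of ``extensions`` and
    # ``html_theme`` once the build has finished.
    data = {
        "extensions": list(app.config.extensions),
        "html_theme": getattr(app.config, "html_theme", ""),
    }
    with open(os.path.join(app.outdir, "telemetry.json"), "w") as fh:
        json.dump(data, fh)


def setup(app):
    app.connect("build-finished", dump_telemetry)
    return {"version": "1.0", "parallel_read_safe": True}
```

Dumping after the build matters because, as the issue notes, both values may be populated dynamically in `conf.py`, so only their final state is meaningful.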
gh_patches_debug_35959
|
rasdani/github-patches
|
git_diff
|
electricitymaps__electricitymaps-contrib-2088
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
US-BPA failing due to changes in data layout and timezone
```shell
File "/home/contrib/parsers/US_BPA.py", line 113, in fetch_production
raw_data = get_data(GENERATION_URL, session=session)
File "/home/contrib/parsers/US_BPA.py", line 25, in get_data
df = pd.read_table(StringIO(req.text), skiprows=5)
File "/usr/local/lib/python3.6/site-packages/pandas/io/parsers.py", line 685, in parser_f
return _read(filepath_or_buffer, kwds)
File "/usr/local/lib/python3.6/site-packages/pandas/io/parsers.py", line 463, in _read
data = parser.read(nrows)
File "/usr/local/lib/python3.6/site-packages/pandas/io/parsers.py", line 1154, in read
ret = self._engine.read(nrows)
File "/usr/local/lib/python3.6/site-packages/pandas/io/parsers.py", line 2059, in read
data = self._reader.read(nrows)
File "pandas/_libs/parsers.pyx", line 881, in pandas._libs.parsers.TextReader.read
File "pandas/_libs/parsers.pyx", line 896, in pandas._libs.parsers.TextReader._read_low_memory
File "pandas/_libs/parsers.pyx", line 950, in pandas._libs.parsers.TextReader._read_rows
File "pandas/_libs/parsers.pyx", line 937, in pandas._libs.parsers.TextReader._tokenize_rows
File "pandas/_libs/parsers.pyx", line 2132, in pandas._libs.parsers.raise_parser_error
pandas.errors.ParserError: Error tokenizing data. C error: Expected 1 fields in line 12, saw 6
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsers/US_BPA.py`
Content:
```
1 #!/usr/bin/env python3
2
3 """Parser for the Bonneville Power Administration area of the USA."""
4
5 from datetime import datetime, timedelta
6 from io import StringIO
7 import logging
8 import pandas as pd
9 import pytz
10 import requests
11
12
13 GENERATION_URL = 'https://transmission.bpa.gov/business/operations/Wind/baltwg.txt'
14
15 GENERATION_MAPPING = {'Wind': 'wind',
16 'Hydro': 'hydro',
17 'Fossil/Biomass': 'unknown',
18 'Nuclear': 'nuclear'}
19
20
21 def get_data(url, session=None):
22 """Returns a pandas dataframe."""
23 s=session or requests.Session()
24 req = s.get(url)
25 df = pd.read_table(StringIO(req.text), skiprows=5)
26
27 return df
28
29
30 def timestamp_converter(timestamp):
31 """Turns string representation of time into an aware datetime object."""
32
33 dt_naive = datetime.strptime(timestamp, '%m/%d/%Y %H:%M')
34 western = pytz.timezone('America/Los_Angeles')
35 dt_aware = western.localize(dt_naive)
36
37 return dt_aware
38
39
40 def data_processor(df, logger):
41 """
42 Takes a dataframe and drops all generation rows that are empty or more
43 than 1 day old. Turns each row into a dictionary and removes any generation
44 types that are unknown.
45 Returns a list of tuples in the form (datetime, production).
46 """
47
48 df= df.dropna(thresh=2)
49 df.columns = df.columns.str.strip()
50
51 # 5min data for the last 24 hours.
52 df = df.tail(288)
53 df['Date/Time'] = df['Date/Time'].map(timestamp_converter)
54
55 known_keys = GENERATION_MAPPING.keys() | {'Date/Time', 'Load'}
56 column_headers = set(df.columns)
57
58 unknown_keys = column_headers - known_keys
59
60 for k in unknown_keys:
61 logger.warning('New data {} seen in US-BPA data source'.format(k),
62 extra={'key': 'US-BPA'})
63
64 keys_to_remove = unknown_keys | {'Load'}
65
66 processed_data = []
67 for index, row in df.iterrows():
68 production = row.to_dict()
69
70 dt = production.pop('Date/Time')
71 dt = dt.to_pydatetime()
72 mapped_production = {GENERATION_MAPPING[k]:v for k,v in production.items()
73 if k not in keys_to_remove}
74
75 processed_data.append((dt, mapped_production))
76
77 return processed_data
78
79
80 def fetch_production(zone_key='US-BPA', session=None, target_datetime=None, logger=logging.getLogger(__name__)):
81 """
82 Requests the last known production mix (in MW) of a given zone
83 Arguments:
84 zone_key (optional) -- used in case a parser is able to fetch multiple zones
85 session (optional) -- request session passed in order to re-use an existing session
86 Return:
87 A dictionary in the form:
88 {
89 'zoneKey': 'FR',
90 'datetime': '2017-01-01T00:00:00Z',
91 'production': {
92 'biomass': 0.0,
93 'coal': 0.0,
94 'gas': 0.0,
95 'hydro': 0.0,
96 'nuclear': null,
97 'oil': 0.0,
98 'solar': 0.0,
99 'wind': 0.0,
100 'geothermal': 0.0,
101 'unknown': 0.0
102 },
103 'storage': {
104 'hydro': -10.0,
105 },
106 'source': 'mysource.com'
107 }
108 """
109
110 if target_datetime:
111 raise NotImplementedError('This parser is not yet able to parse past dates')
112
113 raw_data = get_data(GENERATION_URL, session=session)
114 processed_data = data_processor(raw_data, logger)
115
116 data = []
117 for item in processed_data:
118 datapoint = {'zoneKey': zone_key,
119 'datetime': item[0],
120 'production': item[1],
121 'storage': {},
122 'source': 'bpa.gov'}
123
124 data.append(datapoint)
125
126 return data
127
128
129 if __name__ == '__main__':
130 print('fetch_production() ->')
131 print(fetch_production())
132
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/parsers/US_BPA.py b/parsers/US_BPA.py
--- a/parsers/US_BPA.py
+++ b/parsers/US_BPA.py
@@ -2,11 +2,11 @@
"""Parser for the Bonneville Power Administration area of the USA."""
-from datetime import datetime, timedelta
+
from io import StringIO
+import arrow
import logging
import pandas as pd
-import pytz
import requests
@@ -20,19 +20,18 @@
def get_data(url, session=None):
"""Returns a pandas dataframe."""
- s=session or requests.Session()
+ s = session or requests.Session()
req = s.get(url)
- df = pd.read_table(StringIO(req.text), skiprows=5)
+ df = pd.read_table(StringIO(req.text), skiprows=11)
return df
def timestamp_converter(timestamp):
- """Turns string representation of time into an aware datetime object."""
+ """Turns a timestamp str into an aware datetime object."""
- dt_naive = datetime.strptime(timestamp, '%m/%d/%Y %H:%M')
- western = pytz.timezone('America/Los_Angeles')
- dt_aware = western.localize(dt_naive)
+ arr_dt_naive = arrow.get(timestamp, 'MM/DD/YYYY HH:mm')
+ dt_aware = arr_dt_naive.replace(tzinfo='America/Los_Angeles').datetime
return dt_aware
@@ -45,7 +44,7 @@
Returns a list of tuples in the form (datetime, production).
"""
- df= df.dropna(thresh=2)
+ df = df.dropna(thresh=2)
df.columns = df.columns.str.strip()
# 5min data for the last 24 hours.
@@ -59,7 +58,7 @@
for k in unknown_keys:
logger.warning('New data {} seen in US-BPA data source'.format(k),
- extra={'key': 'US-BPA'})
+ extra={'key': 'US-BPA'})
keys_to_remove = unknown_keys | {'Load'}
@@ -69,7 +68,7 @@
dt = production.pop('Date/Time')
dt = dt.to_pydatetime()
- mapped_production = {GENERATION_MAPPING[k]:v for k,v in production.items()
+ mapped_production = {GENERATION_MAPPING[k]: v for k, v in production.items()
if k not in keys_to_remove}
processed_data.append((dt, mapped_production))
|
{"golden_diff": "diff --git a/parsers/US_BPA.py b/parsers/US_BPA.py\n--- a/parsers/US_BPA.py\n+++ b/parsers/US_BPA.py\n@@ -2,11 +2,11 @@\n \n \"\"\"Parser for the Bonneville Power Administration area of the USA.\"\"\"\n \n-from datetime import datetime, timedelta\n+\n from io import StringIO\n+import arrow\n import logging\n import pandas as pd\n-import pytz\n import requests\n \n \n@@ -20,19 +20,18 @@\n \n def get_data(url, session=None):\n \"\"\"Returns a pandas dataframe.\"\"\"\n- s=session or requests.Session()\n+ s = session or requests.Session()\n req = s.get(url)\n- df = pd.read_table(StringIO(req.text), skiprows=5)\n+ df = pd.read_table(StringIO(req.text), skiprows=11)\n \n return df\n \n \n def timestamp_converter(timestamp):\n- \"\"\"Turns string representation of time into an aware datetime object.\"\"\"\n+ \"\"\"Turns a timestamp str into an aware datetime object.\"\"\"\n \n- dt_naive = datetime.strptime(timestamp, '%m/%d/%Y %H:%M')\n- western = pytz.timezone('America/Los_Angeles')\n- dt_aware = western.localize(dt_naive)\n+ arr_dt_naive = arrow.get(timestamp, 'MM/DD/YYYY HH:mm')\n+ dt_aware = arr_dt_naive.replace(tzinfo='America/Los_Angeles').datetime\n \n return dt_aware\n \n@@ -45,7 +44,7 @@\n Returns a list of tuples in the form (datetime, production).\n \"\"\"\n \n- df= df.dropna(thresh=2)\n+ df = df.dropna(thresh=2)\n df.columns = df.columns.str.strip()\n \n # 5min data for the last 24 hours.\n@@ -59,7 +58,7 @@\n \n for k in unknown_keys:\n logger.warning('New data {} seen in US-BPA data source'.format(k),\n- extra={'key': 'US-BPA'})\n+ extra={'key': 'US-BPA'})\n \n keys_to_remove = unknown_keys | {'Load'}\n \n@@ -69,7 +68,7 @@\n \n dt = production.pop('Date/Time')\n dt = dt.to_pydatetime()\n- mapped_production = {GENERATION_MAPPING[k]:v for k,v in production.items()\n+ mapped_production = {GENERATION_MAPPING[k]: v for k, v in production.items()\n if k not in keys_to_remove}\n \n processed_data.append((dt, mapped_production))\n", "issue": "US-BPA failing due to changes in data layout and timezone \n```shell\r\n File \"/home/contrib/parsers/US_BPA.py\", line 113, in fetch_production\r\n raw_data = get_data(GENERATION_URL, session=session)\r\n File \"/home/contrib/parsers/US_BPA.py\", line 25, in get_data\r\n df = pd.read_table(StringIO(req.text), skiprows=5)\r\n File \"/usr/local/lib/python3.6/site-packages/pandas/io/parsers.py\", line 685, in parser_f\r\n return _read(filepath_or_buffer, kwds)\r\n File \"/usr/local/lib/python3.6/site-packages/pandas/io/parsers.py\", line 463, in _read\r\n data = parser.read(nrows)\r\n File \"/usr/local/lib/python3.6/site-packages/pandas/io/parsers.py\", line 1154, in read\r\n ret = self._engine.read(nrows)\r\n File \"/usr/local/lib/python3.6/site-packages/pandas/io/parsers.py\", line 2059, in read\r\n data = self._reader.read(nrows)\r\n File \"pandas/_libs/parsers.pyx\", line 881, in pandas._libs.parsers.TextReader.read\r\n File \"pandas/_libs/parsers.pyx\", line 896, in pandas._libs.parsers.TextReader._read_low_memory\r\n File \"pandas/_libs/parsers.pyx\", line 950, in pandas._libs.parsers.TextReader._read_rows\r\n File \"pandas/_libs/parsers.pyx\", line 937, in pandas._libs.parsers.TextReader._tokenize_rows\r\n File \"pandas/_libs/parsers.pyx\", line 2132, in pandas._libs.parsers.raise_parser_error\r\npandas.errors.ParserError: Error tokenizing data. 
C error: Expected 1 fields in line 12, saw 6\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n\"\"\"Parser for the Bonneville Power Administration area of the USA.\"\"\"\n\nfrom datetime import datetime, timedelta\nfrom io import StringIO\nimport logging\nimport pandas as pd\nimport pytz\nimport requests\n\n\nGENERATION_URL = 'https://transmission.bpa.gov/business/operations/Wind/baltwg.txt'\n\nGENERATION_MAPPING = {'Wind': 'wind',\n 'Hydro': 'hydro',\n 'Fossil/Biomass': 'unknown',\n 'Nuclear': 'nuclear'}\n\n\ndef get_data(url, session=None):\n \"\"\"Returns a pandas dataframe.\"\"\"\n s=session or requests.Session()\n req = s.get(url)\n df = pd.read_table(StringIO(req.text), skiprows=5)\n\n return df\n\n\ndef timestamp_converter(timestamp):\n \"\"\"Turns string representation of time into an aware datetime object.\"\"\"\n\n dt_naive = datetime.strptime(timestamp, '%m/%d/%Y %H:%M')\n western = pytz.timezone('America/Los_Angeles')\n dt_aware = western.localize(dt_naive)\n\n return dt_aware\n\n\ndef data_processor(df, logger):\n \"\"\"\n Takes a dataframe and drops all generation rows that are empty or more\n than 1 day old. Turns each row into a dictionary and removes any generation\n types that are unknown.\n Returns a list of tuples in the form (datetime, production).\n \"\"\"\n\n df= df.dropna(thresh=2)\n df.columns = df.columns.str.strip()\n\n # 5min data for the last 24 hours.\n df = df.tail(288)\n df['Date/Time'] = df['Date/Time'].map(timestamp_converter)\n\n known_keys = GENERATION_MAPPING.keys() | {'Date/Time', 'Load'}\n column_headers = set(df.columns)\n\n unknown_keys = column_headers - known_keys\n\n for k in unknown_keys:\n logger.warning('New data {} seen in US-BPA data source'.format(k),\n extra={'key': 'US-BPA'})\n\n keys_to_remove = unknown_keys | {'Load'}\n\n processed_data = []\n for index, row in df.iterrows():\n production = row.to_dict()\n\n dt = production.pop('Date/Time')\n dt = dt.to_pydatetime()\n mapped_production = {GENERATION_MAPPING[k]:v for k,v in production.items()\n if k not in keys_to_remove}\n\n processed_data.append((dt, mapped_production))\n\n return processed_data\n\n\ndef fetch_production(zone_key='US-BPA', session=None, target_datetime=None, logger=logging.getLogger(__name__)):\n \"\"\"\n Requests the last known production mix (in MW) of a given zone\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple zones\n session (optional) -- request session passed in order to re-use an existing session\n Return:\n A dictionary in the form:\n {\n 'zoneKey': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n raw_data = get_data(GENERATION_URL, session=session)\n processed_data = data_processor(raw_data, logger)\n\n data = []\n for item in processed_data:\n datapoint = {'zoneKey': zone_key,\n 'datetime': item[0],\n 'production': item[1],\n 'storage': {},\n 'source': 'bpa.gov'}\n\n data.append(datapoint)\n\n return data\n\n\nif __name__ == '__main__':\n print('fetch_production() ->')\n print(fetch_production())\n", "path": "parsers/US_BPA.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\n\"\"\"Parser for the Bonneville Power Administration area 
of the USA.\"\"\"\n\n\nfrom io import StringIO\nimport arrow\nimport logging\nimport pandas as pd\nimport requests\n\n\nGENERATION_URL = 'https://transmission.bpa.gov/business/operations/Wind/baltwg.txt'\n\nGENERATION_MAPPING = {'Wind': 'wind',\n 'Hydro': 'hydro',\n 'Fossil/Biomass': 'unknown',\n 'Nuclear': 'nuclear'}\n\n\ndef get_data(url, session=None):\n \"\"\"Returns a pandas dataframe.\"\"\"\n s = session or requests.Session()\n req = s.get(url)\n df = pd.read_table(StringIO(req.text), skiprows=11)\n\n return df\n\n\ndef timestamp_converter(timestamp):\n \"\"\"Turns a timestamp str into an aware datetime object.\"\"\"\n\n arr_dt_naive = arrow.get(timestamp, 'MM/DD/YYYY HH:mm')\n dt_aware = arr_dt_naive.replace(tzinfo='America/Los_Angeles').datetime\n\n return dt_aware\n\n\ndef data_processor(df, logger):\n \"\"\"\n Takes a dataframe and drops all generation rows that are empty or more\n than 1 day old. Turns each row into a dictionary and removes any generation\n types that are unknown.\n Returns a list of tuples in the form (datetime, production).\n \"\"\"\n\n df = df.dropna(thresh=2)\n df.columns = df.columns.str.strip()\n\n # 5min data for the last 24 hours.\n df = df.tail(288)\n df['Date/Time'] = df['Date/Time'].map(timestamp_converter)\n\n known_keys = GENERATION_MAPPING.keys() | {'Date/Time', 'Load'}\n column_headers = set(df.columns)\n\n unknown_keys = column_headers - known_keys\n\n for k in unknown_keys:\n logger.warning('New data {} seen in US-BPA data source'.format(k),\n extra={'key': 'US-BPA'})\n\n keys_to_remove = unknown_keys | {'Load'}\n\n processed_data = []\n for index, row in df.iterrows():\n production = row.to_dict()\n\n dt = production.pop('Date/Time')\n dt = dt.to_pydatetime()\n mapped_production = {GENERATION_MAPPING[k]: v for k, v in production.items()\n if k not in keys_to_remove}\n\n processed_data.append((dt, mapped_production))\n\n return processed_data\n\n\ndef fetch_production(zone_key='US-BPA', session=None, target_datetime=None, logger=logging.getLogger(__name__)):\n \"\"\"\n Requests the last known production mix (in MW) of a given zone\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple zones\n session (optional) -- request session passed in order to re-use an existing session\n Return:\n A dictionary in the form:\n {\n 'zoneKey': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n raw_data = get_data(GENERATION_URL, session=session)\n processed_data = data_processor(raw_data, logger)\n\n data = []\n for item in processed_data:\n datapoint = {'zoneKey': zone_key,\n 'datetime': item[0],\n 'production': item[1],\n 'storage': {},\n 'source': 'bpa.gov'}\n\n data.append(datapoint)\n\n return data\n\n\nif __name__ == '__main__':\n print('fetch_production() ->')\n print(fetch_production())\n", "path": "parsers/US_BPA.py"}]}
| 1,908 | 568 |
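The patch above makes two independent changes: `skiprows` goes from 5 to 11, apparently because extra header lines now precede the data in `baltwg.txt` (the parser error "Expected 1 fields in line 12, saw 6" presumably comes from the column header now sitting further down the file), and the pytz-based localization is replaced with arrow. For an unambiguous local time the two conversions agree; the sample value below is hypothetical:

```python
from datetime import datetime

import arrow
import pytz

sample = "01/30/2020 14:35"  # made-up value in the Date/Time column format

# Patched behaviour: parse with arrow, then attach the Pacific timezone.
dt_arrow = arrow.get(sample, "MM/DD/YYYY HH:mm").replace(tzinfo="America/Los_Angeles").datetime

# Previous behaviour with pytz, kept here only for comparison.
dt_pytz = pytz.timezone("America/Los_Angeles").localize(datetime.strptime(sample, "%m/%d/%Y %H:%M"))

assert dt_arrow == dt_pytz  # same instant for non-ambiguous local times
```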
gh_patches_debug_7925
|
rasdani/github-patches
|
git_diff
|
lra__mackup-1718
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
usability / doc issues (minor / feature request)
On ignoring Mackup since I installed it for multiple accounts, I'm now returning to working with it to bring some more machines into the group and reorganise. Here are some minor issues I ran into getting back up to speed:
* `mackup uninstall` says:
Every configuration file, setting and dotfile managed by Mackup will be unlinked and moved back to their original place, in your home folder.
Are you sure ? <Yes|No>yes
This is scary and actually incorrect. Please change 'moved' to 'copied' (obviously Mackup does the correct cp as mv would be an issue for all the other accounts).
* Would be nice to have a few lines in Bull*/backup on "What if I run `mackup backup` twice?" and/or maybe "How do I add another application later after the initial setup?" Again, obviously Mackup does the right thing, but my (pedantic?) reading of the documentation suggests it might not. The --dry-run option already helps with this.
* it would be **way cool** if `mackup list` also reported things like: 'configuration files detected' and 'configuration already in Mackup', and for these cases also 'mackup.cfg includes' (implicit or explicit with applications_to_sync) or 'mackup.cfg excludes'
Thanks!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mackup/main.py`
Content:
```
1 """Mackup.
2
3 Keep your application settings in sync.
4 Copyright (C) 2013-2021 Laurent Raufaste <http://glop.org/>
5
6 Usage:
7 mackup list
8 mackup [options] backup
9 mackup [options] restore
10 mackup show <application>
11 mackup [options] uninstall
12 mackup (-h | --help)
13 mackup --version
14
15 Options:
16 -h --help Show this screen.
17 -f --force Force every question asked to be answered with "Yes".
18 -r --root Allow mackup to be run as superuser.
19 -n --dry-run Show steps without executing.
20 -v --verbose Show additional details.
21 --version Show version.
22
23 Modes of action:
24 1. list: display a list of all supported applications.
25 2. backup: sync your conf files to your synced storage, use this the 1st time
26 you use Mackup.
27 3. restore: link the conf files already in your synced storage on your system,
28 use it on any new system you use.
29 4. uninstall: reset everything as it was before using Mackup.
30
31 By default, Mackup syncs all application data via
32 Dropbox, but may be configured to exclude applications or use a different
33 backend with a .mackup.cfg file.
34
35 See https://github.com/lra/mackup/tree/master/doc for more information.
36
37 """
38 from docopt import docopt
39 from .appsdb import ApplicationsDatabase
40 from .application import ApplicationProfile
41 from .constants import MACKUP_APP_NAME, VERSION
42 from .mackup import Mackup
43 from . import utils
44 import sys
45
46
47 class ColorFormatCodes:
48 BLUE = "\033[34m"
49 BOLD = "\033[1m"
50 NORMAL = "\033[0m"
51
52
53 def header(str):
54 return ColorFormatCodes.BLUE + str + ColorFormatCodes.NORMAL
55
56
57 def bold(str):
58 return ColorFormatCodes.BOLD + str + ColorFormatCodes.NORMAL
59
60
61 def main():
62 """Main function."""
63 # Get the command line arg
64 args = docopt(__doc__, version="Mackup {}".format(VERSION))
65
66 mckp = Mackup()
67 app_db = ApplicationsDatabase()
68
69 def printAppHeader(app_name):
70 if verbose:
71 print(("\n{0} {1} {0}").format(header("---"), bold(app_name)))
72
73 # If we want to answer mackup with "yes" for each question
74 if args["--force"]:
75 utils.FORCE_YES = True
76
77 # Allow mackup to be run as root
78 if args["--root"]:
79 utils.CAN_RUN_AS_ROOT = True
80
81 dry_run = args["--dry-run"]
82
83 verbose = args["--verbose"]
84
85 if args["backup"]:
86 # Check the env where the command is being run
87 mckp.check_for_usable_backup_env()
88
89 # Backup each application
90 for app_name in sorted(mckp.get_apps_to_backup()):
91 app = ApplicationProfile(mckp, app_db.get_files(app_name), dry_run, verbose)
92 printAppHeader(app_name)
93 app.backup()
94
95 elif args["restore"]:
96 # Check the env where the command is being run
97 mckp.check_for_usable_restore_env()
98
99 # Restore the Mackup config before any other config, as we might need
100 # it to know about custom settings
101 mackup_app = ApplicationProfile(
102 mckp, app_db.get_files(MACKUP_APP_NAME), dry_run, verbose
103 )
104 printAppHeader(MACKUP_APP_NAME)
105 mackup_app.restore()
106
107 # Initialize again the apps db, as the Mackup config might have changed
108 # it
109 mckp = Mackup()
110 app_db = ApplicationsDatabase()
111
112 # Restore the rest of the app configs, using the restored Mackup config
113 app_names = mckp.get_apps_to_backup()
114 # Mackup has already been done
115 app_names.discard(MACKUP_APP_NAME)
116
117 for app_name in sorted(app_names):
118 app = ApplicationProfile(mckp, app_db.get_files(app_name), dry_run, verbose)
119 printAppHeader(app_name)
120 app.restore()
121
122 elif args["uninstall"]:
123 # Check the env where the command is being run
124 mckp.check_for_usable_restore_env()
125
126 if dry_run or (
127 utils.confirm(
128 "You are going to uninstall Mackup.\n"
129 "Every configuration file, setting and dotfile"
130 " managed by Mackup will be unlinked and moved back"
131 " to their original place, in your home folder.\n"
132 "Are you sure?"
133 )
134 ):
135
136 # Uninstall the apps except Mackup, which we'll uninstall last, to
137 # keep the settings as long as possible
138 app_names = mckp.get_apps_to_backup()
139 app_names.discard(MACKUP_APP_NAME)
140
141 for app_name in sorted(app_names):
142 app = ApplicationProfile(
143 mckp, app_db.get_files(app_name), dry_run, verbose
144 )
145 printAppHeader(app_name)
146 app.uninstall()
147
148 # Restore the Mackup config before any other config, as we might
149 # need it to know about custom settings
150 mackup_app = ApplicationProfile(
151 mckp, app_db.get_files(MACKUP_APP_NAME), dry_run, verbose
152 )
153 mackup_app.uninstall()
154
155 # Delete the Mackup folder in Dropbox
156 # Don't delete this as there might be other Macs that aren't
157 # uninstalled yet
158 # delete(mckp.mackup_folder)
159
160 print(
161 "\n"
162 "All your files have been put back into place. You can now"
163 " safely uninstall Mackup.\n"
164 "\n"
165 "Thanks for using Mackup!"
166 )
167
168 elif args["list"]:
169 # Display the list of supported applications
170 mckp.check_for_usable_environment()
171 output = "Supported applications:\n"
172 for app_name in sorted(app_db.get_app_names()):
173 output += " - {}\n".format(app_name)
174 output += "\n"
175 output += "{} applications supported in Mackup v{}".format(
176 len(app_db.get_app_names()), VERSION
177 )
178 print(output)
179
180 elif args["show"]:
181 mckp.check_for_usable_environment()
182 app_name = args["<application>"]
183
184 # Make sure the app exists
185 if app_name not in app_db.get_app_names():
186 sys.exit("Unsupported application: {}".format(app_name))
187 print("Name: {}".format(app_db.get_name(app_name)))
188 print("Configuration files:")
189 for file in app_db.get_files(app_name):
190 print(" - {}".format(file))
191
192 # Delete the tmp folder
193 mckp.clean_temp_folder()
194
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mackup/main.py b/mackup/main.py
--- a/mackup/main.py
+++ b/mackup/main.py
@@ -127,7 +127,7 @@
utils.confirm(
"You are going to uninstall Mackup.\n"
"Every configuration file, setting and dotfile"
- " managed by Mackup will be unlinked and moved back"
+ " managed by Mackup will be unlinked and copied back"
" to their original place, in your home folder.\n"
"Are you sure?"
)
|
{"golden_diff": "diff --git a/mackup/main.py b/mackup/main.py\n--- a/mackup/main.py\n+++ b/mackup/main.py\n@@ -127,7 +127,7 @@\n utils.confirm(\n \"You are going to uninstall Mackup.\\n\"\n \"Every configuration file, setting and dotfile\"\n- \" managed by Mackup will be unlinked and moved back\"\n+ \" managed by Mackup will be unlinked and copied back\"\n \" to their original place, in your home folder.\\n\"\n \"Are you sure?\"\n )\n", "issue": "usability / doc issues (minor / feature request)\nOn ignoring Mackup since I installed it for multiple accounts, I'm now returning to working with it to bring some more machines into the group and reorganise. Here are some minor issues I ran into getting back up to speed:\r\n\r\n* `mackup uninstall` says: \r\n Every configuration file, setting and dotfile managed by Mackup will be unlinked and moved back to their original place, in your home folder.\r\n Are you sure ? <Yes|No>yes\r\n\r\nThis is scary and actually incorrect. Please change 'moved' to 'copied' (obviously Mackup does the correct cp as mv would be an issue for all the other accounts).\r\n\r\n* Would be nice to have a few lines in Bull*/backup on \"What if I run `mackup backup` twice?\" and/or maybe \"How do I add another application later after the initial setup?\" Again, obviously Mackup does the right thing, but my (pedantic?) reading of the documentation suggests it might not. The --dry-run option already helps with this.\r\n\r\n* it would be **way cool** if `mackup list` also reported things like: 'configuration files detected' and 'configuration already in Mackup', and for these cases also 'mackup.cfg includes' (implicit or explicit with applications_to_sync) or 'mackup.cfg excludes' \r\n\r\nThanks!\n", "before_files": [{"content": "\"\"\"Mackup.\n\nKeep your application settings in sync.\nCopyright (C) 2013-2021 Laurent Raufaste <http://glop.org/>\n\nUsage:\n mackup list\n mackup [options] backup\n mackup [options] restore\n mackup show <application>\n mackup [options] uninstall\n mackup (-h | --help)\n mackup --version\n\nOptions:\n -h --help Show this screen.\n -f --force Force every question asked to be answered with \"Yes\".\n -r --root Allow mackup to be run as superuser.\n -n --dry-run Show steps without executing.\n -v --verbose Show additional details.\n --version Show version.\n\nModes of action:\n 1. list: display a list of all supported applications.\n 2. backup: sync your conf files to your synced storage, use this the 1st time\n you use Mackup.\n 3. restore: link the conf files already in your synced storage on your system,\n use it on any new system you use.\n 4. uninstall: reset everything as it was before using Mackup.\n\nBy default, Mackup syncs all application data via\nDropbox, but may be configured to exclude applications or use a different\nbackend with a .mackup.cfg file.\n\nSee https://github.com/lra/mackup/tree/master/doc for more information.\n\n\"\"\"\nfrom docopt import docopt\nfrom .appsdb import ApplicationsDatabase\nfrom .application import ApplicationProfile\nfrom .constants import MACKUP_APP_NAME, VERSION\nfrom .mackup import Mackup\nfrom . 
import utils\nimport sys\n\n\nclass ColorFormatCodes:\n BLUE = \"\\033[34m\"\n BOLD = \"\\033[1m\"\n NORMAL = \"\\033[0m\"\n\n\ndef header(str):\n return ColorFormatCodes.BLUE + str + ColorFormatCodes.NORMAL\n\n\ndef bold(str):\n return ColorFormatCodes.BOLD + str + ColorFormatCodes.NORMAL\n\n\ndef main():\n \"\"\"Main function.\"\"\"\n # Get the command line arg\n args = docopt(__doc__, version=\"Mackup {}\".format(VERSION))\n\n mckp = Mackup()\n app_db = ApplicationsDatabase()\n\n def printAppHeader(app_name):\n if verbose:\n print((\"\\n{0} {1} {0}\").format(header(\"---\"), bold(app_name)))\n\n # If we want to answer mackup with \"yes\" for each question\n if args[\"--force\"]:\n utils.FORCE_YES = True\n\n # Allow mackup to be run as root\n if args[\"--root\"]:\n utils.CAN_RUN_AS_ROOT = True\n\n dry_run = args[\"--dry-run\"]\n\n verbose = args[\"--verbose\"]\n\n if args[\"backup\"]:\n # Check the env where the command is being run\n mckp.check_for_usable_backup_env()\n\n # Backup each application\n for app_name in sorted(mckp.get_apps_to_backup()):\n app = ApplicationProfile(mckp, app_db.get_files(app_name), dry_run, verbose)\n printAppHeader(app_name)\n app.backup()\n\n elif args[\"restore\"]:\n # Check the env where the command is being run\n mckp.check_for_usable_restore_env()\n\n # Restore the Mackup config before any other config, as we might need\n # it to know about custom settings\n mackup_app = ApplicationProfile(\n mckp, app_db.get_files(MACKUP_APP_NAME), dry_run, verbose\n )\n printAppHeader(MACKUP_APP_NAME)\n mackup_app.restore()\n\n # Initialize again the apps db, as the Mackup config might have changed\n # it\n mckp = Mackup()\n app_db = ApplicationsDatabase()\n\n # Restore the rest of the app configs, using the restored Mackup config\n app_names = mckp.get_apps_to_backup()\n # Mackup has already been done\n app_names.discard(MACKUP_APP_NAME)\n\n for app_name in sorted(app_names):\n app = ApplicationProfile(mckp, app_db.get_files(app_name), dry_run, verbose)\n printAppHeader(app_name)\n app.restore()\n\n elif args[\"uninstall\"]:\n # Check the env where the command is being run\n mckp.check_for_usable_restore_env()\n\n if dry_run or (\n utils.confirm(\n \"You are going to uninstall Mackup.\\n\"\n \"Every configuration file, setting and dotfile\"\n \" managed by Mackup will be unlinked and moved back\"\n \" to their original place, in your home folder.\\n\"\n \"Are you sure?\"\n )\n ):\n\n # Uninstall the apps except Mackup, which we'll uninstall last, to\n # keep the settings as long as possible\n app_names = mckp.get_apps_to_backup()\n app_names.discard(MACKUP_APP_NAME)\n\n for app_name in sorted(app_names):\n app = ApplicationProfile(\n mckp, app_db.get_files(app_name), dry_run, verbose\n )\n printAppHeader(app_name)\n app.uninstall()\n\n # Restore the Mackup config before any other config, as we might\n # need it to know about custom settings\n mackup_app = ApplicationProfile(\n mckp, app_db.get_files(MACKUP_APP_NAME), dry_run, verbose\n )\n mackup_app.uninstall()\n\n # Delete the Mackup folder in Dropbox\n # Don't delete this as there might be other Macs that aren't\n # uninstalled yet\n # delete(mckp.mackup_folder)\n\n print(\n \"\\n\"\n \"All your files have been put back into place. 
You can now\"\n \" safely uninstall Mackup.\\n\"\n \"\\n\"\n \"Thanks for using Mackup!\"\n )\n\n elif args[\"list\"]:\n # Display the list of supported applications\n mckp.check_for_usable_environment()\n output = \"Supported applications:\\n\"\n for app_name in sorted(app_db.get_app_names()):\n output += \" - {}\\n\".format(app_name)\n output += \"\\n\"\n output += \"{} applications supported in Mackup v{}\".format(\n len(app_db.get_app_names()), VERSION\n )\n print(output)\n\n elif args[\"show\"]:\n mckp.check_for_usable_environment()\n app_name = args[\"<application>\"]\n\n # Make sure the app exists\n if app_name not in app_db.get_app_names():\n sys.exit(\"Unsupported application: {}\".format(app_name))\n print(\"Name: {}\".format(app_db.get_name(app_name)))\n print(\"Configuration files:\")\n for file in app_db.get_files(app_name):\n print(\" - {}\".format(file))\n\n # Delete the tmp folder\n mckp.clean_temp_folder()\n", "path": "mackup/main.py"}], "after_files": [{"content": "\"\"\"Mackup.\n\nKeep your application settings in sync.\nCopyright (C) 2013-2021 Laurent Raufaste <http://glop.org/>\n\nUsage:\n mackup list\n mackup [options] backup\n mackup [options] restore\n mackup show <application>\n mackup [options] uninstall\n mackup (-h | --help)\n mackup --version\n\nOptions:\n -h --help Show this screen.\n -f --force Force every question asked to be answered with \"Yes\".\n -r --root Allow mackup to be run as superuser.\n -n --dry-run Show steps without executing.\n -v --verbose Show additional details.\n --version Show version.\n\nModes of action:\n 1. list: display a list of all supported applications.\n 2. backup: sync your conf files to your synced storage, use this the 1st time\n you use Mackup.\n 3. restore: link the conf files already in your synced storage on your system,\n use it on any new system you use.\n 4. uninstall: reset everything as it was before using Mackup.\n\nBy default, Mackup syncs all application data via\nDropbox, but may be configured to exclude applications or use a different\nbackend with a .mackup.cfg file.\n\nSee https://github.com/lra/mackup/tree/master/doc for more information.\n\n\"\"\"\nfrom docopt import docopt\nfrom .appsdb import ApplicationsDatabase\nfrom .application import ApplicationProfile\nfrom .constants import MACKUP_APP_NAME, VERSION\nfrom .mackup import Mackup\nfrom . 
import utils\nimport sys\n\n\nclass ColorFormatCodes:\n BLUE = \"\\033[34m\"\n BOLD = \"\\033[1m\"\n NORMAL = \"\\033[0m\"\n\n\ndef header(str):\n return ColorFormatCodes.BLUE + str + ColorFormatCodes.NORMAL\n\n\ndef bold(str):\n return ColorFormatCodes.BOLD + str + ColorFormatCodes.NORMAL\n\n\ndef main():\n \"\"\"Main function.\"\"\"\n # Get the command line arg\n args = docopt(__doc__, version=\"Mackup {}\".format(VERSION))\n\n mckp = Mackup()\n app_db = ApplicationsDatabase()\n\n def printAppHeader(app_name):\n if verbose:\n print((\"\\n{0} {1} {0}\").format(header(\"---\"), bold(app_name)))\n\n # If we want to answer mackup with \"yes\" for each question\n if args[\"--force\"]:\n utils.FORCE_YES = True\n\n # Allow mackup to be run as root\n if args[\"--root\"]:\n utils.CAN_RUN_AS_ROOT = True\n\n dry_run = args[\"--dry-run\"]\n\n verbose = args[\"--verbose\"]\n\n if args[\"backup\"]:\n # Check the env where the command is being run\n mckp.check_for_usable_backup_env()\n\n # Backup each application\n for app_name in sorted(mckp.get_apps_to_backup()):\n app = ApplicationProfile(mckp, app_db.get_files(app_name), dry_run, verbose)\n printAppHeader(app_name)\n app.backup()\n\n elif args[\"restore\"]:\n # Check the env where the command is being run\n mckp.check_for_usable_restore_env()\n\n # Restore the Mackup config before any other config, as we might need\n # it to know about custom settings\n mackup_app = ApplicationProfile(\n mckp, app_db.get_files(MACKUP_APP_NAME), dry_run, verbose\n )\n printAppHeader(MACKUP_APP_NAME)\n mackup_app.restore()\n\n # Initialize again the apps db, as the Mackup config might have changed\n # it\n mckp = Mackup()\n app_db = ApplicationsDatabase()\n\n # Restore the rest of the app configs, using the restored Mackup config\n app_names = mckp.get_apps_to_backup()\n # Mackup has already been done\n app_names.discard(MACKUP_APP_NAME)\n\n for app_name in sorted(app_names):\n app = ApplicationProfile(mckp, app_db.get_files(app_name), dry_run, verbose)\n printAppHeader(app_name)\n app.restore()\n\n elif args[\"uninstall\"]:\n # Check the env where the command is being run\n mckp.check_for_usable_restore_env()\n\n if dry_run or (\n utils.confirm(\n \"You are going to uninstall Mackup.\\n\"\n \"Every configuration file, setting and dotfile\"\n \" managed by Mackup will be unlinked and copied back\"\n \" to their original place, in your home folder.\\n\"\n \"Are you sure?\"\n )\n ):\n\n # Uninstall the apps except Mackup, which we'll uninstall last, to\n # keep the settings as long as possible\n app_names = mckp.get_apps_to_backup()\n app_names.discard(MACKUP_APP_NAME)\n\n for app_name in sorted(app_names):\n app = ApplicationProfile(\n mckp, app_db.get_files(app_name), dry_run, verbose\n )\n printAppHeader(app_name)\n app.uninstall()\n\n # Restore the Mackup config before any other config, as we might\n # need it to know about custom settings\n mackup_app = ApplicationProfile(\n mckp, app_db.get_files(MACKUP_APP_NAME), dry_run, verbose\n )\n mackup_app.uninstall()\n\n # Delete the Mackup folder in Dropbox\n # Don't delete this as there might be other Macs that aren't\n # uninstalled yet\n # delete(mckp.mackup_folder)\n\n print(\n \"\\n\"\n \"All your files have been put back into place. 
You can now\"\n \" safely uninstall Mackup.\\n\"\n \"\\n\"\n \"Thanks for using Mackup!\"\n )\n\n elif args[\"list\"]:\n # Display the list of supported applications\n mckp.check_for_usable_environment()\n output = \"Supported applications:\\n\"\n for app_name in sorted(app_db.get_app_names()):\n output += \" - {}\\n\".format(app_name)\n output += \"\\n\"\n output += \"{} applications supported in Mackup v{}\".format(\n len(app_db.get_app_names()), VERSION\n )\n print(output)\n\n elif args[\"show\"]:\n mckp.check_for_usable_environment()\n app_name = args[\"<application>\"]\n\n # Make sure the app exists\n if app_name not in app_db.get_app_names():\n sys.exit(\"Unsupported application: {}\".format(app_name))\n print(\"Name: {}\".format(app_db.get_name(app_name)))\n print(\"Configuration files:\")\n for file in app_db.get_files(app_name):\n print(\" - {}\".format(file))\n\n # Delete the tmp folder\n mckp.clean_temp_folder()\n", "path": "mackup/main.py"}]}
| 2,548 | 125 |
gh_patches_debug_3683
|
rasdani/github-patches
|
git_diff
|
bokeh__bokeh-5883
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Test bokehjs/examples and run them on travis ci
They are constantly broken due to not being continuously tested.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/plotting/file/airports_map.py`
Content:
```
1 from __future__ import print_function
2
3 from bokeh.layouts import row
4 from bokeh.models import (
5 Range1d, WMTSTileSource, ColumnDataSource, HoverTool,
6 )
7 from bokeh.plotting import figure, show, output_file
8 from bokeh.sampledata.airports import data as airports
9 from bokeh.tile_providers import CARTODBPOSITRON
10
11 points_source = ColumnDataSource(airports)
12 title = "US Airports: Field Elevation > 1500m"
13
14 def plot(tile_source, filename):
15 output_file(filename, title=title)
16
17 # set to roughly extent of points
18 x_range = Range1d(start=airports['x'].min() - 10000, end=airports['x'].max() + 10000, bounds=None)
19 y_range = Range1d(start=airports['y'].min() - 10000, end=airports['y'].max() + 10000, bounds=None)
20
21 # create plot and add tools
22 p = figure(tools='wheel_zoom,pan', x_range=x_range, y_range=y_range, title=title)
23 p.axis.visible = False
24 hover_tool = HoverTool(tooltips=[("Name", "@name"), ("Elevation", "@elevation (m)")])
25 p.add_tools(hover_tool)
26 p.add_tile(tile_source)
27
28 # create point glyphs
29 p.circle(x='x', y='y', size=9, fill_color="#60ACA1", line_color="#D2C4C1", line_width=1.5, source=points_source)
30 return p
31
32 # create a tile source
33 tile_options = {}
34 tile_options['url'] = 'http://otile2.mqcdn.com/tiles/1.0.0/sat/{Z}/{X}/{Y}.png'
35 mq_tile_source = WMTSTileSource(**tile_options)
36
37 carto = plot(CARTODBPOSITRON, 'airports_map_cartodb.html')
38 mq = plot(mq_tile_source, 'airports_map.html')
39
40 show(row([carto, mq]))
41
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/examples/plotting/file/airports_map.py b/examples/plotting/file/airports_map.py
--- a/examples/plotting/file/airports_map.py
+++ b/examples/plotting/file/airports_map.py
@@ -31,7 +31,7 @@
# create a tile source
tile_options = {}
-tile_options['url'] = 'http://otile2.mqcdn.com/tiles/1.0.0/sat/{Z}/{X}/{Y}.png'
+# TODO tile_options['url'] = 'http://otile2.mqcdn.com/tiles/1.0.0/sat/{Z}/{X}/{Y}.png'
mq_tile_source = WMTSTileSource(**tile_options)
carto = plot(CARTODBPOSITRON, 'airports_map_cartodb.html')
|
{"golden_diff": "diff --git a/examples/plotting/file/airports_map.py b/examples/plotting/file/airports_map.py\n--- a/examples/plotting/file/airports_map.py\n+++ b/examples/plotting/file/airports_map.py\n@@ -31,7 +31,7 @@\n \n # create a tile source\n tile_options = {}\n-tile_options['url'] = 'http://otile2.mqcdn.com/tiles/1.0.0/sat/{Z}/{X}/{Y}.png'\n+# TODO tile_options['url'] = 'http://otile2.mqcdn.com/tiles/1.0.0/sat/{Z}/{X}/{Y}.png'\n mq_tile_source = WMTSTileSource(**tile_options)\n \n carto = plot(CARTODBPOSITRON, 'airports_map_cartodb.html')\n", "issue": "Test bokehjs/examples and run them on travis ci\nThey are constantly broken due to not being continuously tested.\n", "before_files": [{"content": "from __future__ import print_function\n\nfrom bokeh.layouts import row\nfrom bokeh.models import (\n Range1d, WMTSTileSource, ColumnDataSource, HoverTool,\n)\nfrom bokeh.plotting import figure, show, output_file\nfrom bokeh.sampledata.airports import data as airports\nfrom bokeh.tile_providers import CARTODBPOSITRON\n\npoints_source = ColumnDataSource(airports)\ntitle = \"US Airports: Field Elevation > 1500m\"\n\ndef plot(tile_source, filename):\n output_file(filename, title=title)\n\n # set to roughly extent of points\n x_range = Range1d(start=airports['x'].min() - 10000, end=airports['x'].max() + 10000, bounds=None)\n y_range = Range1d(start=airports['y'].min() - 10000, end=airports['y'].max() + 10000, bounds=None)\n\n # create plot and add tools\n p = figure(tools='wheel_zoom,pan', x_range=x_range, y_range=y_range, title=title)\n p.axis.visible = False\n hover_tool = HoverTool(tooltips=[(\"Name\", \"@name\"), (\"Elevation\", \"@elevation (m)\")])\n p.add_tools(hover_tool)\n p.add_tile(tile_source)\n\n # create point glyphs\n p.circle(x='x', y='y', size=9, fill_color=\"#60ACA1\", line_color=\"#D2C4C1\", line_width=1.5, source=points_source)\n return p\n\n# create a tile source\ntile_options = {}\ntile_options['url'] = 'http://otile2.mqcdn.com/tiles/1.0.0/sat/{Z}/{X}/{Y}.png'\nmq_tile_source = WMTSTileSource(**tile_options)\n\ncarto = plot(CARTODBPOSITRON, 'airports_map_cartodb.html')\nmq = plot(mq_tile_source, 'airports_map.html')\n\nshow(row([carto, mq]))\n", "path": "examples/plotting/file/airports_map.py"}], "after_files": [{"content": "from __future__ import print_function\n\nfrom bokeh.layouts import row\nfrom bokeh.models import (\n Range1d, WMTSTileSource, ColumnDataSource, HoverTool,\n)\nfrom bokeh.plotting import figure, show, output_file\nfrom bokeh.sampledata.airports import data as airports\nfrom bokeh.tile_providers import CARTODBPOSITRON\n\npoints_source = ColumnDataSource(airports)\ntitle = \"US Airports: Field Elevation > 1500m\"\n\ndef plot(tile_source, filename):\n output_file(filename, title=title)\n\n # set to roughly extent of points\n x_range = Range1d(start=airports['x'].min() - 10000, end=airports['x'].max() + 10000, bounds=None)\n y_range = Range1d(start=airports['y'].min() - 10000, end=airports['y'].max() + 10000, bounds=None)\n\n # create plot and add tools\n p = figure(tools='wheel_zoom,pan', x_range=x_range, y_range=y_range, title=title)\n p.axis.visible = False\n hover_tool = HoverTool(tooltips=[(\"Name\", \"@name\"), (\"Elevation\", \"@elevation (m)\")])\n p.add_tools(hover_tool)\n p.add_tile(tile_source)\n\n # create point glyphs\n p.circle(x='x', y='y', size=9, fill_color=\"#60ACA1\", line_color=\"#D2C4C1\", line_width=1.5, source=points_source)\n return p\n\n# create a tile source\ntile_options = {}\n# TODO tile_options['url'] = 
'http://otile2.mqcdn.com/tiles/1.0.0/sat/{Z}/{X}/{Y}.png'\nmq_tile_source = WMTSTileSource(**tile_options)\n\ncarto = plot(CARTODBPOSITRON, 'airports_map_cartodb.html')\nmq = plot(mq_tile_source, 'airports_map.html')\n\nshow(row([carto, mq]))\n", "path": "examples/plotting/file/airports_map.py"}]}
| 820 | 180 |
gh_patches_debug_22234
|
rasdani/github-patches
|
git_diff
|
certbot__certbot-4466
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cannot find Apache control command apache2ctl on Arch Linux
I have this problem when using certbot for apache following this guide http://letsencrypt.readthedocs.io/en/latest/using.html#apache
```
$ sudo certbot --apache
Saving debug log to /var/log/letsencrypt/letsencrypt.log
Failed to find apache2ctl in expanded PATH: /usr/local/sbin:/usr/local/bin:/usr/bin:/usr/bin/site_perl:/usr/bin/vendor_perl:/usr/bin/core_perl:/usr/sbin
The apache plugin is not working; there may be problems with your existing configuration.
The error was: NoInstallationError('Cannot find Apache control command apache2ctl',)
```
I am on Arch Linux, and it seems that there is no apache2ctl for Arch but certbot requires it?
I tried to installed it but no luck:
```
$ sudo pacman -S apache2
error: target not found: apache2
```
My Apache's version:
```
$ httpd -v
Server version: Apache/2.4.25 (Unix)
Server built: Dec 20 2016 13:02:2
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `certbot-apache/certbot_apache/constants.py`
Content:
```
1 """Apache plugin constants."""
2 import pkg_resources
3 from certbot import util
4
5 CLI_DEFAULTS_DEFAULT = dict(
6 server_root="/etc/apache2",
7 vhost_root="/etc/apache2/sites-available",
8 vhost_files="*",
9 logs_root="/var/log/apache2",
10 version_cmd=['apache2ctl', '-v'],
11 define_cmd=['apache2ctl', '-t', '-D', 'DUMP_RUN_CFG'],
12 restart_cmd=['apache2ctl', 'graceful'],
13 conftest_cmd=['apache2ctl', 'configtest'],
14 enmod=None,
15 dismod=None,
16 le_vhost_ext="-le-ssl.conf",
17 handle_mods=False,
18 handle_sites=False,
19 challenge_location="/etc/apache2",
20 MOD_SSL_CONF_SRC=pkg_resources.resource_filename(
21 "certbot_apache", "options-ssl-apache.conf")
22 )
23 CLI_DEFAULTS_DEBIAN = dict(
24 server_root="/etc/apache2",
25 vhost_root="/etc/apache2/sites-available",
26 vhost_files="*",
27 logs_root="/var/log/apache2",
28 version_cmd=['apache2ctl', '-v'],
29 define_cmd=['apache2ctl', '-t', '-D', 'DUMP_RUN_CFG'],
30 restart_cmd=['apache2ctl', 'graceful'],
31 conftest_cmd=['apache2ctl', 'configtest'],
32 enmod="a2enmod",
33 dismod="a2dismod",
34 le_vhost_ext="-le-ssl.conf",
35 handle_mods=True,
36 handle_sites=True,
37 challenge_location="/etc/apache2",
38 MOD_SSL_CONF_SRC=pkg_resources.resource_filename(
39 "certbot_apache", "options-ssl-apache.conf")
40 )
41 CLI_DEFAULTS_CENTOS = dict(
42 server_root="/etc/httpd",
43 vhost_root="/etc/httpd/conf.d",
44 vhost_files="*.conf",
45 logs_root="/var/log/httpd",
46 version_cmd=['apachectl', '-v'],
47 define_cmd=['apachectl', '-t', '-D', 'DUMP_RUN_CFG'],
48 restart_cmd=['apachectl', 'graceful'],
49 conftest_cmd=['apachectl', 'configtest'],
50 enmod=None,
51 dismod=None,
52 le_vhost_ext="-le-ssl.conf",
53 handle_mods=False,
54 handle_sites=False,
55 challenge_location="/etc/httpd/conf.d",
56 MOD_SSL_CONF_SRC=pkg_resources.resource_filename(
57 "certbot_apache", "centos-options-ssl-apache.conf")
58 )
59 CLI_DEFAULTS_GENTOO = dict(
60 server_root="/etc/apache2",
61 vhost_root="/etc/apache2/vhosts.d",
62 vhost_files="*.conf",
63 logs_root="/var/log/apache2",
64 version_cmd=['/usr/sbin/apache2', '-v'],
65 define_cmd=['apache2ctl', 'virtualhosts'],
66 restart_cmd=['apache2ctl', 'graceful'],
67 conftest_cmd=['apache2ctl', 'configtest'],
68 enmod=None,
69 dismod=None,
70 le_vhost_ext="-le-ssl.conf",
71 handle_mods=False,
72 handle_sites=False,
73 challenge_location="/etc/apache2/vhosts.d",
74 MOD_SSL_CONF_SRC=pkg_resources.resource_filename(
75 "certbot_apache", "options-ssl-apache.conf")
76 )
77 CLI_DEFAULTS_DARWIN = dict(
78 server_root="/etc/apache2",
79 vhost_root="/etc/apache2/other",
80 vhost_files="*.conf",
81 logs_root="/var/log/apache2",
82 version_cmd=['/usr/sbin/httpd', '-v'],
83 define_cmd=['/usr/sbin/httpd', '-t', '-D', 'DUMP_RUN_CFG'],
84 restart_cmd=['apachectl', 'graceful'],
85 conftest_cmd=['apachectl', 'configtest'],
86 enmod=None,
87 dismod=None,
88 le_vhost_ext="-le-ssl.conf",
89 handle_mods=False,
90 handle_sites=False,
91 challenge_location="/etc/apache2/other",
92 MOD_SSL_CONF_SRC=pkg_resources.resource_filename(
93 "certbot_apache", "options-ssl-apache.conf")
94 )
95 CLI_DEFAULTS_SUSE = dict(
96 server_root="/etc/apache2",
97 vhost_root="/etc/apache2/vhosts.d",
98 vhost_files="*.conf",
99 logs_root="/var/log/apache2",
100 version_cmd=['apache2ctl', '-v'],
101 define_cmd=['apache2ctl', '-t', '-D', 'DUMP_RUN_CFG'],
102 restart_cmd=['apache2ctl', 'graceful'],
103 conftest_cmd=['apache2ctl', 'configtest'],
104 enmod="a2enmod",
105 dismod="a2dismod",
106 le_vhost_ext="-le-ssl.conf",
107 handle_mods=False,
108 handle_sites=False,
109 challenge_location="/etc/apache2/vhosts.d",
110 MOD_SSL_CONF_SRC=pkg_resources.resource_filename(
111 "certbot_apache", "options-ssl-apache.conf")
112 )
113 CLI_DEFAULTS = {
114 "default": CLI_DEFAULTS_DEFAULT,
115 "debian": CLI_DEFAULTS_DEBIAN,
116 "ubuntu": CLI_DEFAULTS_DEBIAN,
117 "centos": CLI_DEFAULTS_CENTOS,
118 "centos linux": CLI_DEFAULTS_CENTOS,
119 "fedora": CLI_DEFAULTS_CENTOS,
120 "red hat enterprise linux server": CLI_DEFAULTS_CENTOS,
121 "rhel": CLI_DEFAULTS_CENTOS,
122 "amazon": CLI_DEFAULTS_CENTOS,
123 "gentoo": CLI_DEFAULTS_GENTOO,
124 "gentoo base system": CLI_DEFAULTS_GENTOO,
125 "darwin": CLI_DEFAULTS_DARWIN,
126 "opensuse": CLI_DEFAULTS_SUSE,
127 "suse": CLI_DEFAULTS_SUSE,
128 }
129 """CLI defaults."""
130
131 MOD_SSL_CONF_DEST = "options-ssl-apache.conf"
132 """Name of the mod_ssl config file as saved in `IConfig.config_dir`."""
133
134 AUGEAS_LENS_DIR = pkg_resources.resource_filename(
135 "certbot_apache", "augeas_lens")
136 """Path to the Augeas lens directory"""
137
138 REWRITE_HTTPS_ARGS = [
139 "^", "https://%{SERVER_NAME}%{REQUEST_URI}", "[L,NE,R=permanent]"]
140 """Apache version<2.3.9 rewrite rule arguments used for redirections to
141 https vhost"""
142
143 REWRITE_HTTPS_ARGS_WITH_END = [
144 "^", "https://%{SERVER_NAME}%{REQUEST_URI}", "[END,NE,R=permanent]"]
145 """Apache version >= 2.3.9 rewrite rule arguments used for redirections to
146 https vhost"""
147
148 OLD_REWRITE_HTTPS_ARGS = [
149 ["^", "https://%{SERVER_NAME}%{REQUEST_URI}", "[L,QSA,R=permanent]"],
150 ["^", "https://%{SERVER_NAME}%{REQUEST_URI}", "[END,QSA,R=permanent]"]]
151
152 HSTS_ARGS = ["always", "set", "Strict-Transport-Security",
153 "\"max-age=31536000\""]
154 """Apache header arguments for HSTS"""
155
156 UIR_ARGS = ["always", "set", "Content-Security-Policy",
157 "upgrade-insecure-requests"]
158
159 HEADER_ARGS = {"Strict-Transport-Security": HSTS_ARGS,
160 "Upgrade-Insecure-Requests": UIR_ARGS}
161
162
163 def os_constant(key):
164 """
165 Get a constant value for operating system
166
167 :param key: name of cli constant
168 :return: value of constant for active os
169 """
170
171 os_info = util.get_os_info()
172 try:
173 constants = CLI_DEFAULTS[os_info[0].lower()]
174 except KeyError:
175 constants = os_like_constants()
176 if not constants:
177 constants = CLI_DEFAULTS["default"]
178 return constants[key]
179
180
181 def os_like_constants():
182 """
183 Try to get constants for distribution with
184 similar layout and configuration, indicated by
185 /etc/os-release variable "LIKE"
186
187 :returns: Constants dictionary
188 :rtype: `dict`
189 """
190
191 os_like = util.get_systemd_os_like()
192 if os_like:
193 for os_name in os_like:
194 if os_name in CLI_DEFAULTS.keys():
195 return CLI_DEFAULTS[os_name]
196 return {}
197
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/certbot-apache/certbot_apache/constants.py b/certbot-apache/certbot_apache/constants.py
--- a/certbot-apache/certbot_apache/constants.py
+++ b/certbot-apache/certbot_apache/constants.py
@@ -110,6 +110,24 @@
MOD_SSL_CONF_SRC=pkg_resources.resource_filename(
"certbot_apache", "options-ssl-apache.conf")
)
+CLI_DEFAULTS_ARCH = dict(
+ server_root="/etc/httpd",
+ vhost_root="/etc/httpd/conf",
+ vhost_files="*.conf",
+ logs_root="/var/log/httpd",
+ version_cmd=['apachectl', '-v'],
+ define_cmd=['apachectl', '-t', '-D', 'DUMP_RUN_CFG'],
+ restart_cmd=['apachectl', 'graceful'],
+ conftest_cmd=['apachectl', 'configtest'],
+ enmod=None,
+ dismod=None,
+ le_vhost_ext="-le-ssl.conf",
+ handle_mods=False,
+ handle_sites=False,
+ challenge_location="/etc/httpd/conf",
+ MOD_SSL_CONF_SRC=pkg_resources.resource_filename(
+ "certbot_apache", "options-ssl-apache.conf")
+)
CLI_DEFAULTS = {
"default": CLI_DEFAULTS_DEFAULT,
"debian": CLI_DEFAULTS_DEBIAN,
@@ -125,6 +143,7 @@
"darwin": CLI_DEFAULTS_DARWIN,
"opensuse": CLI_DEFAULTS_SUSE,
"suse": CLI_DEFAULTS_SUSE,
+ "arch": CLI_DEFAULTS_ARCH,
}
"""CLI defaults."""
|
{"golden_diff": "diff --git a/certbot-apache/certbot_apache/constants.py b/certbot-apache/certbot_apache/constants.py\n--- a/certbot-apache/certbot_apache/constants.py\n+++ b/certbot-apache/certbot_apache/constants.py\n@@ -110,6 +110,24 @@\n MOD_SSL_CONF_SRC=pkg_resources.resource_filename(\n \"certbot_apache\", \"options-ssl-apache.conf\")\n )\n+CLI_DEFAULTS_ARCH = dict(\n+ server_root=\"/etc/httpd\",\n+ vhost_root=\"/etc/httpd/conf\",\n+ vhost_files=\"*.conf\",\n+ logs_root=\"/var/log/httpd\",\n+ version_cmd=['apachectl', '-v'],\n+ define_cmd=['apachectl', '-t', '-D', 'DUMP_RUN_CFG'],\n+ restart_cmd=['apachectl', 'graceful'],\n+ conftest_cmd=['apachectl', 'configtest'],\n+ enmod=None,\n+ dismod=None,\n+ le_vhost_ext=\"-le-ssl.conf\",\n+ handle_mods=False,\n+ handle_sites=False,\n+ challenge_location=\"/etc/httpd/conf\",\n+ MOD_SSL_CONF_SRC=pkg_resources.resource_filename(\n+ \"certbot_apache\", \"options-ssl-apache.conf\")\n+)\n CLI_DEFAULTS = {\n \"default\": CLI_DEFAULTS_DEFAULT,\n \"debian\": CLI_DEFAULTS_DEBIAN,\n@@ -125,6 +143,7 @@\n \"darwin\": CLI_DEFAULTS_DARWIN,\n \"opensuse\": CLI_DEFAULTS_SUSE,\n \"suse\": CLI_DEFAULTS_SUSE,\n+ \"arch\": CLI_DEFAULTS_ARCH,\n }\n \"\"\"CLI defaults.\"\"\"\n", "issue": "Cannot find Apache control command apache2ctl on Arch Linux\nI have this problem when using certbot for apache following this guide http://letsencrypt.readthedocs.io/en/latest/using.html#apache\r\n\r\n```\r\n$ sudo certbot --apache\r\nSaving debug log to /var/log/letsencrypt/letsencrypt.log\r\nFailed to find apache2ctl in expanded PATH: /usr/local/sbin:/usr/local/bin:/usr/bin:/usr/bin/site_perl:/usr/bin/vendor_perl:/usr/bin/core_perl:/usr/sbin\r\nThe apache plugin is not working; there may be problems with your existing configuration.\r\nThe error was: NoInstallationError('Cannot find Apache control command apache2ctl',)\r\n```\r\nI am on Arch Linux, and it seems that there is no apache2ctl for Arch but certbot requires it?\r\n\r\nI tried to installed it but no luck:\r\n\r\n```\r\n$ sudo pacman -S apache2\r\nerror: target not found: apache2\r\n```\r\n\r\nMy Apache's version:\r\n\r\n```\r\n$ httpd -v\r\nServer version: Apache/2.4.25 (Unix)\r\nServer built: Dec 20 2016 13:02:2\r\n```\r\n\n", "before_files": [{"content": "\"\"\"Apache plugin constants.\"\"\"\nimport pkg_resources\nfrom certbot import util\n\nCLI_DEFAULTS_DEFAULT = dict(\n server_root=\"/etc/apache2\",\n vhost_root=\"/etc/apache2/sites-available\",\n vhost_files=\"*\",\n logs_root=\"/var/log/apache2\",\n version_cmd=['apache2ctl', '-v'],\n define_cmd=['apache2ctl', '-t', '-D', 'DUMP_RUN_CFG'],\n restart_cmd=['apache2ctl', 'graceful'],\n conftest_cmd=['apache2ctl', 'configtest'],\n enmod=None,\n dismod=None,\n le_vhost_ext=\"-le-ssl.conf\",\n handle_mods=False,\n handle_sites=False,\n challenge_location=\"/etc/apache2\",\n MOD_SSL_CONF_SRC=pkg_resources.resource_filename(\n \"certbot_apache\", \"options-ssl-apache.conf\")\n)\nCLI_DEFAULTS_DEBIAN = dict(\n server_root=\"/etc/apache2\",\n vhost_root=\"/etc/apache2/sites-available\",\n vhost_files=\"*\",\n logs_root=\"/var/log/apache2\",\n version_cmd=['apache2ctl', '-v'],\n define_cmd=['apache2ctl', '-t', '-D', 'DUMP_RUN_CFG'],\n restart_cmd=['apache2ctl', 'graceful'],\n conftest_cmd=['apache2ctl', 'configtest'],\n enmod=\"a2enmod\",\n dismod=\"a2dismod\",\n le_vhost_ext=\"-le-ssl.conf\",\n handle_mods=True,\n handle_sites=True,\n challenge_location=\"/etc/apache2\",\n MOD_SSL_CONF_SRC=pkg_resources.resource_filename(\n \"certbot_apache\", 
\"options-ssl-apache.conf\")\n)\nCLI_DEFAULTS_CENTOS = dict(\n server_root=\"/etc/httpd\",\n vhost_root=\"/etc/httpd/conf.d\",\n vhost_files=\"*.conf\",\n logs_root=\"/var/log/httpd\",\n version_cmd=['apachectl', '-v'],\n define_cmd=['apachectl', '-t', '-D', 'DUMP_RUN_CFG'],\n restart_cmd=['apachectl', 'graceful'],\n conftest_cmd=['apachectl', 'configtest'],\n enmod=None,\n dismod=None,\n le_vhost_ext=\"-le-ssl.conf\",\n handle_mods=False,\n handle_sites=False,\n challenge_location=\"/etc/httpd/conf.d\",\n MOD_SSL_CONF_SRC=pkg_resources.resource_filename(\n \"certbot_apache\", \"centos-options-ssl-apache.conf\")\n)\nCLI_DEFAULTS_GENTOO = dict(\n server_root=\"/etc/apache2\",\n vhost_root=\"/etc/apache2/vhosts.d\",\n vhost_files=\"*.conf\",\n logs_root=\"/var/log/apache2\",\n version_cmd=['/usr/sbin/apache2', '-v'],\n define_cmd=['apache2ctl', 'virtualhosts'],\n restart_cmd=['apache2ctl', 'graceful'],\n conftest_cmd=['apache2ctl', 'configtest'],\n enmod=None,\n dismod=None,\n le_vhost_ext=\"-le-ssl.conf\",\n handle_mods=False,\n handle_sites=False,\n challenge_location=\"/etc/apache2/vhosts.d\",\n MOD_SSL_CONF_SRC=pkg_resources.resource_filename(\n \"certbot_apache\", \"options-ssl-apache.conf\")\n)\nCLI_DEFAULTS_DARWIN = dict(\n server_root=\"/etc/apache2\",\n vhost_root=\"/etc/apache2/other\",\n vhost_files=\"*.conf\",\n logs_root=\"/var/log/apache2\",\n version_cmd=['/usr/sbin/httpd', '-v'],\n define_cmd=['/usr/sbin/httpd', '-t', '-D', 'DUMP_RUN_CFG'],\n restart_cmd=['apachectl', 'graceful'],\n conftest_cmd=['apachectl', 'configtest'],\n enmod=None,\n dismod=None,\n le_vhost_ext=\"-le-ssl.conf\",\n handle_mods=False,\n handle_sites=False,\n challenge_location=\"/etc/apache2/other\",\n MOD_SSL_CONF_SRC=pkg_resources.resource_filename(\n \"certbot_apache\", \"options-ssl-apache.conf\")\n)\nCLI_DEFAULTS_SUSE = dict(\n server_root=\"/etc/apache2\",\n vhost_root=\"/etc/apache2/vhosts.d\",\n vhost_files=\"*.conf\",\n logs_root=\"/var/log/apache2\",\n version_cmd=['apache2ctl', '-v'],\n define_cmd=['apache2ctl', '-t', '-D', 'DUMP_RUN_CFG'],\n restart_cmd=['apache2ctl', 'graceful'],\n conftest_cmd=['apache2ctl', 'configtest'],\n enmod=\"a2enmod\",\n dismod=\"a2dismod\",\n le_vhost_ext=\"-le-ssl.conf\",\n handle_mods=False,\n handle_sites=False,\n challenge_location=\"/etc/apache2/vhosts.d\",\n MOD_SSL_CONF_SRC=pkg_resources.resource_filename(\n \"certbot_apache\", \"options-ssl-apache.conf\")\n)\nCLI_DEFAULTS = {\n \"default\": CLI_DEFAULTS_DEFAULT,\n \"debian\": CLI_DEFAULTS_DEBIAN,\n \"ubuntu\": CLI_DEFAULTS_DEBIAN,\n \"centos\": CLI_DEFAULTS_CENTOS,\n \"centos linux\": CLI_DEFAULTS_CENTOS,\n \"fedora\": CLI_DEFAULTS_CENTOS,\n \"red hat enterprise linux server\": CLI_DEFAULTS_CENTOS,\n \"rhel\": CLI_DEFAULTS_CENTOS,\n \"amazon\": CLI_DEFAULTS_CENTOS,\n \"gentoo\": CLI_DEFAULTS_GENTOO,\n \"gentoo base system\": CLI_DEFAULTS_GENTOO,\n \"darwin\": CLI_DEFAULTS_DARWIN,\n \"opensuse\": CLI_DEFAULTS_SUSE,\n \"suse\": CLI_DEFAULTS_SUSE,\n}\n\"\"\"CLI defaults.\"\"\"\n\nMOD_SSL_CONF_DEST = \"options-ssl-apache.conf\"\n\"\"\"Name of the mod_ssl config file as saved in `IConfig.config_dir`.\"\"\"\n\nAUGEAS_LENS_DIR = pkg_resources.resource_filename(\n \"certbot_apache\", \"augeas_lens\")\n\"\"\"Path to the Augeas lens directory\"\"\"\n\nREWRITE_HTTPS_ARGS = [\n \"^\", \"https://%{SERVER_NAME}%{REQUEST_URI}\", \"[L,NE,R=permanent]\"]\n\"\"\"Apache version<2.3.9 rewrite rule arguments used for redirections to\nhttps vhost\"\"\"\n\nREWRITE_HTTPS_ARGS_WITH_END = [\n \"^\", 
\"https://%{SERVER_NAME}%{REQUEST_URI}\", \"[END,NE,R=permanent]\"]\n\"\"\"Apache version >= 2.3.9 rewrite rule arguments used for redirections to\n https vhost\"\"\"\n\nOLD_REWRITE_HTTPS_ARGS = [\n [\"^\", \"https://%{SERVER_NAME}%{REQUEST_URI}\", \"[L,QSA,R=permanent]\"],\n [\"^\", \"https://%{SERVER_NAME}%{REQUEST_URI}\", \"[END,QSA,R=permanent]\"]]\n\nHSTS_ARGS = [\"always\", \"set\", \"Strict-Transport-Security\",\n \"\\\"max-age=31536000\\\"\"]\n\"\"\"Apache header arguments for HSTS\"\"\"\n\nUIR_ARGS = [\"always\", \"set\", \"Content-Security-Policy\",\n \"upgrade-insecure-requests\"]\n\nHEADER_ARGS = {\"Strict-Transport-Security\": HSTS_ARGS,\n \"Upgrade-Insecure-Requests\": UIR_ARGS}\n\n\ndef os_constant(key):\n \"\"\"\n Get a constant value for operating system\n\n :param key: name of cli constant\n :return: value of constant for active os\n \"\"\"\n\n os_info = util.get_os_info()\n try:\n constants = CLI_DEFAULTS[os_info[0].lower()]\n except KeyError:\n constants = os_like_constants()\n if not constants:\n constants = CLI_DEFAULTS[\"default\"]\n return constants[key]\n\n\ndef os_like_constants():\n \"\"\"\n Try to get constants for distribution with\n similar layout and configuration, indicated by\n /etc/os-release variable \"LIKE\"\n\n :returns: Constants dictionary\n :rtype: `dict`\n \"\"\"\n\n os_like = util.get_systemd_os_like()\n if os_like:\n for os_name in os_like:\n if os_name in CLI_DEFAULTS.keys():\n return CLI_DEFAULTS[os_name]\n return {}\n", "path": "certbot-apache/certbot_apache/constants.py"}], "after_files": [{"content": "\"\"\"Apache plugin constants.\"\"\"\nimport pkg_resources\nfrom certbot import util\n\nCLI_DEFAULTS_DEFAULT = dict(\n server_root=\"/etc/apache2\",\n vhost_root=\"/etc/apache2/sites-available\",\n vhost_files=\"*\",\n logs_root=\"/var/log/apache2\",\n version_cmd=['apache2ctl', '-v'],\n define_cmd=['apache2ctl', '-t', '-D', 'DUMP_RUN_CFG'],\n restart_cmd=['apache2ctl', 'graceful'],\n conftest_cmd=['apache2ctl', 'configtest'],\n enmod=None,\n dismod=None,\n le_vhost_ext=\"-le-ssl.conf\",\n handle_mods=False,\n handle_sites=False,\n challenge_location=\"/etc/apache2\",\n MOD_SSL_CONF_SRC=pkg_resources.resource_filename(\n \"certbot_apache\", \"options-ssl-apache.conf\")\n)\nCLI_DEFAULTS_DEBIAN = dict(\n server_root=\"/etc/apache2\",\n vhost_root=\"/etc/apache2/sites-available\",\n vhost_files=\"*\",\n logs_root=\"/var/log/apache2\",\n version_cmd=['apache2ctl', '-v'],\n define_cmd=['apache2ctl', '-t', '-D', 'DUMP_RUN_CFG'],\n restart_cmd=['apache2ctl', 'graceful'],\n conftest_cmd=['apache2ctl', 'configtest'],\n enmod=\"a2enmod\",\n dismod=\"a2dismod\",\n le_vhost_ext=\"-le-ssl.conf\",\n handle_mods=True,\n handle_sites=True,\n challenge_location=\"/etc/apache2\",\n MOD_SSL_CONF_SRC=pkg_resources.resource_filename(\n \"certbot_apache\", \"options-ssl-apache.conf\")\n)\nCLI_DEFAULTS_CENTOS = dict(\n server_root=\"/etc/httpd\",\n vhost_root=\"/etc/httpd/conf.d\",\n vhost_files=\"*.conf\",\n logs_root=\"/var/log/httpd\",\n version_cmd=['apachectl', '-v'],\n define_cmd=['apachectl', '-t', '-D', 'DUMP_RUN_CFG'],\n restart_cmd=['apachectl', 'graceful'],\n conftest_cmd=['apachectl', 'configtest'],\n enmod=None,\n dismod=None,\n le_vhost_ext=\"-le-ssl.conf\",\n handle_mods=False,\n handle_sites=False,\n challenge_location=\"/etc/httpd/conf.d\",\n MOD_SSL_CONF_SRC=pkg_resources.resource_filename(\n \"certbot_apache\", \"centos-options-ssl-apache.conf\")\n)\nCLI_DEFAULTS_GENTOO = dict(\n server_root=\"/etc/apache2\",\n 
vhost_root=\"/etc/apache2/vhosts.d\",\n vhost_files=\"*.conf\",\n logs_root=\"/var/log/apache2\",\n version_cmd=['/usr/sbin/apache2', '-v'],\n define_cmd=['apache2ctl', 'virtualhosts'],\n restart_cmd=['apache2ctl', 'graceful'],\n conftest_cmd=['apache2ctl', 'configtest'],\n enmod=None,\n dismod=None,\n le_vhost_ext=\"-le-ssl.conf\",\n handle_mods=False,\n handle_sites=False,\n challenge_location=\"/etc/apache2/vhosts.d\",\n MOD_SSL_CONF_SRC=pkg_resources.resource_filename(\n \"certbot_apache\", \"options-ssl-apache.conf\")\n)\nCLI_DEFAULTS_DARWIN = dict(\n server_root=\"/etc/apache2\",\n vhost_root=\"/etc/apache2/other\",\n vhost_files=\"*.conf\",\n logs_root=\"/var/log/apache2\",\n version_cmd=['/usr/sbin/httpd', '-v'],\n define_cmd=['/usr/sbin/httpd', '-t', '-D', 'DUMP_RUN_CFG'],\n restart_cmd=['apachectl', 'graceful'],\n conftest_cmd=['apachectl', 'configtest'],\n enmod=None,\n dismod=None,\n le_vhost_ext=\"-le-ssl.conf\",\n handle_mods=False,\n handle_sites=False,\n challenge_location=\"/etc/apache2/other\",\n MOD_SSL_CONF_SRC=pkg_resources.resource_filename(\n \"certbot_apache\", \"options-ssl-apache.conf\")\n)\nCLI_DEFAULTS_SUSE = dict(\n server_root=\"/etc/apache2\",\n vhost_root=\"/etc/apache2/vhosts.d\",\n vhost_files=\"*.conf\",\n logs_root=\"/var/log/apache2\",\n version_cmd=['apache2ctl', '-v'],\n define_cmd=['apache2ctl', '-t', '-D', 'DUMP_RUN_CFG'],\n restart_cmd=['apache2ctl', 'graceful'],\n conftest_cmd=['apache2ctl', 'configtest'],\n enmod=\"a2enmod\",\n dismod=\"a2dismod\",\n le_vhost_ext=\"-le-ssl.conf\",\n handle_mods=False,\n handle_sites=False,\n challenge_location=\"/etc/apache2/vhosts.d\",\n MOD_SSL_CONF_SRC=pkg_resources.resource_filename(\n \"certbot_apache\", \"options-ssl-apache.conf\")\n)\nCLI_DEFAULTS_ARCH = dict(\n server_root=\"/etc/httpd\",\n vhost_root=\"/etc/httpd/conf\",\n vhost_files=\"*.conf\",\n logs_root=\"/var/log/httpd\",\n version_cmd=['apachectl', '-v'],\n define_cmd=['apachectl', '-t', '-D', 'DUMP_RUN_CFG'],\n restart_cmd=['apachectl', 'graceful'],\n conftest_cmd=['apachectl', 'configtest'],\n enmod=None,\n dismod=None,\n le_vhost_ext=\"-le-ssl.conf\",\n handle_mods=False,\n handle_sites=False,\n challenge_location=\"/etc/httpd/conf\",\n MOD_SSL_CONF_SRC=pkg_resources.resource_filename(\n \"certbot_apache\", \"options-ssl-apache.conf\")\n)\nCLI_DEFAULTS = {\n \"default\": CLI_DEFAULTS_DEFAULT,\n \"debian\": CLI_DEFAULTS_DEBIAN,\n \"ubuntu\": CLI_DEFAULTS_DEBIAN,\n \"centos\": CLI_DEFAULTS_CENTOS,\n \"centos linux\": CLI_DEFAULTS_CENTOS,\n \"fedora\": CLI_DEFAULTS_CENTOS,\n \"red hat enterprise linux server\": CLI_DEFAULTS_CENTOS,\n \"rhel\": CLI_DEFAULTS_CENTOS,\n \"amazon\": CLI_DEFAULTS_CENTOS,\n \"gentoo\": CLI_DEFAULTS_GENTOO,\n \"gentoo base system\": CLI_DEFAULTS_GENTOO,\n \"darwin\": CLI_DEFAULTS_DARWIN,\n \"opensuse\": CLI_DEFAULTS_SUSE,\n \"suse\": CLI_DEFAULTS_SUSE,\n \"arch\": CLI_DEFAULTS_ARCH,\n}\n\"\"\"CLI defaults.\"\"\"\n\nMOD_SSL_CONF_DEST = \"options-ssl-apache.conf\"\n\"\"\"Name of the mod_ssl config file as saved in `IConfig.config_dir`.\"\"\"\n\nAUGEAS_LENS_DIR = pkg_resources.resource_filename(\n \"certbot_apache\", \"augeas_lens\")\n\"\"\"Path to the Augeas lens directory\"\"\"\n\nREWRITE_HTTPS_ARGS = [\n \"^\", \"https://%{SERVER_NAME}%{REQUEST_URI}\", \"[L,NE,R=permanent]\"]\n\"\"\"Apache version<2.3.9 rewrite rule arguments used for redirections to\nhttps vhost\"\"\"\n\nREWRITE_HTTPS_ARGS_WITH_END = [\n \"^\", \"https://%{SERVER_NAME}%{REQUEST_URI}\", \"[END,NE,R=permanent]\"]\n\"\"\"Apache version >= 2.3.9 rewrite rule 
arguments used for redirections to\n https vhost\"\"\"\n\nOLD_REWRITE_HTTPS_ARGS = [\n [\"^\", \"https://%{SERVER_NAME}%{REQUEST_URI}\", \"[L,QSA,R=permanent]\"],\n [\"^\", \"https://%{SERVER_NAME}%{REQUEST_URI}\", \"[END,QSA,R=permanent]\"]]\n\nHSTS_ARGS = [\"always\", \"set\", \"Strict-Transport-Security\",\n \"\\\"max-age=31536000\\\"\"]\n\"\"\"Apache header arguments for HSTS\"\"\"\n\nUIR_ARGS = [\"always\", \"set\", \"Content-Security-Policy\",\n \"upgrade-insecure-requests\"]\n\nHEADER_ARGS = {\"Strict-Transport-Security\": HSTS_ARGS,\n \"Upgrade-Insecure-Requests\": UIR_ARGS}\n\n\ndef os_constant(key):\n \"\"\"\n Get a constant value for operating system\n\n :param key: name of cli constant\n :return: value of constant for active os\n \"\"\"\n\n os_info = util.get_os_info()\n try:\n constants = CLI_DEFAULTS[os_info[0].lower()]\n except KeyError:\n constants = os_like_constants()\n if not constants:\n constants = CLI_DEFAULTS[\"default\"]\n return constants[key]\n\n\ndef os_like_constants():\n \"\"\"\n Try to get constants for distribution with\n similar layout and configuration, indicated by\n /etc/os-release variable \"LIKE\"\n\n :returns: Constants dictionary\n :rtype: `dict`\n \"\"\"\n\n os_like = util.get_systemd_os_like()\n if os_like:\n for os_name in os_like:\n if os_name in CLI_DEFAULTS.keys():\n return CLI_DEFAULTS[os_name]\n return {}\n", "path": "certbot-apache/certbot_apache/constants.py"}]}
| 2,762 | 375 |
gh_patches_debug_33444
|
rasdani/github-patches
|
git_diff
|
ResonantGeoData__ResonantGeoData-667
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`yield_checksumfiles` function doesn't handle `ChecksumFiles` with file paths as their `name` correctly
To reproduce, run [this management script](https://gist.github.com/mvandenburgh/6c9b284b6bf2b684b43236fe0da35a93) and then try to view the `Raster` that it creates. The tile requests will fail with 500 errors about wrong file paths in the server log.
I came across this bug when working on Danesfield. On that project we encode the file path of a given `ChecksumFile` inside the `name` column - for example, `foo/bar/foobar.txt` represents a file hierarchy of
```
foo/
bar/
foobar.txt
```
[This line](https://github.com/ResonantGeoData/ResonantGeoData/blob/main/django-rgd/rgd/models/file.py#L290) in RGD is incompatible with this approach, since it assumes the ChecksumFile name is a flat file name instead of a file path. For example, if we have a ChecksumFile with the name `foo.tiff`, the current code would work:
```
path = '/tmp/rgd/file_cache/foo.tiff'
with yield_checksumfiles([self], path.parent):
yield path
```
Note that `path.parent` would be `/tmp/rgd/file_cache`, which is correct. But if the file name is `a/b/c/foo.tiff`:
```
path = '/tmp/rgd/file_cache/a/b/c/foo.tiff'
with yield_checksumfiles([self], path.parent):
    yield path
```
the call to `path.parent` would evaluate to `/tmp/rgd/file_cache/a/b/c`, which is incorrect and leads to 500 errors since the server can't find the files in that location.
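
(Editor's note, not part of the original report: a minimal pathlib sketch of the mismatch, reusing the illustrative cache root `/tmp/rgd/file_cache` and name `a/b/c/foo.tiff` from above; the recovery step at the end is one possible way to derive the correct directory, not the project's actual implementation.)

```python
from pathlib import Path

cache_root = Path('/tmp/rgd/file_cache')   # illustrative cache directory
name = 'a/b/c/foo.tiff'                    # ChecksumFile.name containing path components
path = cache_root / name                   # /tmp/rgd/file_cache/a/b/c/foo.tiff

# What the current code hands to yield_checksumfiles -- the file's immediate parent:
assert path.parent == Path('/tmp/rgd/file_cache/a/b/c')

# The downloaded files actually live under the cache root, which can be
# recovered by walking up one level per component of `name`:
root = path.parents[len(Path(name).parts) - 1]
assert root == cache_root
```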
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `django-rgd/rgd/models/file.py`
Content:
```
1 from contextlib import contextmanager
2 import logging
3 import os
4 from pathlib import Path
5 from urllib.error import URLError
6 from urllib.parse import urlparse
7
8 from crum import get_current_user
9 from django.conf import settings
10 from django.contrib.gis.db import models
11 from django_extensions.db.models import TimeStampedModel
12 from rgd.utility import (
13 _link_url,
14 clean_file_cache,
15 compute_checksum_file_field,
16 compute_checksum_url,
17 compute_hash,
18 download_field_file_to_local_path,
19 download_url_file_to_local_path,
20 get_cache_dir,
21 get_file_lock,
22 patch_internal_presign,
23 precheck_fuse,
24 safe_urlopen,
25 url_file_to_fuse_path,
26 uuid_prefix_filename,
27 )
28 from s3_file_field import S3FileField
29
30 from .. import tasks
31 from .collection import Collection
32 from .fileset import FileSet
33 from .mixins import TaskEventMixin
34
35 logger = logging.getLogger(__name__)
36
37
38 class FileSourceType(models.IntegerChoices):
39 FILE_FIELD = 1, 'FileField'
40 URL = 2, 'URL'
41
42
43 class ChecksumFile(TimeStampedModel, TaskEventMixin):
44 """The main class for user-uploaded files.
45
46 This has support for manually uploading files or specifying a URL to a file
47 (for example in an existing S3 bucket). This broadly supports ``http<s>://``
48 URLs to file resources as well as ``s3://`` as long as the node the app is
49 running on is provisioned to access that S3 bucket.
50
51 """
52
53 name = models.CharField(max_length=1000, blank=True)
54 description = models.TextField(null=True, blank=True)
55 checksum = models.CharField(max_length=128) # sha512
56 validate_checksum = models.BooleanField(
57 default=False
58 ) # a flag to validate the checksum against the saved checksum
59 last_validation = models.BooleanField(default=True)
60 collection = models.ForeignKey(
61 Collection,
62 on_delete=models.SET_NULL,
63 related_name='%(class)ss',
64 related_query_name='%(class)ss',
65 null=True,
66 blank=True,
67 )
68 created_by = models.ForeignKey(
69 settings.AUTH_USER_MODEL, null=True, blank=True, default=None, on_delete=models.SET_NULL
70 )
71 file_set = models.ForeignKey(FileSet, null=True, blank=True, on_delete=models.SET_NULL)
72
73 type = models.IntegerField(choices=FileSourceType.choices, default=FileSourceType.FILE_FIELD)
74 file = S3FileField(null=True, blank=True, upload_to=uuid_prefix_filename)
75 url = models.TextField(null=True, blank=True)
76
77 task_funcs = (tasks.task_checksum_file_post_save,)
78
79 class Meta:
80 constraints = [
81 models.CheckConstraint(
82 name='%(app_label)s_%(class)s_file_source_value_matches_type',
83 check=(
84 models.Q(
85 models.Q(type=FileSourceType.FILE_FIELD, file__regex=r'.+')
86 & models.Q(models.Q(url__in=['', None]) | models.Q(url__isnull=True))
87 )
88 | models.Q(
89 models.Q(type=FileSourceType.URL)
90 & models.Q(models.Q(url__isnull=False) & models.Q(url__regex=r'.+'))
91 & models.Q(models.Q(file__in=['', None]) | models.Q(file__isnull=True))
92 )
93 ),
94 ),
95 models.UniqueConstraint(
96 fields=['file_set', 'name'],
97 name='unique_name',
98 ),
99 models.UniqueConstraint(
100 fields=['collection', 'url'],
101 name='unique_url_collection',
102 condition=models.Q(type=FileSourceType.URL), # Only apply if URL file
103 ),
104 ]
105
106 @property
107 def basename(self):
108 return os.path.basename(self.name)
109
110 @property
111 def size(self):
112 # Ensure safe check of self.file
113 try:
114 return self.file.size
115 except ValueError:
116 return None
117
118 def get_checksum(self):
119 """Compute a new checksum without saving it."""
120 if self.type == FileSourceType.FILE_FIELD:
121 return compute_checksum_file_field(self.file)
122 elif self.type == FileSourceType.URL:
123 parsed = urlparse(self.url)
124 if parsed.scheme in ['https', 'http']:
125 return compute_checksum_url(self.url)
126 else:
127 with self.yield_local_path() as path:
128 with open(path, 'rb') as f:
129 return compute_hash(f)
130 else:
131 raise NotImplementedError(f'Type ({self.type}) not supported.')
132
133 def update_checksum(self):
134 self.checksum = self.get_checksum()
135 # Simple update save - not full save
136 super(ChecksumFile, self).save(
137 update_fields=[
138 'checksum',
139 ]
140 )
141
142 def validate(self):
143 previous = self.checksum
144 self.update_checksum()
145 self.last_validation = self.checksum == previous
146 # Simple update save - not full save
147 super(ChecksumFile, self).save(
148 update_fields=[
149 'last_validation',
150 ]
151 )
152 return self.last_validation
153
154 def post_save_job(self):
155 if not self.checksum or self.validate_checksum:
156 if self.validate_checksum:
157 self.validate()
158 else:
159 self.update_checksum()
160 # Reset the user flags
161 self.validate_checksum = False
162 # Simple update save - not full save
163 self.save(
164 update_fields=[
165 'checksum',
166 'last_validation',
167 'validate_checksum',
168 ]
169 )
170
171 def save(self, *args, **kwargs):
172 if not self.name:
173 if self.type == FileSourceType.FILE_FIELD and self.file.name:
174 self.name = os.path.basename(self.file.name)
175 elif self.type == FileSourceType.URL:
176 parsed = urlparse(self.url)
177 if parsed.scheme in ['https', 'http']:
178 try:
179 with safe_urlopen(self.url) as r:
180 self.name = r.info().get_filename()
181 except (AttributeError, ValueError, URLError):
182 pass
183 if not self.name:
184 # Fallback
185 self.name = os.path.basename(urlparse(self.url).path)
186 # Handle `created_by` User
187 user = get_current_user()
188 if user and not user.pk:
189 user = None
190 if not self.pk:
191 self.created_by = user
192 # Must save the model with the file before accessing it for the checksum
193 super(ChecksumFile, self).save(*args, **kwargs)
194
195 def download_to_local_path(self, directory: str = None):
196 """Forcibly download this file to a directory on disk.
197
198 Cleanup must be handled by caller.
199
200 This will handle locking to prevent multiple processes/threads
201 from trying to download the file at the same time -- only one thread
202 or process will perform the download and the rest will yield its
203 result.
204
205 """
206 if directory is None:
207 dest_path = self.get_cache_path()
208 else:
209 dest_path = Path(directory, self.name)
210 dest_path.parent.mkdir(parents=True, exist_ok=True)
211 # Thread/process safe locking for file access
212 lock = get_file_lock(dest_path)
213
214 with lock: # TODO: handle timeouts in condition
215 if dest_path.exists() and dest_path.stat().st_size > 0:
216 # File already exists (is cached)
217 logger.debug(f'Found cached file ({self.pk}) at: {dest_path}')
218 # Touch the file so that it moves to the top of the priority list
219 # when cleaning.
220 dest_path.touch()
221 return dest_path
222 else:
223 logger.debug(f'Downloading file ({self.pk}) to: {dest_path}')
224 # If downloading to the cache, clean to achieve available free space
225 if get_cache_dir() in dest_path.parents:
226 clean_file_cache()
227 # TODO: handle if these fail (e.g. bad S3 credentials)
228 if self.type == FileSourceType.FILE_FIELD:
229 return download_field_file_to_local_path(self.file, dest_path)
230 elif self.type == FileSourceType.URL:
231 return download_url_file_to_local_path(self.url, dest_path)
232
233 def get_cache_path(self):
234 """Generate a predetermined path in the cache directory.
235
236 This will use the associated FileSet's cache path if this resource
237 has a file_set, otherwise it will place in the top of the cache
238 directory.
239
240 """
241 if self.file_set is None:
242 # If no file_set, place in the main cache directory
243 directory = get_cache_dir() / f'f-{self.pk}'
244 directory.mkdir(parents=True, exist_ok=True)
245 else:
246 directory = self.file_set.get_cache_path()
247 return directory / f'{self.name}'
248
249 @contextmanager
250 def yield_local_path(self, try_fuse: bool = True, yield_file_set: bool = False):
251 """Create a local path for this file and all other files in its file_set.
252
253 This will first attempt to use httpfs to FUSE mount the file's URL if
254 and only if the file does not belong to a FileSet. FUSE with multiple
255 files in a FileSet is not yet supported.
256
257 If FUSE is unavailable, this will fallback to downloading the entire
258 file (and the other files in this item's FileSet) to local storage.
259
260 Parameters
261 ----------
262 try_fuse : bool
263 Try to use the FUSE interface. If false, use VSI or download to
264 local storage.
265
266 yield_file_set : bool
267 Yield all of the files in this file's file_set if available.
268
269 """
270 # TODO: fix FUSE to handle adjacent files
271 if (
272 self.file_set is None
273 and try_fuse
274 and self.type == FileSourceType.URL
275 and precheck_fuse(self.get_url())
276 ):
277 yield url_file_to_fuse_path(self.get_url(internal=True))
278 return
279 # Fallback to loading entire file locally - this uses `get_temp_path`
280 logger.debug('`yield_local_path` falling back to downloading entire file to local storage.')
281 path = self.get_cache_path()
282 if yield_file_set and self.file_set:
283 # NOTE: This is messy and should be improved but it ensures the directory remains locked
284 with self.file_set.yield_all_to_local_path() as _:
285 yield path
286 return
287 # Not in file_set. Download to cache dir
288 from .utils import yield_checksumfiles
289
290 with yield_checksumfiles([self], path.parent):
291 yield path
292
293 def get_url(self, internal: bool = False):
294 """Get the URL of the stored resource.
295
296 Parameters
297 ----------
298 internal : bool
299 In most cases this URL will be accessible from anywhere. In some
300 cases, this URL will only be accessible from within the container.
301 This flag is for use with internal processes to make sure the host
302 is correctly set to ``minio`` when needed. See
303 ``patch_internal_presign`` for more details.
304
305 """
306 if self.type == FileSourceType.FILE_FIELD:
307 if internal:
308 with patch_internal_presign(self.file):
309 return self.file.url
310 else:
311 return self.file.url
312 elif self.type == FileSourceType.URL:
313 return self.url
314
315 def data_link(self):
316 return _link_url(self, 'get_url')
317
318 data_link.allow_tags = True
319
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/django-rgd/rgd/models/file.py b/django-rgd/rgd/models/file.py
--- a/django-rgd/rgd/models/file.py
+++ b/django-rgd/rgd/models/file.py
@@ -230,7 +230,7 @@
elif self.type == FileSourceType.URL:
return download_url_file_to_local_path(self.url, dest_path)
- def get_cache_path(self):
+ def get_cache_path(self, root: bool = False):
"""Generate a predetermined path in the cache directory.
This will use the associated FileSet's cache path if this resource
@@ -244,6 +244,8 @@
directory.mkdir(parents=True, exist_ok=True)
else:
directory = self.file_set.get_cache_path()
+ if root:
+ return directory
return directory / f'{self.name}'
@contextmanager
@@ -278,6 +280,7 @@
return
# Fallback to loading entire file locally - this uses `get_temp_path`
logger.debug('`yield_local_path` falling back to downloading entire file to local storage.')
+ root = self.get_cache_path(root=True)
path = self.get_cache_path()
if yield_file_set and self.file_set:
# NOTE: This is messy and should be improved but it ensures the directory remains locked
@@ -287,7 +290,7 @@
# Not in file_set. Download to cache dir
from .utils import yield_checksumfiles
- with yield_checksumfiles([self], path.parent):
+ with yield_checksumfiles([self], root):
yield path
def get_url(self, internal: bool = False):
|
{"golden_diff": "diff --git a/django-rgd/rgd/models/file.py b/django-rgd/rgd/models/file.py\n--- a/django-rgd/rgd/models/file.py\n+++ b/django-rgd/rgd/models/file.py\n@@ -230,7 +230,7 @@\n elif self.type == FileSourceType.URL:\n return download_url_file_to_local_path(self.url, dest_path)\n \n- def get_cache_path(self):\n+ def get_cache_path(self, root: bool = False):\n \"\"\"Generate a predetermined path in the cache directory.\n \n This will use the associated FileSet's cache path if this resource\n@@ -244,6 +244,8 @@\n directory.mkdir(parents=True, exist_ok=True)\n else:\n directory = self.file_set.get_cache_path()\n+ if root:\n+ return directory\n return directory / f'{self.name}'\n \n @contextmanager\n@@ -278,6 +280,7 @@\n return\n # Fallback to loading entire file locally - this uses `get_temp_path`\n logger.debug('`yield_local_path` falling back to downloading entire file to local storage.')\n+ root = self.get_cache_path(root=True)\n path = self.get_cache_path()\n if yield_file_set and self.file_set:\n # NOTE: This is messy and should be improved but it ensures the directory remains locked\n@@ -287,7 +290,7 @@\n # Not in file_set. Download to cache dir\n from .utils import yield_checksumfiles\n \n- with yield_checksumfiles([self], path.parent):\n+ with yield_checksumfiles([self], root):\n yield path\n \n def get_url(self, internal: bool = False):\n", "issue": "`yield_checksumfiles` function doesn't handle `ChecksumFiles` with file paths as their `name` correctly\nTo reproduce, run [this management script](https://gist.github.com/mvandenburgh/6c9b284b6bf2b684b43236fe0da35a93) and then try to view the `Raster` that it creates. The tile requests will fail with 500 errors with errors about wrong file paths in the server log.\r\n\r\nI came across this bug when working on Danesfield. On that project we encode the file path of a given `ChecksumFile` inside the `name` column - for example, `foo/bar/foobar.txt` represents a file hierarchy of\r\n```\r\nfoo/\r\n bar/\r\n foobar.txt\r\n````\r\n\r\n[This line](https://github.com/ResonantGeoData/ResonantGeoData/blob/main/django-rgd/rgd/models/file.py#L290) in RGD is incompatible with this approach, since it assumes the ChecksumFile name is a flat file name instead of a file path. For example, if we have a ChecksumFile with the name `foo.tiff`, the current code would work:\r\n```\r\npath = '/tmp/rgd/file_cache/foo.tiff'\r\nwith yield_checksumfiles([self], path.parent):\r\n yield path\r\n```\r\nNote that `path.parent` would be `/tmp/rgd/file_cache/foo.tiff`, which is correct. 
But if the file name is `a/b/c/foo.tiff`:\r\n```\r\npath = '/tmp/rgd/file_cache/a/b/c/foo.tiff'\r\nwith yield_checksumfiles([self], path.parent):\r\n yield path\r\n```\r\nthe call to `path.parent` would evaluate to `/tmp/rgd/file_cache/a/b/c`, which is incorrect and leads to 500 errors since the server can't find the files in that location.\n", "before_files": [{"content": "from contextlib import contextmanager\nimport logging\nimport os\nfrom pathlib import Path\nfrom urllib.error import URLError\nfrom urllib.parse import urlparse\n\nfrom crum import get_current_user\nfrom django.conf import settings\nfrom django.contrib.gis.db import models\nfrom django_extensions.db.models import TimeStampedModel\nfrom rgd.utility import (\n _link_url,\n clean_file_cache,\n compute_checksum_file_field,\n compute_checksum_url,\n compute_hash,\n download_field_file_to_local_path,\n download_url_file_to_local_path,\n get_cache_dir,\n get_file_lock,\n patch_internal_presign,\n precheck_fuse,\n safe_urlopen,\n url_file_to_fuse_path,\n uuid_prefix_filename,\n)\nfrom s3_file_field import S3FileField\n\nfrom .. import tasks\nfrom .collection import Collection\nfrom .fileset import FileSet\nfrom .mixins import TaskEventMixin\n\nlogger = logging.getLogger(__name__)\n\n\nclass FileSourceType(models.IntegerChoices):\n FILE_FIELD = 1, 'FileField'\n URL = 2, 'URL'\n\n\nclass ChecksumFile(TimeStampedModel, TaskEventMixin):\n \"\"\"The main class for user-uploaded files.\n\n This has support for manually uploading files or specifying a URL to a file\n (for example in an existing S3 bucket). This broadly supports ``http<s>://``\n URLs to file resources as well as ``s3://`` as long as the node the app is\n running on is provisioned to access that S3 bucket.\n\n \"\"\"\n\n name = models.CharField(max_length=1000, blank=True)\n description = models.TextField(null=True, blank=True)\n checksum = models.CharField(max_length=128) # sha512\n validate_checksum = models.BooleanField(\n default=False\n ) # a flag to validate the checksum against the saved checksum\n last_validation = models.BooleanField(default=True)\n collection = models.ForeignKey(\n Collection,\n on_delete=models.SET_NULL,\n related_name='%(class)ss',\n related_query_name='%(class)ss',\n null=True,\n blank=True,\n )\n created_by = models.ForeignKey(\n settings.AUTH_USER_MODEL, null=True, blank=True, default=None, on_delete=models.SET_NULL\n )\n file_set = models.ForeignKey(FileSet, null=True, blank=True, on_delete=models.SET_NULL)\n\n type = models.IntegerField(choices=FileSourceType.choices, default=FileSourceType.FILE_FIELD)\n file = S3FileField(null=True, blank=True, upload_to=uuid_prefix_filename)\n url = models.TextField(null=True, blank=True)\n\n task_funcs = (tasks.task_checksum_file_post_save,)\n\n class Meta:\n constraints = [\n models.CheckConstraint(\n name='%(app_label)s_%(class)s_file_source_value_matches_type',\n check=(\n models.Q(\n models.Q(type=FileSourceType.FILE_FIELD, file__regex=r'.+')\n & models.Q(models.Q(url__in=['', None]) | models.Q(url__isnull=True))\n )\n | models.Q(\n models.Q(type=FileSourceType.URL)\n & models.Q(models.Q(url__isnull=False) & models.Q(url__regex=r'.+'))\n & models.Q(models.Q(file__in=['', None]) | models.Q(file__isnull=True))\n )\n ),\n ),\n models.UniqueConstraint(\n fields=['file_set', 'name'],\n name='unique_name',\n ),\n models.UniqueConstraint(\n fields=['collection', 'url'],\n name='unique_url_collection',\n condition=models.Q(type=FileSourceType.URL), # Only apply if URL file\n ),\n ]\n\n @property\n def 
basename(self):\n return os.path.basename(self.name)\n\n @property\n def size(self):\n # Ensure safe check of self.file\n try:\n return self.file.size\n except ValueError:\n return None\n\n def get_checksum(self):\n \"\"\"Compute a new checksum without saving it.\"\"\"\n if self.type == FileSourceType.FILE_FIELD:\n return compute_checksum_file_field(self.file)\n elif self.type == FileSourceType.URL:\n parsed = urlparse(self.url)\n if parsed.scheme in ['https', 'http']:\n return compute_checksum_url(self.url)\n else:\n with self.yield_local_path() as path:\n with open(path, 'rb') as f:\n return compute_hash(f)\n else:\n raise NotImplementedError(f'Type ({self.type}) not supported.')\n\n def update_checksum(self):\n self.checksum = self.get_checksum()\n # Simple update save - not full save\n super(ChecksumFile, self).save(\n update_fields=[\n 'checksum',\n ]\n )\n\n def validate(self):\n previous = self.checksum\n self.update_checksum()\n self.last_validation = self.checksum == previous\n # Simple update save - not full save\n super(ChecksumFile, self).save(\n update_fields=[\n 'last_validation',\n ]\n )\n return self.last_validation\n\n def post_save_job(self):\n if not self.checksum or self.validate_checksum:\n if self.validate_checksum:\n self.validate()\n else:\n self.update_checksum()\n # Reset the user flags\n self.validate_checksum = False\n # Simple update save - not full save\n self.save(\n update_fields=[\n 'checksum',\n 'last_validation',\n 'validate_checksum',\n ]\n )\n\n def save(self, *args, **kwargs):\n if not self.name:\n if self.type == FileSourceType.FILE_FIELD and self.file.name:\n self.name = os.path.basename(self.file.name)\n elif self.type == FileSourceType.URL:\n parsed = urlparse(self.url)\n if parsed.scheme in ['https', 'http']:\n try:\n with safe_urlopen(self.url) as r:\n self.name = r.info().get_filename()\n except (AttributeError, ValueError, URLError):\n pass\n if not self.name:\n # Fallback\n self.name = os.path.basename(urlparse(self.url).path)\n # Handle `created_by` User\n user = get_current_user()\n if user and not user.pk:\n user = None\n if not self.pk:\n self.created_by = user\n # Must save the model with the file before accessing it for the checksum\n super(ChecksumFile, self).save(*args, **kwargs)\n\n def download_to_local_path(self, directory: str = None):\n \"\"\"Forcibly download this file to a directory on disk.\n\n Cleanup must be handled by caller.\n\n This will handle locking to prevent multiple processes/threads\n from trying to download the file at the same time -- only one thread\n or process will perform the download and the rest will yield its\n result.\n\n \"\"\"\n if directory is None:\n dest_path = self.get_cache_path()\n else:\n dest_path = Path(directory, self.name)\n dest_path.parent.mkdir(parents=True, exist_ok=True)\n # Thread/process safe locking for file access\n lock = get_file_lock(dest_path)\n\n with lock: # TODO: handle timeouts in condition\n if dest_path.exists() and dest_path.stat().st_size > 0:\n # File already exists (is cached)\n logger.debug(f'Found cached file ({self.pk}) at: {dest_path}')\n # Touch the file so that it moves to the top of the priority list\n # when cleaning.\n dest_path.touch()\n return dest_path\n else:\n logger.debug(f'Downloading file ({self.pk}) to: {dest_path}')\n # If downloading to the cache, clean to achieve available free space\n if get_cache_dir() in dest_path.parents:\n clean_file_cache()\n # TODO: handle if these fail (e.g. 
bad S3 credentials)\n if self.type == FileSourceType.FILE_FIELD:\n return download_field_file_to_local_path(self.file, dest_path)\n elif self.type == FileSourceType.URL:\n return download_url_file_to_local_path(self.url, dest_path)\n\n def get_cache_path(self):\n \"\"\"Generate a predetermined path in the cache directory.\n\n This will use the associated FileSet's cache path if this resource\n has a file_set, otherwise it will place in the top of the cache\n directory.\n\n \"\"\"\n if self.file_set is None:\n # If no file_set, place in the main cache directory\n directory = get_cache_dir() / f'f-{self.pk}'\n directory.mkdir(parents=True, exist_ok=True)\n else:\n directory = self.file_set.get_cache_path()\n return directory / f'{self.name}'\n\n @contextmanager\n def yield_local_path(self, try_fuse: bool = True, yield_file_set: bool = False):\n \"\"\"Create a local path for this file and all other files in its file_set.\n\n This will first attempt to use httpfs to FUSE mount the file's URL if\n and only if the file does not belong to a FileSet. FUSE with multiple\n files in a FileSet is not yet supported.\n\n If FUSE is unavailable, this will fallback to downloading the entire\n file (and the other files in this item's FileSet) to local storage.\n\n Parameters\n ----------\n try_fuse : bool\n Try to use the FUSE interface. If false, use VSI or download to\n local storage.\n\n yield_file_set : bool\n Yield all of the files in this file's file_set if available.\n\n \"\"\"\n # TODO: fix FUSE to handle adjacent files\n if (\n self.file_set is None\n and try_fuse\n and self.type == FileSourceType.URL\n and precheck_fuse(self.get_url())\n ):\n yield url_file_to_fuse_path(self.get_url(internal=True))\n return\n # Fallback to loading entire file locally - this uses `get_temp_path`\n logger.debug('`yield_local_path` falling back to downloading entire file to local storage.')\n path = self.get_cache_path()\n if yield_file_set and self.file_set:\n # NOTE: This is messy and should be improved but it ensures the directory remains locked\n with self.file_set.yield_all_to_local_path() as _:\n yield path\n return\n # Not in file_set. Download to cache dir\n from .utils import yield_checksumfiles\n\n with yield_checksumfiles([self], path.parent):\n yield path\n\n def get_url(self, internal: bool = False):\n \"\"\"Get the URL of the stored resource.\n\n Parameters\n ----------\n internal : bool\n In most cases this URL will be accessible from anywhere. In some\n cases, this URL will only be accessible from within the container.\n This flag is for use with internal processes to make sure the host\n is correctly set to ``minio`` when needed. 
See\n ``patch_internal_presign`` for more details.\n\n \"\"\"\n if self.type == FileSourceType.FILE_FIELD:\n if internal:\n with patch_internal_presign(self.file):\n return self.file.url\n else:\n return self.file.url\n elif self.type == FileSourceType.URL:\n return self.url\n\n def data_link(self):\n return _link_url(self, 'get_url')\n\n data_link.allow_tags = True\n", "path": "django-rgd/rgd/models/file.py"}], "after_files": [{"content": "from contextlib import contextmanager\nimport logging\nimport os\nfrom pathlib import Path\nfrom urllib.error import URLError\nfrom urllib.parse import urlparse\n\nfrom crum import get_current_user\nfrom django.conf import settings\nfrom django.contrib.gis.db import models\nfrom django_extensions.db.models import TimeStampedModel\nfrom rgd.utility import (\n _link_url,\n clean_file_cache,\n compute_checksum_file_field,\n compute_checksum_url,\n compute_hash,\n download_field_file_to_local_path,\n download_url_file_to_local_path,\n get_cache_dir,\n get_file_lock,\n patch_internal_presign,\n precheck_fuse,\n safe_urlopen,\n url_file_to_fuse_path,\n uuid_prefix_filename,\n)\nfrom s3_file_field import S3FileField\n\nfrom .. import tasks\nfrom .collection import Collection\nfrom .fileset import FileSet\nfrom .mixins import TaskEventMixin\n\nlogger = logging.getLogger(__name__)\n\n\nclass FileSourceType(models.IntegerChoices):\n FILE_FIELD = 1, 'FileField'\n URL = 2, 'URL'\n\n\nclass ChecksumFile(TimeStampedModel, TaskEventMixin):\n \"\"\"The main class for user-uploaded files.\n\n This has support for manually uploading files or specifying a URL to a file\n (for example in an existing S3 bucket). This broadly supports ``http<s>://``\n URLs to file resources as well as ``s3://`` as long as the node the app is\n running on is provisioned to access that S3 bucket.\n\n \"\"\"\n\n name = models.CharField(max_length=1000, blank=True)\n description = models.TextField(null=True, blank=True)\n checksum = models.CharField(max_length=128) # sha512\n validate_checksum = models.BooleanField(\n default=False\n ) # a flag to validate the checksum against the saved checksum\n last_validation = models.BooleanField(default=True)\n collection = models.ForeignKey(\n Collection,\n on_delete=models.SET_NULL,\n related_name='%(class)ss',\n related_query_name='%(class)ss',\n null=True,\n blank=True,\n )\n created_by = models.ForeignKey(\n settings.AUTH_USER_MODEL, null=True, blank=True, default=None, on_delete=models.SET_NULL\n )\n file_set = models.ForeignKey(FileSet, null=True, blank=True, on_delete=models.SET_NULL)\n\n type = models.IntegerField(choices=FileSourceType.choices, default=FileSourceType.FILE_FIELD)\n file = S3FileField(null=True, blank=True, upload_to=uuid_prefix_filename)\n url = models.TextField(null=True, blank=True)\n\n task_funcs = (tasks.task_checksum_file_post_save,)\n\n class Meta:\n constraints = [\n models.CheckConstraint(\n name='%(app_label)s_%(class)s_file_source_value_matches_type',\n check=(\n models.Q(\n models.Q(type=FileSourceType.FILE_FIELD, file__regex=r'.+')\n & models.Q(models.Q(url__in=['', None]) | models.Q(url__isnull=True))\n )\n | models.Q(\n models.Q(type=FileSourceType.URL)\n & models.Q(models.Q(url__isnull=False) & models.Q(url__regex=r'.+'))\n & models.Q(models.Q(file__in=['', None]) | models.Q(file__isnull=True))\n )\n ),\n ),\n models.UniqueConstraint(\n fields=['file_set', 'name'],\n name='unique_name',\n ),\n models.UniqueConstraint(\n fields=['collection', 'url'],\n name='unique_url_collection',\n 
condition=models.Q(type=FileSourceType.URL), # Only apply if URL file\n ),\n ]\n\n @property\n def basename(self):\n return os.path.basename(self.name)\n\n @property\n def size(self):\n # Ensure safe check of self.file\n try:\n return self.file.size\n except ValueError:\n return None\n\n def get_checksum(self):\n \"\"\"Compute a new checksum without saving it.\"\"\"\n if self.type == FileSourceType.FILE_FIELD:\n return compute_checksum_file_field(self.file)\n elif self.type == FileSourceType.URL:\n parsed = urlparse(self.url)\n if parsed.scheme in ['https', 'http']:\n return compute_checksum_url(self.url)\n else:\n with self.yield_local_path() as path:\n with open(path, 'rb') as f:\n return compute_hash(f)\n else:\n raise NotImplementedError(f'Type ({self.type}) not supported.')\n\n def update_checksum(self):\n self.checksum = self.get_checksum()\n # Simple update save - not full save\n super(ChecksumFile, self).save(\n update_fields=[\n 'checksum',\n ]\n )\n\n def validate(self):\n previous = self.checksum\n self.update_checksum()\n self.last_validation = self.checksum == previous\n # Simple update save - not full save\n super(ChecksumFile, self).save(\n update_fields=[\n 'last_validation',\n ]\n )\n return self.last_validation\n\n def post_save_job(self):\n if not self.checksum or self.validate_checksum:\n if self.validate_checksum:\n self.validate()\n else:\n self.update_checksum()\n # Reset the user flags\n self.validate_checksum = False\n # Simple update save - not full save\n self.save(\n update_fields=[\n 'checksum',\n 'last_validation',\n 'validate_checksum',\n ]\n )\n\n def save(self, *args, **kwargs):\n if not self.name:\n if self.type == FileSourceType.FILE_FIELD and self.file.name:\n self.name = os.path.basename(self.file.name)\n elif self.type == FileSourceType.URL:\n parsed = urlparse(self.url)\n if parsed.scheme in ['https', 'http']:\n try:\n with safe_urlopen(self.url) as r:\n self.name = r.info().get_filename()\n except (AttributeError, ValueError, URLError):\n pass\n if not self.name:\n # Fallback\n self.name = os.path.basename(urlparse(self.url).path)\n # Handle `created_by` User\n user = get_current_user()\n if user and not user.pk:\n user = None\n if not self.pk:\n self.created_by = user\n # Must save the model with the file before accessing it for the checksum\n super(ChecksumFile, self).save(*args, **kwargs)\n\n def download_to_local_path(self, directory: str = None):\n \"\"\"Forcibly download this file to a directory on disk.\n\n Cleanup must be handled by caller.\n\n This will handle locking to prevent multiple processes/threads\n from trying to download the file at the same time -- only one thread\n or process will perform the download and the rest will yield its\n result.\n\n \"\"\"\n if directory is None:\n dest_path = self.get_cache_path()\n else:\n dest_path = Path(directory, self.name)\n dest_path.parent.mkdir(parents=True, exist_ok=True)\n # Thread/process safe locking for file access\n lock = get_file_lock(dest_path)\n\n with lock: # TODO: handle timeouts in condition\n if dest_path.exists() and dest_path.stat().st_size > 0:\n # File already exists (is cached)\n logger.debug(f'Found cached file ({self.pk}) at: {dest_path}')\n # Touch the file so that it moves to the top of the priority list\n # when cleaning.\n dest_path.touch()\n return dest_path\n else:\n logger.debug(f'Downloading file ({self.pk}) to: {dest_path}')\n # If downloading to the cache, clean to achieve available free space\n if get_cache_dir() in dest_path.parents:\n clean_file_cache()\n # 
TODO: handle if these fail (e.g. bad S3 credentials)\n if self.type == FileSourceType.FILE_FIELD:\n return download_field_file_to_local_path(self.file, dest_path)\n elif self.type == FileSourceType.URL:\n return download_url_file_to_local_path(self.url, dest_path)\n\n def get_cache_path(self, root: bool = False):\n \"\"\"Generate a predetermined path in the cache directory.\n\n This will use the associated FileSet's cache path if this resource\n has a file_set, otherwise it will place in the top of the cache\n directory.\n\n \"\"\"\n if self.file_set is None:\n # If no file_set, place in the main cache directory\n directory = get_cache_dir() / f'f-{self.pk}'\n directory.mkdir(parents=True, exist_ok=True)\n else:\n directory = self.file_set.get_cache_path()\n if root:\n return directory\n return directory / f'{self.name}'\n\n @contextmanager\n def yield_local_path(self, try_fuse: bool = True, yield_file_set: bool = False):\n \"\"\"Create a local path for this file and all other files in its file_set.\n\n This will first attempt to use httpfs to FUSE mount the file's URL if\n and only if the file does not belong to a FileSet. FUSE with multiple\n files in a FileSet is not yet supported.\n\n If FUSE is unavailable, this will fallback to downloading the entire\n file (and the other files in this item's FileSet) to local storage.\n\n Parameters\n ----------\n try_fuse : bool\n Try to use the FUSE interface. If false, use VSI or download to\n local storage.\n\n yield_file_set : bool\n Yield all of the files in this file's file_set if available.\n\n \"\"\"\n # TODO: fix FUSE to handle adjacent files\n if (\n self.file_set is None\n and try_fuse\n and self.type == FileSourceType.URL\n and precheck_fuse(self.get_url())\n ):\n yield url_file_to_fuse_path(self.get_url(internal=True))\n return\n # Fallback to loading entire file locally - this uses `get_temp_path`\n logger.debug('`yield_local_path` falling back to downloading entire file to local storage.')\n root = self.get_cache_path(root=True)\n path = self.get_cache_path()\n if yield_file_set and self.file_set:\n # NOTE: This is messy and should be improved but it ensures the directory remains locked\n with self.file_set.yield_all_to_local_path() as _:\n yield path\n return\n # Not in file_set. Download to cache dir\n from .utils import yield_checksumfiles\n\n with yield_checksumfiles([self], root):\n yield path\n\n def get_url(self, internal: bool = False):\n \"\"\"Get the URL of the stored resource.\n\n Parameters\n ----------\n internal : bool\n In most cases this URL will be accessible from anywhere. In some\n cases, this URL will only be accessible from within the container.\n This flag is for use with internal processes to make sure the host\n is correctly set to ``minio`` when needed. See\n ``patch_internal_presign`` for more details.\n\n \"\"\"\n if self.type == FileSourceType.FILE_FIELD:\n if internal:\n with patch_internal_presign(self.file):\n return self.file.url\n else:\n return self.file.url\n elif self.type == FileSourceType.URL:\n return self.url\n\n def data_link(self):\n return _link_url(self, 'get_url')\n\n data_link.allow_tags = True\n", "path": "django-rgd/rgd/models/file.py"}]}
| 3,942 | 381 |
gh_patches_debug_4231
|
rasdani/github-patches
|
git_diff
|
mlflow__mlflow-9410
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] basic-auth alembic migration crash on remote database
### Issues Policy acknowledgement
- [X] I have read and agree to submit bug reports in accordance with the [issues policy](https://www.github.com/mlflow/mlflow/blob/master/ISSUE_POLICY.md)
### Willingness to contribute
Yes. I can contribute a fix for this bug independently.
### MLflow version
- Client: 1.5.0
- Tracking server: 1.5.0
### System information
- **Debian GNU/Linux 10 (buster)**:
- **Python 3.8.17**:
- **SQLAlchemy version: 2.0.19**
### Describe the problem
Alembic migration for the basic-auth app crashes because the obfuscated password is passed to the alembic config.
This bug seems to be related to the SQLAlchemy version: the migration works with SQLAlchemy 1.4.19 but not with 2.0.19; it seems SQLAlchemy changed the behaviour of the str representation of URLs.
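A rough standalone sketch of the reported difference (assumes SQLAlchemy 2.x is installed; the URL is a placeholder, not a real database):
```
from sqlalchemy.engine import make_url

url = make_url("postgresql://database_user:database_password@host:5432/database_name")

# Reported on SQLAlchemy 2.0.19: the plain string form obfuscates the password,
# so handing it to the alembic config makes the real connection fail.
print(str(url))
# Documented way to keep the real password in the rendered URL:
print(url.render_as_string(hide_password=False))
```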
### Tracking information
<!-- PLEASE KEEP BACKTICKS AND CHECK PREVIEW -->
```shell
REPLACE_ME
```
### Code to reproduce issue
<!-- PLEASE KEEP BACKTICKS AND CHECK PREVIEW -->
```
python -m mlflow.server.auth db upgrade --url 'postgresql://database_user:database_password@host:5432/database_name'
```
### Stack trace
<!-- PLEASE KEEP BACKTICKS AND CHECK PREVIEW -->
```
Traceback (most recent call last):
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 145, in __init__
self._dbapi_connection = engine.raw_connection()
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 3288, in raw_connection
return self.pool.connect()
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py", line 452, in connect
return _ConnectionFairy._checkout(self)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py", line 1267, in _checkout
fairy = _ConnectionRecord.checkout(pool)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py", line 716, in checkout
rec = pool._do_get()
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/impl.py", line 284, in _do_get
return self._create_connection()
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py", line 393, in _create_connection
return _ConnectionRecord(self)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py", line 678, in __init__
self.__connect()
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py", line 903, in __connect
pool.logger.debug("Error on connect(): %s", e)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/util/langhelpers.py", line 147, in __exit__
raise exc_value.with_traceback(exc_tb)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py", line 898, in __connect
self.dbapi_connection = connection = pool._invoke_creator(self)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/create.py", line 637, in connect
return dialect.connect(*cargs, **cparams)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 615, in connect
return self.loaded_dbapi.connect(*cargs, **cparams)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/psycopg2/__init__.py", line 122, in connect
conn = _connect(dsn, connection_factory=connection_factory, **kwasync)
sqlalchemy.exc.OperationalError: (psycopg2.OperationalError) connection to server at "host" (host ip), port 5432 failed: FATAL: password authentication failed for user "db_user"
connection to server at "host" (host_ip), port 5432 failed: FATAL: password authentication failed for user "db_user"
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.8/runpy.py", line 194, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/usr/local/lib/python3.8/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/mlflow/server/auth/__main__.py", line 5, in <module>
commands()
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/click/core.py", line 1157, in __call__
return self.main(*args, **kwargs)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/click/core.py", line 1078, in main
rv = self.invoke(ctx)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/click/core.py", line 1688, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/click/core.py", line 1688, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/click/core.py", line 1434, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/click/core.py", line 783, in invoke
return __callback(*args, **kwargs)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/mlflow/server/auth/db/cli.py", line 19, in upgrade
utils.migrate(engine, revision)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/mlflow/server/auth/db/utils.py", line 28, in migrate
upgrade(alembic_cfg, revision)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/alembic/command.py", line 385, in upgrade
script.run_env()
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/alembic/script/base.py", line 578, in run_env
util.load_python_file(self.dir, "env.py")
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/alembic/util/pyfiles.py", line 93, in load_python_file
module = load_module_py(module_id, path)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/alembic/util/pyfiles.py", line 109, in load_module_py
spec.loader.exec_module(module) # type: ignore
File "<frozen importlib._bootstrap_external>", line 843, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/mlflow/server/auth/db/migrations/env.py", line 79, in <module>
run_migrations_online()
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/mlflow/server/auth/db/migrations/env.py", line 69, in run_migrations_online
with connectable.connect() as connection:
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 3264, in connect
return self._connection_cls(self)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 147, in __init__
Connection._handle_dbapi_exception_noconnection(
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 2426, in _handle_dbapi_exception_noconnection
raise sqlalchemy_exception.with_traceback(exc_info[2]) from e
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 145, in __init__
self._dbapi_connection = engine.raw_connection()
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 3288, in raw_connection
return self.pool.connect()
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py", line 452, in connect
return _ConnectionFairy._checkout(self)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py", line 1267, in _checkout
fairy = _ConnectionRecord.checkout(pool)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py", line 716, in checkout
rec = pool._do_get()
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/impl.py", line 284, in _do_get
return self._create_connection()
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py", line 393, in _create_connection
return _ConnectionRecord(self)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py", line 678, in __init__
self.__connect()
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py", line 903, in __connect
pool.logger.debug("Error on connect(): %s", e)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/util/langhelpers.py", line 147, in __exit__
raise exc_value.with_traceback(exc_tb)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py", line 898, in __connect
self.dbapi_connection = connection = pool._invoke_creator(self)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/create.py", line 637, in connect
return dialect.connect(*cargs, **cparams)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 615, in connect
return self.loaded_dbapi.connect(*cargs, **cparams)
File "/home/turuania/mlflow_test/env/lib/python3.8/site-packages/psycopg2/__init__.py", line 122, in connect
conn = _connect(dsn, connection_factory=connection_factory, **kwasync)
sqlalchemy.exc.OperationalError: (psycopg2.OperationalError) connection to server at "host" (host ip), port 5432 failed: FATAL: password authentication failed for user "db_user"
connection to server at "host" (host_ip), port 5432 failed: FATAL: password authentication failed for user "db_user"
(Background on this error at: https://sqlalche.me/e/20/e3q8)
```
### Other info / logs
<!-- PLEASE KEEP BACKTICKS AND CHECK PREVIEW -->
```
REPLACE_ME
```
### What component(s) does this bug affect?
- [ ] `area/artifacts`: Artifact stores and artifact logging
- [ ] `area/build`: Build and test infrastructure for MLflow
- [ ] `area/docs`: MLflow documentation pages
- [ ] `area/examples`: Example code
- [ ] `area/gateway`: AI Gateway service, Gateway client APIs, third-party Gateway integrations
- [ ] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry
- [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors
- [ ] `area/recipes`: Recipes, Recipe APIs, Recipe configs, Recipe Templates
- [ ] `area/projects`: MLproject format, project running backends
- [ ] `area/scoring`: MLflow Model server, model deployment tools, Spark UDFs
- [X] `area/server-infra`: MLflow Tracking server backend
- [ ] `area/tracking`: Tracking Service, tracking client APIs, autologging
### What interface(s) does this bug affect?
- [ ] `area/uiux`: Front-end, user experience, plotting, JavaScript, JavaScript dev server
- [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models
- [X] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry
- [ ] `area/windows`: Windows support
### What language(s) does this bug affect?
- [ ] `language/r`: R APIs and clients
- [ ] `language/java`: Java APIs and clients
- [ ] `language/new`: Proposals for new client languages
### What integration(s) does this bug affect?
- [ ] `integrations/azure`: Azure and Azure ML integrations
- [ ] `integrations/sagemaker`: SageMaker integrations
- [ ] `integrations/databricks`: Databricks integrations
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlflow/server/auth/db/utils.py`
Content:
```
1 from pathlib import Path
2
3 from sqlalchemy.engine.base import Engine
4 from alembic.command import upgrade
5 from alembic.config import Config
6 from alembic.script import ScriptDirectory
7 from alembic.migration import MigrationContext
8
9
10 def _get_alembic_dir() -> str:
11 return Path(__file__).parent / "migrations"
12
13
14 def _get_alembic_config(url: str) -> Config:
15 alembic_dir = _get_alembic_dir()
16 alembic_ini_path = alembic_dir / "alembic.ini"
17 alembic_cfg = Config(alembic_ini_path)
18 alembic_cfg.set_main_option("script_location", str(alembic_dir))
19 alembic_cfg.set_main_option("sqlalchemy.url", url)
20 return alembic_cfg
21
22
23 def migrate(engine: Engine, revision: str) -> None:
24 alembic_cfg = _get_alembic_config(str(engine.url))
25 with engine.begin() as conn:
26 alembic_cfg.attributes["connection"] = conn
27 upgrade(alembic_cfg, revision)
28
29
30 def migrate_if_needed(engine: Engine, revision: str) -> None:
31 alembic_cfg = _get_alembic_config(str(engine.url))
32 script_dir = ScriptDirectory.from_config(alembic_cfg)
33 with engine.begin() as conn:
34 context = MigrationContext.configure(conn)
35 if context.get_current_revision() != script_dir.get_current_head():
36 upgrade(alembic_cfg, revision)
37
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mlflow/server/auth/db/utils.py b/mlflow/server/auth/db/utils.py
--- a/mlflow/server/auth/db/utils.py
+++ b/mlflow/server/auth/db/utils.py
@@ -21,7 +21,7 @@
def migrate(engine: Engine, revision: str) -> None:
- alembic_cfg = _get_alembic_config(str(engine.url))
+ alembic_cfg = _get_alembic_config(engine.url.render_as_string(hide_password=False))
with engine.begin() as conn:
alembic_cfg.attributes["connection"] = conn
upgrade(alembic_cfg, revision)
|
{"golden_diff": "diff --git a/mlflow/server/auth/db/utils.py b/mlflow/server/auth/db/utils.py\n--- a/mlflow/server/auth/db/utils.py\n+++ b/mlflow/server/auth/db/utils.py\n@@ -21,7 +21,7 @@\n \n \n def migrate(engine: Engine, revision: str) -> None:\n- alembic_cfg = _get_alembic_config(str(engine.url))\n+ alembic_cfg = _get_alembic_config(engine.url.render_as_string(hide_password=False))\n with engine.begin() as conn:\n alembic_cfg.attributes[\"connection\"] = conn\n upgrade(alembic_cfg, revision)\n", "issue": "[BUG] basic-auth alembic migration crash on remote database\n### Issues Policy acknowledgement\n\n- [X] I have read and agree to submit bug reports in accordance with the [issues policy](https://www.github.com/mlflow/mlflow/blob/master/ISSUE_POLICY.md)\n\n### Willingness to contribute\n\nYes. I can contribute a fix for this bug independently.\n\n### MLflow version\n\n- Client: 1.5.0\r\n- Tracking server: 1.5.0\r\n\n\n### System information\n\n- **Debian GNU/Linux 10 (buster)**:\r\n- **Python 3.8.17 **:\r\n\r\n- **SQLAlchemy version: 2.0.19**\n\n### Describe the problem\n\nAlembic migration for the basic-auth app crashes due to obfuscated password being passed to the alembic config\r\n\r\nThis bug seems to be related to the SQLAlchemy version, migration works with SQLAlchemy 1.4.19 but not with 2.0.19, seems like they change the behaviour of the str representation of url's.\n\n### Tracking information\n\n<!-- PLEASE KEEP BACKTICKS AND CHECK PREVIEW -->\r\n```shell\r\nREPLACE_ME\r\n```\r\n\n\n### Code to reproduce issue\n\n<!-- PLEASE KEEP BACKTICKS AND CHECK PREVIEW -->\r\n```\r\npython -m mlflow.server.auth db upgrade --url 'postgresql://database_user:database_password@host:5432/database_name'\r\n```\r\n\n\n### Stack trace\n\n<!-- PLEASE KEEP BACKTICKS AND CHECK PREVIEW -->\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/base.py\", line 145, in __init__\r\n self._dbapi_connection = engine.raw_connection()\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/base.py\", line 3288, in raw_connection\r\n return self.pool.connect()\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py\", line 452, in connect\r\n return _ConnectionFairy._checkout(self)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py\", line 1267, in _checkout\r\n fairy = _ConnectionRecord.checkout(pool)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py\", line 716, in checkout\r\n rec = pool._do_get()\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/impl.py\", line 284, in _do_get\r\n return self._create_connection()\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py\", line 393, in _create_connection\r\n return _ConnectionRecord(self)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py\", line 678, in __init__\r\n self.__connect()\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py\", line 903, in __connect\r\n pool.logger.debug(\"Error on connect(): %s\", e)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/util/langhelpers.py\", line 147, in __exit__\r\n raise exc_value.with_traceback(exc_tb)\r\n File 
\"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py\", line 898, in __connect\r\n self.dbapi_connection = connection = pool._invoke_creator(self)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/create.py\", line 637, in connect\r\n return dialect.connect(*cargs, **cparams)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/default.py\", line 615, in connect\r\n return self.loaded_dbapi.connect(*cargs, **cparams)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/psycopg2/__init__.py\", line 122, in connect\r\n conn = _connect(dsn, connection_factory=connection_factory, **kwasync)\r\nsqlalchemy.exc.OperationalError: (psycopg2.OperationalError) connection to server at \"host\" (host ip), port 5432 failed: FATAL: password authentication failed for user \"db_user\"\r\nconnection to server at \"host\" (host_ip), port 5432 failed: FATAL: password authentication failed for user \"db_user\"\r\n\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.8/runpy.py\", line 194, in _run_module_as_main\r\n return _run_code(code, main_globals, None,\r\n File \"/usr/local/lib/python3.8/runpy.py\", line 87, in _run_code\r\n exec(code, run_globals)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/mlflow/server/auth/__main__.py\", line 5, in <module>\r\n commands()\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/click/core.py\", line 1157, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/click/core.py\", line 1078, in main\r\n rv = self.invoke(ctx)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/click/core.py\", line 1688, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/click/core.py\", line 1688, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/click/core.py\", line 1434, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/click/core.py\", line 783, in invoke\r\n return __callback(*args, **kwargs)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/mlflow/server/auth/db/cli.py\", line 19, in upgrade\r\n utils.migrate(engine, revision)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/mlflow/server/auth/db/utils.py\", line 28, in migrate\r\n upgrade(alembic_cfg, revision)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/alembic/command.py\", line 385, in upgrade\r\n script.run_env()\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/alembic/script/base.py\", line 578, in run_env\r\n util.load_python_file(self.dir, \"env.py\")\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/alembic/util/pyfiles.py\", line 93, in load_python_file\r\n module = load_module_py(module_id, path)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/alembic/util/pyfiles.py\", line 109, in load_module_py\r\n spec.loader.exec_module(module) # type: ignore\r\n File \"<frozen importlib._bootstrap_external>\", line 843, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 219, in 
_call_with_frames_removed\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/mlflow/server/auth/db/migrations/env.py\", line 79, in <module>\r\n run_migrations_online()\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/mlflow/server/auth/db/migrations/env.py\", line 69, in run_migrations_online\r\n with connectable.connect() as connection:\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/base.py\", line 3264, in connect\r\n return self._connection_cls(self)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/base.py\", line 147, in __init__\r\n Connection._handle_dbapi_exception_noconnection(\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/base.py\", line 2426, in _handle_dbapi_exception_noconnection\r\n raise sqlalchemy_exception.with_traceback(exc_info[2]) from e\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/base.py\", line 145, in __init__\r\n self._dbapi_connection = engine.raw_connection()\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/base.py\", line 3288, in raw_connection\r\n return self.pool.connect()\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py\", line 452, in connect\r\n return _ConnectionFairy._checkout(self)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py\", line 1267, in _checkout\r\n fairy = _ConnectionRecord.checkout(pool)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py\", line 716, in checkout\r\n rec = pool._do_get()\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/impl.py\", line 284, in _do_get\r\n return self._create_connection()\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py\", line 393, in _create_connection\r\n return _ConnectionRecord(self)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py\", line 678, in __init__\r\n self.__connect()\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py\", line 903, in __connect\r\n pool.logger.debug(\"Error on connect(): %s\", e)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/util/langhelpers.py\", line 147, in __exit__\r\n raise exc_value.with_traceback(exc_tb)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/pool/base.py\", line 898, in __connect\r\n self.dbapi_connection = connection = pool._invoke_creator(self)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/create.py\", line 637, in connect\r\n return dialect.connect(*cargs, **cparams)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/sqlalchemy/engine/default.py\", line 615, in connect\r\n return self.loaded_dbapi.connect(*cargs, **cparams)\r\n File \"/home/turuania/mlflow_test/env/lib/python3.8/site-packages/psycopg2/__init__.py\", line 122, in connect\r\n conn = _connect(dsn, connection_factory=connection_factory, **kwasync)\r\nsqlalchemy.exc.OperationalError: (psycopg2.OperationalError) connection to server at \"host\" (host ip), port 5432 failed: FATAL: password authentication failed for user \"db_user\"\r\nconnection to server at \"host\" (host_ip), port 5432 failed: FATAL: password authentication failed for 
user \"db_user\"\r\n\r\n(Background on this error at: https://sqlalche.me/e/20/e3q8)\r\n```\r\n\n\n### Other info / logs\n\n<!-- PLEASE KEEP BACKTICKS AND CHECK PREVIEW -->\r\n```\r\nREPLACE_ME\r\n```\r\n\n\n### What component(s) does this bug affect?\n\n- [ ] `area/artifacts`: Artifact stores and artifact logging\n- [ ] `area/build`: Build and test infrastructure for MLflow\n- [ ] `area/docs`: MLflow documentation pages\n- [ ] `area/examples`: Example code\n- [ ] `area/gateway`: AI Gateway service, Gateway client APIs, third-party Gateway integrations\n- [ ] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry\n- [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors\n- [ ] `area/recipes`: Recipes, Recipe APIs, Recipe configs, Recipe Templates\n- [ ] `area/projects`: MLproject format, project running backends\n- [ ] `area/scoring`: MLflow Model server, model deployment tools, Spark UDFs\n- [X] `area/server-infra`: MLflow Tracking server backend\n- [ ] `area/tracking`: Tracking Service, tracking client APIs, autologging\n\n### What interface(s) does this bug affect?\n\n- [ ] `area/uiux`: Front-end, user experience, plotting, JavaScript, JavaScript dev server\n- [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models\n- [X] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry\n- [ ] `area/windows`: Windows support\n\n### What language(s) does this bug affect?\n\n- [ ] `language/r`: R APIs and clients\n- [ ] `language/java`: Java APIs and clients\n- [ ] `language/new`: Proposals for new client languages\n\n### What integration(s) does this bug affect?\n\n- [ ] `integrations/azure`: Azure and Azure ML integrations\n- [ ] `integrations/sagemaker`: SageMaker integrations\n- [ ] `integrations/databricks`: Databricks integrations\n", "before_files": [{"content": "from pathlib import Path\n\nfrom sqlalchemy.engine.base import Engine\nfrom alembic.command import upgrade\nfrom alembic.config import Config\nfrom alembic.script import ScriptDirectory\nfrom alembic.migration import MigrationContext\n\n\ndef _get_alembic_dir() -> str:\n return Path(__file__).parent / \"migrations\"\n\n\ndef _get_alembic_config(url: str) -> Config:\n alembic_dir = _get_alembic_dir()\n alembic_ini_path = alembic_dir / \"alembic.ini\"\n alembic_cfg = Config(alembic_ini_path)\n alembic_cfg.set_main_option(\"script_location\", str(alembic_dir))\n alembic_cfg.set_main_option(\"sqlalchemy.url\", url)\n return alembic_cfg\n\n\ndef migrate(engine: Engine, revision: str) -> None:\n alembic_cfg = _get_alembic_config(str(engine.url))\n with engine.begin() as conn:\n alembic_cfg.attributes[\"connection\"] = conn\n upgrade(alembic_cfg, revision)\n\n\ndef migrate_if_needed(engine: Engine, revision: str) -> None:\n alembic_cfg = _get_alembic_config(str(engine.url))\n script_dir = ScriptDirectory.from_config(alembic_cfg)\n with engine.begin() as conn:\n context = MigrationContext.configure(conn)\n if context.get_current_revision() != script_dir.get_current_head():\n upgrade(alembic_cfg, revision)\n", "path": "mlflow/server/auth/db/utils.py"}], "after_files": [{"content": "from pathlib import Path\n\nfrom sqlalchemy.engine.base import Engine\nfrom alembic.command import upgrade\nfrom alembic.config import Config\nfrom alembic.script import ScriptDirectory\nfrom alembic.migration import MigrationContext\n\n\ndef _get_alembic_dir() -> str:\n return Path(__file__).parent / \"migrations\"\n\n\ndef 
_get_alembic_config(url: str) -> Config:\n alembic_dir = _get_alembic_dir()\n alembic_ini_path = alembic_dir / \"alembic.ini\"\n alembic_cfg = Config(alembic_ini_path)\n alembic_cfg.set_main_option(\"script_location\", str(alembic_dir))\n alembic_cfg.set_main_option(\"sqlalchemy.url\", url)\n return alembic_cfg\n\n\ndef migrate(engine: Engine, revision: str) -> None:\n alembic_cfg = _get_alembic_config(engine.url.render_as_string(hide_password=False))\n with engine.begin() as conn:\n alembic_cfg.attributes[\"connection\"] = conn\n upgrade(alembic_cfg, revision)\n\n\ndef migrate_if_needed(engine: Engine, revision: str) -> None:\n alembic_cfg = _get_alembic_config(str(engine.url))\n script_dir = ScriptDirectory.from_config(alembic_cfg)\n with engine.begin() as conn:\n context = MigrationContext.configure(conn)\n if context.get_current_revision() != script_dir.get_current_head():\n upgrade(alembic_cfg, revision)\n", "path": "mlflow/server/auth/db/utils.py"}]}
| 3,811 | 138 |
gh_patches_debug_29526
|
rasdani/github-patches
|
git_diff
|
plone__Products.CMFPlone-2829
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
exclude_from_nav and show_excluded_items are not handled correctly in portal_tabs_view
### What I did:
Create a folder on the top level and exclude it from navigation
### What I expect to happen:
the folder should disappear from global navigation **when I'm not inside its context**
### What actually happened:
the folder is always in the navigation
### What version of Plone/ Addons I am using:
plone 5.2rc2
### REASON:
I'm fixing a bug found here: https://github.com/plone/plone.app.layout/issues/190
which leads to the same result in `portal_tabs_view` here https://github.com/plone/Products.CMFPlone/blob/master/Products/CMFPlone/browser/navigation.py#L151-L153
If `show_excluded_items` is activated (which is actually the default setting), every item is shown regardless of its `exclude_from_nav` setting. What the setting is meant to do is show an excluded item only while you are inside it.
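A minimal sketch of the intended rule (illustrative only; `should_show_in_tabs` is a hypothetical helper, not part of the actual patch, `item` is assumed to be a catalog brain and `context_path` the physical path of the current context):
```
def should_show_in_tabs(item, context_path, show_excluded_items):
    # items not excluded from navigation are always eligible for the tabs
    if not item.exclude_from_nav:
        return True
    # excluded items should only appear while the user is browsing inside
    # them, and only if the site setting allows showing excluded items at all
    return show_excluded_items and context_path.startswith(item.getPath())
```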
PR is in preparation...
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Products/CMFPlone/browser/navigation.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from Acquisition import aq_base
3 from Acquisition import aq_inner
4 from plone.app.layout.navigation.interfaces import INavtreeStrategy
5 from plone.app.layout.navigation.navtree import buildFolderTree
6 from plone.app.layout.navigation.root import getNavigationRoot
7 from plone.registry.interfaces import IRegistry
8 from Products.CMFCore.utils import getToolByName
9 from Products.CMFPlone import utils
10 from Products.CMFPlone.browser.interfaces import INavigationBreadcrumbs
11 from Products.CMFPlone.browser.interfaces import INavigationTabs
12 from Products.CMFPlone.browser.interfaces import ISiteMap
13 from Products.CMFPlone.browser.navtree import SitemapQueryBuilder
14 from Products.CMFPlone.interfaces import IHideFromBreadcrumbs
15 from Products.CMFPlone.interfaces import INavigationSchema
16 from Products.Five import BrowserView
17 from zope.component import getMultiAdapter
18 from zope.component import getUtility
19 from zope.interface import implementer
20
21
22 def get_url(item):
23 if not item:
24 return None
25 if hasattr(aq_base(item), 'getURL'):
26 # Looks like a brain
27 return item.getURL()
28 return item.absolute_url()
29
30
31 def get_id(item):
32 if not item:
33 return None
34 getId = getattr(item, 'getId')
35 if not utils.safe_callable(getId):
36 # Looks like a brain
37 return getId
38 return getId()
39
40
41 def get_view_url(context):
42 registry = getUtility(IRegistry)
43 view_action_types = registry.get(
44 'plone.types_use_view_action_in_listings', [])
45 item_url = get_url(context)
46 name = get_id(context)
47
48 if getattr(context, 'portal_type', {}) in view_action_types:
49 item_url += '/view'
50 name += '/view'
51
52 return name, item_url
53
54
55 @implementer(ISiteMap)
56 class CatalogSiteMap(BrowserView):
57
58 def siteMap(self):
59 context = aq_inner(self.context)
60
61 queryBuilder = SitemapQueryBuilder(context)
62 query = queryBuilder()
63 strategy = getMultiAdapter((context, self), INavtreeStrategy)
64
65 return buildFolderTree(
66 context, obj=context,
67 query=query, strategy=strategy
68 )
69
70
71 @implementer(INavigationTabs)
72 class CatalogNavigationTabs(BrowserView):
73
74 def _getNavQuery(self):
75 # check whether we only want actions
76 registry = getUtility(IRegistry)
77 navigation_settings = registry.forInterface(
78 INavigationSchema,
79 prefix="plone",
80 check=False
81 )
82 customQuery = getattr(self.context, 'getCustomNavQuery', False)
83 if customQuery is not None and utils.safe_callable(customQuery):
84 query = customQuery()
85 else:
86 query = {}
87
88 query['path'] = {
89 'query': getNavigationRoot(self.context),
90 'depth': 1
91 }
92 query['portal_type'] = [t for t in navigation_settings.displayed_types]
93 query['sort_on'] = navigation_settings.sort_tabs_on
94 if navigation_settings.sort_tabs_reversed:
95 query['sort_order'] = 'reverse'
96 else:
97 query['sort_order'] = 'ascending'
98
99 if navigation_settings.filter_on_workflow:
100 query['review_state'] = navigation_settings.workflow_states_to_show
101
102 query['is_default_page'] = False
103
104 if not navigation_settings.nonfolderish_tabs:
105 query['is_folderish'] = True
106
107 return query
108
109 def topLevelTabs(self, actions=None, category='portal_tabs'):
110 context = aq_inner(self.context)
111 registry = getUtility(IRegistry)
112 navigation_settings = registry.forInterface(
113 INavigationSchema,
114 prefix="plone",
115 check=False
116 )
117 mtool = getToolByName(context, 'portal_membership')
118 member = mtool.getAuthenticatedMember().id
119 catalog = getToolByName(context, 'portal_catalog')
120
121 if actions is None:
122 context_state = getMultiAdapter(
123 (context, self.request),
124 name=u'plone_context_state'
125 )
126 actions = context_state.actions(category)
127
128 # Build result dict
129 result = []
130 # first the actions
131 for actionInfo in actions:
132 data = actionInfo.copy()
133 data['name'] = data['title']
134 result.append(data)
135
136 # check whether we only want actions
137 if not navigation_settings.generate_tabs:
138 return result
139
140 query = self._getNavQuery()
141
142 rawresult = catalog.searchResults(query)
143
144 def _get_url(item):
145 if item.getRemoteUrl and not member == item.Creator:
146 return (get_id(item), item.getRemoteUrl)
147 return get_view_url(item)
148
149 # now add the content to results
150 for item in rawresult:
151 if not navigation_settings.show_excluded_items:
152 if item.exclude_from_nav:
153 continue
154 cid, item_url = _get_url(item)
155 data = {
156 'name': utils.pretty_title_or_id(context, item),
157 'id': item.getId,
158 'url': item_url,
159 'description': item.Description,
160 'review_state': item.review_state
161 }
162 result.append(data)
163
164 return result
165
166
167 @implementer(INavigationBreadcrumbs)
168 class CatalogNavigationBreadcrumbs(BrowserView):
169
170 def breadcrumbs(self):
171 context = aq_inner(self.context)
172 catalog = getToolByName(context, 'portal_catalog')
173 query = {}
174
175 # Check to see if the current page is a folder default view, if so
176 # get breadcrumbs from the parent folder
177 if utils.isDefaultPage(context, self.request):
178 currentPath = '/'.join(utils.parent(context).getPhysicalPath())
179 else:
180 currentPath = '/'.join(context.getPhysicalPath())
181 query['path'] = {'query': currentPath, 'navtree': 1, 'depth': 0}
182
183 rawresult = catalog(**query)
184
185 # Sort items on path length
186 dec_result = [(len(r.getPath()), r) for r in rawresult]
187 dec_result.sort()
188
189 rootPath = getNavigationRoot(context)
190
191 # Build result dict
192 result = []
193 for r_tuple in dec_result:
194 item = r_tuple[1]
195
196 # Don't include it if it would be above the navigation root
197 itemPath = item.getPath()
198 if rootPath.startswith(itemPath):
199 continue
200
201 cid, item_url = get_view_url(item)
202 data = {
203 'Title': utils.pretty_title_or_id(context, item),
204 'absolute_url': item_url
205 }
206 result.append(data)
207 return result
208
209
210 @implementer(INavigationBreadcrumbs)
211 class PhysicalNavigationBreadcrumbs(BrowserView):
212
213 def breadcrumbs(self):
214 context = aq_inner(self.context)
215 request = self.request
216 container = utils.parent(context)
217
218 name, item_url = get_view_url(context)
219
220 if container is None:
221 return ({
222 'absolute_url': item_url,
223 'Title': utils.pretty_title_or_id(context, context),
224 },)
225
226 view = getMultiAdapter((container, request), name='breadcrumbs_view')
227 base = tuple(view.breadcrumbs())
228
229 # Some things want to be hidden from the breadcrumbs
230 if IHideFromBreadcrumbs.providedBy(context):
231 return base
232
233 if base:
234 item_url = '%s/%s' % (base[-1]['absolute_url'], name)
235
236 rootPath = getNavigationRoot(context)
237 itemPath = '/'.join(context.getPhysicalPath())
238
239 # don't show default pages in breadcrumbs or pages above the navigation
240 # root
241 if not utils.isDefaultPage(context, request) \
242 and not rootPath.startswith(itemPath):
243 base += ({
244 'absolute_url': item_url,
245 'Title': utils.pretty_title_or_id(context, context),
246 },)
247 return base
248
249
250 @implementer(INavigationBreadcrumbs)
251 class RootPhysicalNavigationBreadcrumbs(BrowserView):
252
253 def breadcrumbs(self):
254 # XXX Root never gets included, it's hardcoded as 'Home' in
255 # the template. We will fix and remove the hardcoding and fix
256 # the tests.
257 return ()
258
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/Products/CMFPlone/browser/navigation.py b/Products/CMFPlone/browser/navigation.py
--- a/Products/CMFPlone/browser/navigation.py
+++ b/Products/CMFPlone/browser/navigation.py
@@ -72,7 +72,7 @@
class CatalogNavigationTabs(BrowserView):
def _getNavQuery(self):
- # check whether we only want actions
+ # check whether we only want actions
registry = getUtility(IRegistry)
navigation_settings = registry.forInterface(
INavigationSchema,
@@ -101,6 +101,9 @@
query['is_default_page'] = False
+ if not navigation_settings.show_excluded_items:
+ query['exclude_from_nav'] = False
+
if not navigation_settings.nonfolderish_tabs:
query['is_folderish'] = True
@@ -146,11 +149,13 @@
return (get_id(item), item.getRemoteUrl)
return get_view_url(item)
+ context_path = '/'.join(context.getPhysicalPath())
+
# now add the content to results
for item in rawresult:
- if not navigation_settings.show_excluded_items:
- if item.exclude_from_nav:
- continue
+ if item.exclude_from_nav and not context_path.startswith(item.getPath()): # noqa: E501
+ # skip excluded items if they're not in our context path
+ continue
cid, item_url = _get_url(item)
data = {
'name': utils.pretty_title_or_id(context, item),
|
{"golden_diff": "diff --git a/Products/CMFPlone/browser/navigation.py b/Products/CMFPlone/browser/navigation.py\n--- a/Products/CMFPlone/browser/navigation.py\n+++ b/Products/CMFPlone/browser/navigation.py\n@@ -72,7 +72,7 @@\n class CatalogNavigationTabs(BrowserView):\n \n def _getNavQuery(self):\n- # check whether we only want actions\n+ # check whether we only want actions\n registry = getUtility(IRegistry)\n navigation_settings = registry.forInterface(\n INavigationSchema,\n@@ -101,6 +101,9 @@\n \n query['is_default_page'] = False\n \n+ if not navigation_settings.show_excluded_items:\n+ query['exclude_from_nav'] = False\n+\n if not navigation_settings.nonfolderish_tabs:\n query['is_folderish'] = True\n \n@@ -146,11 +149,13 @@\n return (get_id(item), item.getRemoteUrl)\n return get_view_url(item)\n \n+ context_path = '/'.join(context.getPhysicalPath())\n+\n # now add the content to results\n for item in rawresult:\n- if not navigation_settings.show_excluded_items:\n- if item.exclude_from_nav:\n- continue\n+ if item.exclude_from_nav and not context_path.startswith(item.getPath()): # noqa: E501\n+ # skip excluded items if they're not in our context path\n+ continue\n cid, item_url = _get_url(item)\n data = {\n 'name': utils.pretty_title_or_id(context, item),\n", "issue": "exclude_from_nav and show_excluded_items are not handled correctly in portal_tabs_view\n### What I did:\r\n\r\nCreate a folder on top level and exclude id from navigation\r\n\r\n### What I expect to happen:\r\n\r\nthe folder should disappear from global navigation **when I'm not inside it's context**\r\n\r\n### What actually happened:\r\n\r\nthe folder is always in the navigation\r\n\r\n### What version of Plone/ Addons I am using:\r\n\r\nplone 5.2rc2\r\n\r\n### REASON:\r\n\r\nI'm fixing a bug found here: https://github.com/plone/plone.app.layout/issues/190\r\nwhich leads to the same result in `portal_tabs_view` here https://github.com/plone/Products.CMFPlone/blob/master/Products/CMFPlone/browser/navigation.py#L151-L153\r\n\r\nif `show_excluded_items` is activated (which actually is the default setting) every item is shown regardless of `exclude_from_nav` setting. 
But what the setting claims is to show the excluded item only if you're inside of it.\r\n\r\nPR is in preparation...\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom Acquisition import aq_base\nfrom Acquisition import aq_inner\nfrom plone.app.layout.navigation.interfaces import INavtreeStrategy\nfrom plone.app.layout.navigation.navtree import buildFolderTree\nfrom plone.app.layout.navigation.root import getNavigationRoot\nfrom plone.registry.interfaces import IRegistry\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone import utils\nfrom Products.CMFPlone.browser.interfaces import INavigationBreadcrumbs\nfrom Products.CMFPlone.browser.interfaces import INavigationTabs\nfrom Products.CMFPlone.browser.interfaces import ISiteMap\nfrom Products.CMFPlone.browser.navtree import SitemapQueryBuilder\nfrom Products.CMFPlone.interfaces import IHideFromBreadcrumbs\nfrom Products.CMFPlone.interfaces import INavigationSchema\nfrom Products.Five import BrowserView\nfrom zope.component import getMultiAdapter\nfrom zope.component import getUtility\nfrom zope.interface import implementer\n\n\ndef get_url(item):\n if not item:\n return None\n if hasattr(aq_base(item), 'getURL'):\n # Looks like a brain\n return item.getURL()\n return item.absolute_url()\n\n\ndef get_id(item):\n if not item:\n return None\n getId = getattr(item, 'getId')\n if not utils.safe_callable(getId):\n # Looks like a brain\n return getId\n return getId()\n\n\ndef get_view_url(context):\n registry = getUtility(IRegistry)\n view_action_types = registry.get(\n 'plone.types_use_view_action_in_listings', [])\n item_url = get_url(context)\n name = get_id(context)\n\n if getattr(context, 'portal_type', {}) in view_action_types:\n item_url += '/view'\n name += '/view'\n\n return name, item_url\n\n\n@implementer(ISiteMap)\nclass CatalogSiteMap(BrowserView):\n\n def siteMap(self):\n context = aq_inner(self.context)\n\n queryBuilder = SitemapQueryBuilder(context)\n query = queryBuilder()\n strategy = getMultiAdapter((context, self), INavtreeStrategy)\n\n return buildFolderTree(\n context, obj=context,\n query=query, strategy=strategy\n )\n\n\n@implementer(INavigationTabs)\nclass CatalogNavigationTabs(BrowserView):\n\n def _getNavQuery(self):\n # check whether we only want actions\n registry = getUtility(IRegistry)\n navigation_settings = registry.forInterface(\n INavigationSchema,\n prefix=\"plone\",\n check=False\n )\n customQuery = getattr(self.context, 'getCustomNavQuery', False)\n if customQuery is not None and utils.safe_callable(customQuery):\n query = customQuery()\n else:\n query = {}\n\n query['path'] = {\n 'query': getNavigationRoot(self.context),\n 'depth': 1\n }\n query['portal_type'] = [t for t in navigation_settings.displayed_types]\n query['sort_on'] = navigation_settings.sort_tabs_on\n if navigation_settings.sort_tabs_reversed:\n query['sort_order'] = 'reverse'\n else:\n query['sort_order'] = 'ascending'\n\n if navigation_settings.filter_on_workflow:\n query['review_state'] = navigation_settings.workflow_states_to_show\n\n query['is_default_page'] = False\n\n if not navigation_settings.nonfolderish_tabs:\n query['is_folderish'] = True\n\n return query\n\n def topLevelTabs(self, actions=None, category='portal_tabs'):\n context = aq_inner(self.context)\n registry = getUtility(IRegistry)\n navigation_settings = registry.forInterface(\n INavigationSchema,\n prefix=\"plone\",\n check=False\n )\n mtool = getToolByName(context, 'portal_membership')\n member = mtool.getAuthenticatedMember().id\n 
catalog = getToolByName(context, 'portal_catalog')\n\n if actions is None:\n context_state = getMultiAdapter(\n (context, self.request),\n name=u'plone_context_state'\n )\n actions = context_state.actions(category)\n\n # Build result dict\n result = []\n # first the actions\n for actionInfo in actions:\n data = actionInfo.copy()\n data['name'] = data['title']\n result.append(data)\n\n # check whether we only want actions\n if not navigation_settings.generate_tabs:\n return result\n\n query = self._getNavQuery()\n\n rawresult = catalog.searchResults(query)\n\n def _get_url(item):\n if item.getRemoteUrl and not member == item.Creator:\n return (get_id(item), item.getRemoteUrl)\n return get_view_url(item)\n\n # now add the content to results\n for item in rawresult:\n if not navigation_settings.show_excluded_items:\n if item.exclude_from_nav:\n continue\n cid, item_url = _get_url(item)\n data = {\n 'name': utils.pretty_title_or_id(context, item),\n 'id': item.getId,\n 'url': item_url,\n 'description': item.Description,\n 'review_state': item.review_state\n }\n result.append(data)\n\n return result\n\n\n@implementer(INavigationBreadcrumbs)\nclass CatalogNavigationBreadcrumbs(BrowserView):\n\n def breadcrumbs(self):\n context = aq_inner(self.context)\n catalog = getToolByName(context, 'portal_catalog')\n query = {}\n\n # Check to see if the current page is a folder default view, if so\n # get breadcrumbs from the parent folder\n if utils.isDefaultPage(context, self.request):\n currentPath = '/'.join(utils.parent(context).getPhysicalPath())\n else:\n currentPath = '/'.join(context.getPhysicalPath())\n query['path'] = {'query': currentPath, 'navtree': 1, 'depth': 0}\n\n rawresult = catalog(**query)\n\n # Sort items on path length\n dec_result = [(len(r.getPath()), r) for r in rawresult]\n dec_result.sort()\n\n rootPath = getNavigationRoot(context)\n\n # Build result dict\n result = []\n for r_tuple in dec_result:\n item = r_tuple[1]\n\n # Don't include it if it would be above the navigation root\n itemPath = item.getPath()\n if rootPath.startswith(itemPath):\n continue\n\n cid, item_url = get_view_url(item)\n data = {\n 'Title': utils.pretty_title_or_id(context, item),\n 'absolute_url': item_url\n }\n result.append(data)\n return result\n\n\n@implementer(INavigationBreadcrumbs)\nclass PhysicalNavigationBreadcrumbs(BrowserView):\n\n def breadcrumbs(self):\n context = aq_inner(self.context)\n request = self.request\n container = utils.parent(context)\n\n name, item_url = get_view_url(context)\n\n if container is None:\n return ({\n 'absolute_url': item_url,\n 'Title': utils.pretty_title_or_id(context, context),\n },)\n\n view = getMultiAdapter((container, request), name='breadcrumbs_view')\n base = tuple(view.breadcrumbs())\n\n # Some things want to be hidden from the breadcrumbs\n if IHideFromBreadcrumbs.providedBy(context):\n return base\n\n if base:\n item_url = '%s/%s' % (base[-1]['absolute_url'], name)\n\n rootPath = getNavigationRoot(context)\n itemPath = '/'.join(context.getPhysicalPath())\n\n # don't show default pages in breadcrumbs or pages above the navigation\n # root\n if not utils.isDefaultPage(context, request) \\\n and not rootPath.startswith(itemPath):\n base += ({\n 'absolute_url': item_url,\n 'Title': utils.pretty_title_or_id(context, context),\n },)\n return base\n\n\n@implementer(INavigationBreadcrumbs)\nclass RootPhysicalNavigationBreadcrumbs(BrowserView):\n\n def breadcrumbs(self):\n # XXX Root never gets included, it's hardcoded as 'Home' in\n # the template. 
We will fix and remove the hardcoding and fix\n # the tests.\n return ()\n", "path": "Products/CMFPlone/browser/navigation.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom Acquisition import aq_base\nfrom Acquisition import aq_inner\nfrom plone.app.layout.navigation.interfaces import INavtreeStrategy\nfrom plone.app.layout.navigation.navtree import buildFolderTree\nfrom plone.app.layout.navigation.root import getNavigationRoot\nfrom plone.registry.interfaces import IRegistry\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone import utils\nfrom Products.CMFPlone.browser.interfaces import INavigationBreadcrumbs\nfrom Products.CMFPlone.browser.interfaces import INavigationTabs\nfrom Products.CMFPlone.browser.interfaces import ISiteMap\nfrom Products.CMFPlone.browser.navtree import SitemapQueryBuilder\nfrom Products.CMFPlone.interfaces import IHideFromBreadcrumbs\nfrom Products.CMFPlone.interfaces import INavigationSchema\nfrom Products.Five import BrowserView\nfrom zope.component import getMultiAdapter\nfrom zope.component import getUtility\nfrom zope.interface import implementer\n\n\ndef get_url(item):\n if not item:\n return None\n if hasattr(aq_base(item), 'getURL'):\n # Looks like a brain\n return item.getURL()\n return item.absolute_url()\n\n\ndef get_id(item):\n if not item:\n return None\n getId = getattr(item, 'getId')\n if not utils.safe_callable(getId):\n # Looks like a brain\n return getId\n return getId()\n\n\ndef get_view_url(context):\n registry = getUtility(IRegistry)\n view_action_types = registry.get(\n 'plone.types_use_view_action_in_listings', [])\n item_url = get_url(context)\n name = get_id(context)\n\n if getattr(context, 'portal_type', {}) in view_action_types:\n item_url += '/view'\n name += '/view'\n\n return name, item_url\n\n\n@implementer(ISiteMap)\nclass CatalogSiteMap(BrowserView):\n\n def siteMap(self):\n context = aq_inner(self.context)\n\n queryBuilder = SitemapQueryBuilder(context)\n query = queryBuilder()\n strategy = getMultiAdapter((context, self), INavtreeStrategy)\n\n return buildFolderTree(\n context, obj=context,\n query=query, strategy=strategy\n )\n\n\n@implementer(INavigationTabs)\nclass CatalogNavigationTabs(BrowserView):\n\n def _getNavQuery(self):\n # check whether we only want actions\n registry = getUtility(IRegistry)\n navigation_settings = registry.forInterface(\n INavigationSchema,\n prefix=\"plone\",\n check=False\n )\n customQuery = getattr(self.context, 'getCustomNavQuery', False)\n if customQuery is not None and utils.safe_callable(customQuery):\n query = customQuery()\n else:\n query = {}\n\n query['path'] = {\n 'query': getNavigationRoot(self.context),\n 'depth': 1\n }\n query['portal_type'] = [t for t in navigation_settings.displayed_types]\n query['sort_on'] = navigation_settings.sort_tabs_on\n if navigation_settings.sort_tabs_reversed:\n query['sort_order'] = 'reverse'\n else:\n query['sort_order'] = 'ascending'\n\n if navigation_settings.filter_on_workflow:\n query['review_state'] = navigation_settings.workflow_states_to_show\n\n query['is_default_page'] = False\n\n if not navigation_settings.show_excluded_items:\n query['exclude_from_nav'] = False\n\n if not navigation_settings.nonfolderish_tabs:\n query['is_folderish'] = True\n\n return query\n\n def topLevelTabs(self, actions=None, category='portal_tabs'):\n context = aq_inner(self.context)\n registry = getUtility(IRegistry)\n navigation_settings = registry.forInterface(\n INavigationSchema,\n prefix=\"plone\",\n check=False\n )\n mtool = 
getToolByName(context, 'portal_membership')\n member = mtool.getAuthenticatedMember().id\n catalog = getToolByName(context, 'portal_catalog')\n\n if actions is None:\n context_state = getMultiAdapter(\n (context, self.request),\n name=u'plone_context_state'\n )\n actions = context_state.actions(category)\n\n # Build result dict\n result = []\n # first the actions\n for actionInfo in actions:\n data = actionInfo.copy()\n data['name'] = data['title']\n result.append(data)\n\n # check whether we only want actions\n if not navigation_settings.generate_tabs:\n return result\n\n query = self._getNavQuery()\n\n rawresult = catalog.searchResults(query)\n\n def _get_url(item):\n if item.getRemoteUrl and not member == item.Creator:\n return (get_id(item), item.getRemoteUrl)\n return get_view_url(item)\n\n context_path = '/'.join(context.getPhysicalPath())\n\n # now add the content to results\n for item in rawresult:\n if item.exclude_from_nav and not context_path.startswith(item.getPath()): # noqa: E501\n # skip excluded items if they're not in our context path\n continue\n cid, item_url = _get_url(item)\n data = {\n 'name': utils.pretty_title_or_id(context, item),\n 'id': item.getId,\n 'url': item_url,\n 'description': item.Description,\n 'review_state': item.review_state\n }\n result.append(data)\n\n return result\n\n\n@implementer(INavigationBreadcrumbs)\nclass CatalogNavigationBreadcrumbs(BrowserView):\n\n def breadcrumbs(self):\n context = aq_inner(self.context)\n catalog = getToolByName(context, 'portal_catalog')\n query = {}\n\n # Check to see if the current page is a folder default view, if so\n # get breadcrumbs from the parent folder\n if utils.isDefaultPage(context, self.request):\n currentPath = '/'.join(utils.parent(context).getPhysicalPath())\n else:\n currentPath = '/'.join(context.getPhysicalPath())\n query['path'] = {'query': currentPath, 'navtree': 1, 'depth': 0}\n\n rawresult = catalog(**query)\n\n # Sort items on path length\n dec_result = [(len(r.getPath()), r) for r in rawresult]\n dec_result.sort()\n\n rootPath = getNavigationRoot(context)\n\n # Build result dict\n result = []\n for r_tuple in dec_result:\n item = r_tuple[1]\n\n # Don't include it if it would be above the navigation root\n itemPath = item.getPath()\n if rootPath.startswith(itemPath):\n continue\n\n cid, item_url = get_view_url(item)\n data = {\n 'Title': utils.pretty_title_or_id(context, item),\n 'absolute_url': item_url\n }\n result.append(data)\n return result\n\n\n@implementer(INavigationBreadcrumbs)\nclass PhysicalNavigationBreadcrumbs(BrowserView):\n\n def breadcrumbs(self):\n context = aq_inner(self.context)\n request = self.request\n container = utils.parent(context)\n\n name, item_url = get_view_url(context)\n\n if container is None:\n return ({\n 'absolute_url': item_url,\n 'Title': utils.pretty_title_or_id(context, context),\n },)\n\n view = getMultiAdapter((container, request), name='breadcrumbs_view')\n base = tuple(view.breadcrumbs())\n\n # Some things want to be hidden from the breadcrumbs\n if IHideFromBreadcrumbs.providedBy(context):\n return base\n\n if base:\n item_url = '%s/%s' % (base[-1]['absolute_url'], name)\n\n rootPath = getNavigationRoot(context)\n itemPath = '/'.join(context.getPhysicalPath())\n\n # don't show default pages in breadcrumbs or pages above the navigation\n # root\n if not utils.isDefaultPage(context, request) \\\n and not rootPath.startswith(itemPath):\n base += ({\n 'absolute_url': item_url,\n 'Title': utils.pretty_title_or_id(context, context),\n },)\n return 
base\n\n\n@implementer(INavigationBreadcrumbs)\nclass RootPhysicalNavigationBreadcrumbs(BrowserView):\n\n def breadcrumbs(self):\n # XXX Root never gets included, it's hardcoded as 'Home' in\n # the template. We will fix and remove the hardcoding and fix\n # the tests.\n return ()\n", "path": "Products/CMFPlone/browser/navigation.py"}]}
| 2,894 | 350 |
gh_patches_debug_31548
|
rasdani/github-patches
|
git_diff
|
archlinux__archinstall-1194
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`select_disk` throws an error because BlockDevice does not contain label
Running this line from a python script causes an error:
`archinstall.select_disk(archinstall.all_blockdevices())`
```
Traceback (most recent call last):
File "/root/test.py", line 3, in <module>
hdd = archinstall.select_disk(archinstall.all_blockdevices())
File "/usr/lib/python3.10/site-packages/archinstall/lib/user_interaction/disk_conf.py", line 68, in select_disk
f"{index}: {drive} ({dict_o_disks[drive]['size'], dict_o_disks[drive].device, dict_o_disks[drive]['label']})"
File "/usr/lib/python3.10/site-packages/archinstall/lib/disk/blockdevice.py", line 45, in __getitem__
raise KeyError(f'{self} does not contain information: "{key}"')
KeyError: 'BlockDevice(/dev/sr0, size=0.8GB, free_space=, bus_type=sata) does not contain information: "label"'
```
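For reference, `BlockDevice.__getitem__` raises a `KeyError` for any field it cannot resolve, which is why the listing loop above crashes on devices such as `/dev/sr0` that expose no `label`. A rough defensive sketch (illustrative only; the merged fix removes the manual print loop rather than guarding it):
```
def safe_info(device, key, default=''):
    # BlockDevice.__getitem__ raises KeyError for unknown fields, so fall
    # back to a default instead of aborting the selection menu
    try:
        return device[key]
    except KeyError:
        return default

# hypothetical usage inside the listing loop:
# safe_info(dict_o_disks[drive], 'label')
```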
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `archinstall/lib/user_interaction/disk_conf.py`
Content:
```
1 from __future__ import annotations
2
3 from typing import Any, Dict, TYPE_CHECKING, Optional
4
5 from .partitioning_conf import manage_new_and_existing_partitions, get_default_partition_layout
6 from ..disk import BlockDevice
7 from ..exceptions import DiskError
8 from ..menu import Menu
9 from ..menu.menu import MenuSelectionType
10 from ..output import log
11
12 if TYPE_CHECKING:
13 _: Any
14
15
16 def ask_for_main_filesystem_format(advanced_options=False) -> str:
17 options = {'btrfs': 'btrfs', 'ext4': 'ext4', 'xfs': 'xfs', 'f2fs': 'f2fs'}
18
19 advanced = {'ntfs': 'ntfs'}
20
21 if advanced_options:
22 options.update(advanced)
23
24 prompt = _('Select which filesystem your main partition should use')
25 choice = Menu(prompt, options, skip=False).run()
26 return choice.value
27
28
29 def select_individual_blockdevice_usage(block_devices: list) -> Dict[str, Any]:
30 result = {}
31
32 for device in block_devices:
33 layout = manage_new_and_existing_partitions(device)
34 result[device.path] = layout
35
36 return result
37
38
39 def select_disk_layout(preset: Optional[Dict[str, Any]], block_devices: list, advanced_options=False) -> Optional[Dict[str, Any]]:
40 wipe_mode = str(_('Wipe all selected drives and use a best-effort default partition layout'))
41 custome_mode = str(_('Select what to do with each individual drive (followed by partition usage)'))
42 modes = [wipe_mode, custome_mode]
43
44 warning = str(_('Are you sure you want to reset this setting?'))
45
46 choice = Menu(
47 _('Select what you wish to do with the selected block devices'),
48 modes,
49 explode_on_interrupt=True,
50 explode_warning=warning
51 ).run()
52
53 match choice.type_:
54 case MenuSelectionType.Esc: return preset
55 case MenuSelectionType.Ctrl_c: return None
56 case MenuSelectionType.Selection:
57 if choice.value == wipe_mode:
58 return get_default_partition_layout(block_devices, advanced_options)
59 else:
60 return select_individual_blockdevice_usage(block_devices)
61
62
63 def select_disk(dict_o_disks: Dict[str, BlockDevice]) -> BlockDevice:
64 """
65 Asks the user to select a harddrive from the `dict_o_disks` selection.
66 Usually this is combined with :ref:`archinstall.list_drives`.
67
68 :param dict_o_disks: A `dict` where keys are the drive-name, value should be a dict containing drive information.
69 :type dict_o_disks: dict
70
71 :return: The name/path (the dictionary key) of the selected drive
72 :rtype: str
73 """
74 drives = sorted(list(dict_o_disks.keys()))
75 if len(drives) >= 1:
76 for index, drive in enumerate(drives):
77 print(
78 f"{index}: {drive} ({dict_o_disks[drive]['size'], dict_o_disks[drive].device, dict_o_disks[drive]['label']})"
79 )
80
81 log("You can skip selecting a drive and partitioning and use whatever drive-setup is mounted at /mnt (experimental)",
82 fg="yellow")
83
84 drive = Menu('Select one of the disks or skip and use "/mnt" as default"', drives).run()
85 if not drive:
86 return drive
87
88 drive = dict_o_disks[drive]
89 return drive
90
91 raise DiskError('select_disk() requires a non-empty dictionary of disks to select from.')
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/archinstall/lib/user_interaction/disk_conf.py b/archinstall/lib/user_interaction/disk_conf.py
--- a/archinstall/lib/user_interaction/disk_conf.py
+++ b/archinstall/lib/user_interaction/disk_conf.py
@@ -7,7 +7,6 @@
from ..exceptions import DiskError
from ..menu import Menu
from ..menu.menu import MenuSelectionType
-from ..output import log
if TYPE_CHECKING:
_: Any
@@ -60,7 +59,7 @@
return select_individual_blockdevice_usage(block_devices)
-def select_disk(dict_o_disks: Dict[str, BlockDevice]) -> BlockDevice:
+def select_disk(dict_o_disks: Dict[str, BlockDevice]) -> Optional[BlockDevice]:
"""
Asks the user to select a harddrive from the `dict_o_disks` selection.
Usually this is combined with :ref:`archinstall.list_drives`.
@@ -73,19 +72,15 @@
"""
drives = sorted(list(dict_o_disks.keys()))
if len(drives) >= 1:
- for index, drive in enumerate(drives):
- print(
- f"{index}: {drive} ({dict_o_disks[drive]['size'], dict_o_disks[drive].device, dict_o_disks[drive]['label']})"
- )
+ title = str(_('You can skip selecting a drive and partitioning and use whatever drive-setup is mounted at /mnt (experimental)')) + '\n'
+ title += str(_('Select one of the disks or skip and use /mnt as default'))
- log("You can skip selecting a drive and partitioning and use whatever drive-setup is mounted at /mnt (experimental)",
- fg="yellow")
+ choice = Menu(title, drives).run()
- drive = Menu('Select one of the disks or skip and use "/mnt" as default"', drives).run()
- if not drive:
- return drive
+ if choice.type_ == MenuSelectionType.Esc:
+ return None
- drive = dict_o_disks[drive]
+ drive = dict_o_disks[choice.value]
return drive
raise DiskError('select_disk() requires a non-empty dictionary of disks to select from.')
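Because the patched `select_disk` now returns `None` when the menu is escaped, call sites presumably need to handle that case themselves. A hedged caller-side sketch (it mirrors the repro from the issue; the `/mnt` fallback is only suggested by the menu title, not implemented here):
```
import archinstall

hdd = archinstall.select_disk(archinstall.all_blockdevices())
if hdd is None:
    # selection was skipped; proceed with whatever is mounted at /mnt
    print('No drive selected, falling back to /mnt (experimental).')
```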
|
{"golden_diff": "diff --git a/archinstall/lib/user_interaction/disk_conf.py b/archinstall/lib/user_interaction/disk_conf.py\n--- a/archinstall/lib/user_interaction/disk_conf.py\n+++ b/archinstall/lib/user_interaction/disk_conf.py\n@@ -7,7 +7,6 @@\n from ..exceptions import DiskError\n from ..menu import Menu\n from ..menu.menu import MenuSelectionType\n-from ..output import log\n \n if TYPE_CHECKING:\n \t_: Any\n@@ -60,7 +59,7 @@\n \t\t\t\treturn select_individual_blockdevice_usage(block_devices)\n \n \n-def select_disk(dict_o_disks: Dict[str, BlockDevice]) -> BlockDevice:\n+def select_disk(dict_o_disks: Dict[str, BlockDevice]) -> Optional[BlockDevice]:\n \t\"\"\"\n \tAsks the user to select a harddrive from the `dict_o_disks` selection.\n \tUsually this is combined with :ref:`archinstall.list_drives`.\n@@ -73,19 +72,15 @@\n \t\"\"\"\n \tdrives = sorted(list(dict_o_disks.keys()))\n \tif len(drives) >= 1:\n-\t\tfor index, drive in enumerate(drives):\n-\t\t\tprint(\n-\t\t\t\tf\"{index}: {drive} ({dict_o_disks[drive]['size'], dict_o_disks[drive].device, dict_o_disks[drive]['label']})\"\n-\t\t\t)\n+\t\ttitle = str(_('You can skip selecting a drive and partitioning and use whatever drive-setup is mounted at /mnt (experimental)')) + '\\n'\n+\t\ttitle += str(_('Select one of the disks or skip and use /mnt as default'))\n \n-\t\tlog(\"You can skip selecting a drive and partitioning and use whatever drive-setup is mounted at /mnt (experimental)\",\n-\t\t\tfg=\"yellow\")\n+\t\tchoice = Menu(title, drives).run()\n \n-\t\tdrive = Menu('Select one of the disks or skip and use \"/mnt\" as default\"', drives).run()\n-\t\tif not drive:\n-\t\t\treturn drive\n+\t\tif choice.type_ == MenuSelectionType.Esc:\n+\t\t\treturn None\n \n-\t\tdrive = dict_o_disks[drive]\n+\t\tdrive = dict_o_disks[choice.value]\n \t\treturn drive\n \n \traise DiskError('select_disk() requires a non-empty dictionary of disks to select from.')\n", "issue": "`select_disk` throw an error because BlockDevice does not contain label\nRunning this line from a python script causes an error: \r\n\r\n`archinstall.select_disk(archinstall.all_blockdevices())`\r\n\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/root/test.py\", line 3, in <module>\r\n hdd = archinstall.select_disk(archinstall.all_blockdevices())\r\n File \"/usr/lib/python3.10/site-packages/archinstall/lib/user_interaction/disk_conf.py\", line 68, in select_disk\r\n f\"{index}: {drive} ({dict_o_disks[drive]['size'], dict_o_disks[drive].device, dict_o_disks[drive]['label']})\"\r\n File \"/usr/lib/python3.10/site-packages/archinstall/lib/disk/blockdevice.py\", line 45, in __getitem__\r\n raise KeyError(f'{self} does not contain information: \"{key}\"')\r\nKeyError: 'BlockDevice(/dev/sr0, size=0.8GB, free_space=, bus_type=sata) does not contain information: \"label\"'\r\n```\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any, Dict, TYPE_CHECKING, Optional\n\nfrom .partitioning_conf import manage_new_and_existing_partitions, get_default_partition_layout\nfrom ..disk import BlockDevice\nfrom ..exceptions import DiskError\nfrom ..menu import Menu\nfrom ..menu.menu import MenuSelectionType\nfrom ..output import log\n\nif TYPE_CHECKING:\n\t_: Any\n\n\ndef ask_for_main_filesystem_format(advanced_options=False) -> str:\n\toptions = {'btrfs': 'btrfs', 'ext4': 'ext4', 'xfs': 'xfs', 'f2fs': 'f2fs'}\n\n\tadvanced = {'ntfs': 'ntfs'}\n\n\tif advanced_options:\n\t\toptions.update(advanced)\n\n\tprompt = _('Select which filesystem your main 
partition should use')\n\tchoice = Menu(prompt, options, skip=False).run()\n\treturn choice.value\n\n\ndef select_individual_blockdevice_usage(block_devices: list) -> Dict[str, Any]:\n\tresult = {}\n\n\tfor device in block_devices:\n\t\tlayout = manage_new_and_existing_partitions(device)\n\t\tresult[device.path] = layout\n\n\treturn result\n\n\ndef select_disk_layout(preset: Optional[Dict[str, Any]], block_devices: list, advanced_options=False) -> Optional[Dict[str, Any]]:\n\twipe_mode = str(_('Wipe all selected drives and use a best-effort default partition layout'))\n\tcustome_mode = str(_('Select what to do with each individual drive (followed by partition usage)'))\n\tmodes = [wipe_mode, custome_mode]\n\n\twarning = str(_('Are you sure you want to reset this setting?'))\n\n\tchoice = Menu(\n\t\t_('Select what you wish to do with the selected block devices'),\n\t\tmodes,\n\t\texplode_on_interrupt=True,\n\t\texplode_warning=warning\n\t).run()\n\n\tmatch choice.type_:\n\t\tcase MenuSelectionType.Esc: return preset\n\t\tcase MenuSelectionType.Ctrl_c: return None\n\t\tcase MenuSelectionType.Selection:\n\t\t\tif choice.value == wipe_mode:\n\t\t\t\treturn get_default_partition_layout(block_devices, advanced_options)\n\t\t\telse:\n\t\t\t\treturn select_individual_blockdevice_usage(block_devices)\n\n\ndef select_disk(dict_o_disks: Dict[str, BlockDevice]) -> BlockDevice:\n\t\"\"\"\n\tAsks the user to select a harddrive from the `dict_o_disks` selection.\n\tUsually this is combined with :ref:`archinstall.list_drives`.\n\n\t:param dict_o_disks: A `dict` where keys are the drive-name, value should be a dict containing drive information.\n\t:type dict_o_disks: dict\n\n\t:return: The name/path (the dictionary key) of the selected drive\n\t:rtype: str\n\t\"\"\"\n\tdrives = sorted(list(dict_o_disks.keys()))\n\tif len(drives) >= 1:\n\t\tfor index, drive in enumerate(drives):\n\t\t\tprint(\n\t\t\t\tf\"{index}: {drive} ({dict_o_disks[drive]['size'], dict_o_disks[drive].device, dict_o_disks[drive]['label']})\"\n\t\t\t)\n\n\t\tlog(\"You can skip selecting a drive and partitioning and use whatever drive-setup is mounted at /mnt (experimental)\",\n\t\t\tfg=\"yellow\")\n\n\t\tdrive = Menu('Select one of the disks or skip and use \"/mnt\" as default\"', drives).run()\n\t\tif not drive:\n\t\t\treturn drive\n\n\t\tdrive = dict_o_disks[drive]\n\t\treturn drive\n\n\traise DiskError('select_disk() requires a non-empty dictionary of disks to select from.')\n", "path": "archinstall/lib/user_interaction/disk_conf.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any, Dict, TYPE_CHECKING, Optional\n\nfrom .partitioning_conf import manage_new_and_existing_partitions, get_default_partition_layout\nfrom ..disk import BlockDevice\nfrom ..exceptions import DiskError\nfrom ..menu import Menu\nfrom ..menu.menu import MenuSelectionType\n\nif TYPE_CHECKING:\n\t_: Any\n\n\ndef ask_for_main_filesystem_format(advanced_options=False) -> str:\n\toptions = {'btrfs': 'btrfs', 'ext4': 'ext4', 'xfs': 'xfs', 'f2fs': 'f2fs'}\n\n\tadvanced = {'ntfs': 'ntfs'}\n\n\tif advanced_options:\n\t\toptions.update(advanced)\n\n\tprompt = _('Select which filesystem your main partition should use')\n\tchoice = Menu(prompt, options, skip=False).run()\n\treturn choice.value\n\n\ndef select_individual_blockdevice_usage(block_devices: list) -> Dict[str, Any]:\n\tresult = {}\n\n\tfor device in block_devices:\n\t\tlayout = manage_new_and_existing_partitions(device)\n\t\tresult[device.path] = layout\n\n\treturn 
result\n\n\ndef select_disk_layout(preset: Optional[Dict[str, Any]], block_devices: list, advanced_options=False) -> Optional[Dict[str, Any]]:\n\twipe_mode = str(_('Wipe all selected drives and use a best-effort default partition layout'))\n\tcustome_mode = str(_('Select what to do with each individual drive (followed by partition usage)'))\n\tmodes = [wipe_mode, custome_mode]\n\n\twarning = str(_('Are you sure you want to reset this setting?'))\n\n\tchoice = Menu(\n\t\t_('Select what you wish to do with the selected block devices'),\n\t\tmodes,\n\t\texplode_on_interrupt=True,\n\t\texplode_warning=warning\n\t).run()\n\n\tmatch choice.type_:\n\t\tcase MenuSelectionType.Esc: return preset\n\t\tcase MenuSelectionType.Ctrl_c: return None\n\t\tcase MenuSelectionType.Selection:\n\t\t\tif choice.value == wipe_mode:\n\t\t\t\treturn get_default_partition_layout(block_devices, advanced_options)\n\t\t\telse:\n\t\t\t\treturn select_individual_blockdevice_usage(block_devices)\n\n\ndef select_disk(dict_o_disks: Dict[str, BlockDevice]) -> Optional[BlockDevice]:\n\t\"\"\"\n\tAsks the user to select a harddrive from the `dict_o_disks` selection.\n\tUsually this is combined with :ref:`archinstall.list_drives`.\n\n\t:param dict_o_disks: A `dict` where keys are the drive-name, value should be a dict containing drive information.\n\t:type dict_o_disks: dict\n\n\t:return: The name/path (the dictionary key) of the selected drive\n\t:rtype: str\n\t\"\"\"\n\tdrives = sorted(list(dict_o_disks.keys()))\n\tif len(drives) >= 1:\n\t\ttitle = str(_('You can skip selecting a drive and partitioning and use whatever drive-setup is mounted at /mnt (experimental)')) + '\\n'\n\t\ttitle += str(_('Select one of the disks or skip and use /mnt as default'))\n\n\t\tchoice = Menu(title, drives).run()\n\n\t\tif choice.type_ == MenuSelectionType.Esc:\n\t\t\treturn None\n\n\t\tdrive = dict_o_disks[choice.value]\n\t\treturn drive\n\n\traise DiskError('select_disk() requires a non-empty dictionary of disks to select from.')\n", "path": "archinstall/lib/user_interaction/disk_conf.py"}]}
| 1,456 | 498 |
gh_patches_debug_25811
|
rasdani/github-patches
|
git_diff
|
pyro-ppl__numpyro-356
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`plate` has unexpected behaviour when dist has batch_shape
I faced this issue while revising the baseball example using `plate`. The repro code
```
import jax.numpy as np
import numpyro
import numpyro.distributions as dist
from numpyro.handlers import trace, seed
def model():
with numpyro.plate('plate', 10):
numpyro.sample('x', dist.Normal(np.zeros(10), 1))
trace(seed(model, 0)).get_trace()['x']['value'].shape
```
should return `(10,)` but currently, it returns `(10, 10)`.
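For comparison, the shape comes out as expected when the plate alone supplies the batch dimension. A small sketch reusing the imports from the repro above:
```
def model_unbatched():
    with numpyro.plate('plate', 10):
        # unbatched base distribution: the plate broadcasts it to shape (10,)
        numpyro.sample('x', dist.Normal(0., 1.))

trace(seed(model_unbatched, 0)).get_trace()['x']['value'].shape  # (10,)
```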
cc @neerajprad
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `numpyro/primitives.py`
Content:
```
1 from collections import namedtuple
2 import functools
3
4 import jax
5 from jax import lax
6
7 import numpyro
8 from numpyro.distributions.discrete import PRNGIdentity
9
10 _PYRO_STACK = []
11
12
13 CondIndepStackFrame = namedtuple('CondIndepStackFrame', ['name', 'dim', 'size'])
14
15
16 def apply_stack(msg):
17 pointer = 0
18 for pointer, handler in enumerate(reversed(_PYRO_STACK)):
19 handler.process_message(msg)
20 # When a Messenger sets the "stop" field of a message,
21 # it prevents any Messengers above it on the stack from being applied.
22 if msg.get("stop"):
23 break
24 if msg['value'] is None:
25 if msg['type'] == 'sample':
26 msg['value'], msg['intermediates'] = msg['fn'](*msg['args'],
27 sample_intermediates=True,
28 **msg['kwargs'])
29 else:
30 msg['value'] = msg['fn'](*msg['args'], **msg['kwargs'])
31
32 # A Messenger that sets msg["stop"] == True also prevents application
33 # of postprocess_message by Messengers above it on the stack
34 # via the pointer variable from the process_message loop
35 for handler in _PYRO_STACK[-pointer-1:]:
36 handler.postprocess_message(msg)
37 return msg
38
39
40 class Messenger(object):
41 def __init__(self, fn=None):
42 self.fn = fn
43 functools.update_wrapper(self, fn, updated=[])
44
45 def __enter__(self):
46 _PYRO_STACK.append(self)
47
48 def __exit__(self, *args, **kwargs):
49 assert _PYRO_STACK[-1] is self
50 _PYRO_STACK.pop()
51
52 def process_message(self, msg):
53 pass
54
55 def postprocess_message(self, msg):
56 pass
57
58 def __call__(self, *args, **kwargs):
59 with self:
60 return self.fn(*args, **kwargs)
61
62
63 def sample(name, fn, obs=None, random_state=None, sample_shape=()):
64 """
65 Returns a random sample from the stochastic function `fn`. This can have
66 additional side effects when wrapped inside effect handlers like
67 :class:`~numpyro.handlers.substitute`.
68
69 .. note::
70 By design, `sample` primitive is meant to be used inside a NumPyro model.
71 Then :class:`~numpyro.handlers.seed` handler is used to inject a random
72 state to `fn`. In those situations, `random_state` keyword will take no
73 effect.
74
75 :param str name: name of the sample site
76 :param fn: Python callable
77 :param numpy.ndarray obs: observed value
78 :param jax.random.PRNGKey random_state: an optional random key for `fn`.
79 :param sample_shape: Shape of samples to be drawn.
80 :return: sample from the stochastic `fn`.
81 """
82 # if there are no active Messengers, we just draw a sample and return it as expected:
83 if not _PYRO_STACK:
84 return fn(random_state=random_state, sample_shape=sample_shape)
85
86 # Otherwise, we initialize a message...
87 initial_msg = {
88 'type': 'sample',
89 'name': name,
90 'fn': fn,
91 'args': (),
92 'kwargs': {'random_state': random_state, 'sample_shape': sample_shape},
93 'value': obs,
94 'scale': 1.0,
95 'is_observed': obs is not None,
96 'intermediates': [],
97 'cond_indep_stack': [],
98 }
99
100 # ...and use apply_stack to send it to the Messengers
101 msg = apply_stack(initial_msg)
102 return msg['value']
103
104
105 def identity(x, *args, **kwargs):
106 return x
107
108
109 def param(name, init_value=None, **kwargs):
110 """
111 Annotate the given site as an optimizable parameter for use with
112 :mod:`jax.experimental.optimizers`. For an example of how `param` statements
113 can be used in inference algorithms, refer to :func:`~numpyro.svi.svi`.
114
115 :param str name: name of site.
116 :param numpy.ndarray init_value: initial value specified by the user. Note that
117 the onus of using this to initialize the optimizer is on the user /
118 inference algorithm, since there is no global parameter store in
119 NumPyro.
120 :return: value for the parameter. Unless wrapped inside a
121 handler like :class:`~numpyro.handlers.substitute`, this will simply
122 return the initial value.
123 """
124 # if there are no active Messengers, we just draw a sample and return it as expected:
125 if not _PYRO_STACK:
126 return init_value
127
128 # Otherwise, we initialize a message...
129 initial_msg = {
130 'type': 'param',
131 'name': name,
132 'fn': identity,
133 'args': (init_value,),
134 'kwargs': kwargs,
135 'value': None,
136 'scale': 1.0,
137 'cond_indep_stack': [],
138 }
139
140 # ...and use apply_stack to send it to the Messengers
141 msg = apply_stack(initial_msg)
142 return msg['value']
143
144
145 def module(name, nn, input_shape=None):
146 """
147 Declare a :mod:`~jax.experimental.stax` style neural network inside a
148 model so that its parameters are registered for optimization via
149 :func:`~numpyro.primitives.param` statements.
150
151 :param str name: name of the module to be registered.
152 :param tuple nn: a tuple of `(init_fn, apply_fn)` obtained by a :mod:`~jax.experimental.stax`
153 constructor function.
154 :param tuple input_shape: shape of the input taken by the
155 neural network.
156 :return: a `apply_fn` with bound parameters that takes an array
157 as an input and returns the neural network transformed output
158 array.
159 """
160 module_key = name + '$params'
161 nn_init, nn_apply = nn
162 nn_params = param(module_key)
163 if nn_params is None:
164 if input_shape is None:
165 raise ValueError('Valid value for `input_size` needed to initialize.')
166 rng = numpyro.sample(name + '$rng', PRNGIdentity())
167 _, nn_params = nn_init(rng, input_shape)
168 param(module_key, nn_params)
169 return jax.partial(nn_apply, nn_params)
170
171
172 class plate(Messenger):
173 """
174 Construct for annotating conditionally independent variables. Within a
175 `plate` context manager, `sample` sites will be automatically broadcasted to
176 the size of the plate. Additionally, a scale factor might be applied by
177 certain inference algorithms if `subsample_size` is specified.
178
179 :param str name: Name of the plate.
180 :param int size: Size of the plate.
181 :param int subsample_size: Optional argument denoting the size of the mini-batch.
182 This can be used to apply a scaling factor by inference algorithms. e.g.
183 when computing ELBO using a mini-batch.
184 :param int dim: Optional argument to specify which dimension in the tensor
185 is used as the plate dim. If `None` (default), the leftmost available dim
186 is allocated.
187 """
188 def __init__(self, name, size, subsample_size=None, dim=None):
189 self.name = name
190 self.size = size
191 self.subsample_size = size if subsample_size is None else subsample_size
192 if dim is not None and dim >= 0:
193 raise ValueError('dim arg must be negative.')
194 self.dim = dim
195 self._validate_and_set_dim()
196 super(plate, self).__init__()
197
198 def _validate_and_set_dim(self):
199 msg = {
200 'type': 'plate',
201 'fn': identity,
202 'name': self.name,
203 'args': (None,),
204 'kwargs': {},
205 'value': None,
206 'scale': 1.0,
207 'cond_indep_stack': [],
208 }
209 apply_stack(msg)
210 cond_indep_stack = msg['cond_indep_stack']
211 occupied_dims = {f.dim for f in cond_indep_stack}
212 dim = -1
213 while True:
214 if dim not in occupied_dims:
215 break
216 dim -= 1
217 if self.dim is None:
218 self.dim = dim
219 else:
220 assert self.dim not in occupied_dims
221
222 @staticmethod
223 def _get_batch_shape(cond_indep_stack):
224 n_dims = max(-f.dim for f in cond_indep_stack)
225 batch_shape = [1] * n_dims
226 for f in cond_indep_stack:
227 batch_shape[f.dim] = f.size
228 return tuple(batch_shape)
229
230 def process_message(self, msg):
231 cond_indep_stack = msg['cond_indep_stack']
232 frame = CondIndepStackFrame(self.name, self.dim, self.subsample_size)
233 cond_indep_stack.append(frame)
234 batch_shape = self._get_batch_shape(cond_indep_stack)
235 if 'sample_shape' in msg['kwargs']:
236 batch_shape = lax.broadcast_shapes(msg['kwargs']['sample_shape'], batch_shape)
237 msg['kwargs']['sample_shape'] = batch_shape
238 msg['scale'] = msg['scale'] * self.size / self.subsample_size
239
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/numpyro/primitives.py b/numpyro/primitives.py
--- a/numpyro/primitives.py
+++ b/numpyro/primitives.py
@@ -231,7 +231,20 @@
cond_indep_stack = msg['cond_indep_stack']
frame = CondIndepStackFrame(self.name, self.dim, self.subsample_size)
cond_indep_stack.append(frame)
- batch_shape = self._get_batch_shape(cond_indep_stack)
+ expected_shape = self._get_batch_shape(cond_indep_stack)
+ dist_batch_shape = msg['fn'].batch_shape if msg['type'] == 'sample' else ()
+ overlap_idx = len(expected_shape) - len(dist_batch_shape)
+ if overlap_idx < 0:
+ raise ValueError('Expected dimensions within plate = {}, which is less than the '
+ 'distribution\'s batch shape = {}.'.format(len(expected_shape), len(dist_batch_shape)))
+ trailing_shape = expected_shape[overlap_idx:]
+ # e.g. distribution with batch shape (1, 5) cannot be broadcast to (5, 5)
+ broadcast_shape = lax.broadcast_shapes(trailing_shape, dist_batch_shape)
+ if broadcast_shape != dist_batch_shape:
+ raise ValueError('Distribution batch shape = {} cannot be broadcast up to {}. '
+ 'Consider using unbatched distributions.'
+ .format(dist_batch_shape, broadcast_shape))
+ batch_shape = expected_shape[:overlap_idx]
if 'sample_shape' in msg['kwargs']:
batch_shape = lax.broadcast_shapes(msg['kwargs']['sample_shape'], batch_shape)
msg['kwargs']['sample_shape'] = batch_shape
|
{"golden_diff": "diff --git a/numpyro/primitives.py b/numpyro/primitives.py\n--- a/numpyro/primitives.py\n+++ b/numpyro/primitives.py\n@@ -231,7 +231,20 @@\n cond_indep_stack = msg['cond_indep_stack']\n frame = CondIndepStackFrame(self.name, self.dim, self.subsample_size)\n cond_indep_stack.append(frame)\n- batch_shape = self._get_batch_shape(cond_indep_stack)\n+ expected_shape = self._get_batch_shape(cond_indep_stack)\n+ dist_batch_shape = msg['fn'].batch_shape if msg['type'] == 'sample' else ()\n+ overlap_idx = len(expected_shape) - len(dist_batch_shape)\n+ if overlap_idx < 0:\n+ raise ValueError('Expected dimensions within plate = {}, which is less than the '\n+ 'distribution\\'s batch shape = {}.'.format(len(expected_shape), len(dist_batch_shape)))\n+ trailing_shape = expected_shape[overlap_idx:]\n+ # e.g. distribution with batch shape (1, 5) cannot be broadcast to (5, 5)\n+ broadcast_shape = lax.broadcast_shapes(trailing_shape, dist_batch_shape)\n+ if broadcast_shape != dist_batch_shape:\n+ raise ValueError('Distribution batch shape = {} cannot be broadcast up to {}. '\n+ 'Consider using unbatched distributions.'\n+ .format(dist_batch_shape, broadcast_shape))\n+ batch_shape = expected_shape[:overlap_idx]\n if 'sample_shape' in msg['kwargs']:\n batch_shape = lax.broadcast_shapes(msg['kwargs']['sample_shape'], batch_shape)\n msg['kwargs']['sample_shape'] = batch_shape\n", "issue": "`plate` has unexpected behaviour when dist has batch_shape\nI faced this issue while revising baseball example using `plate`. The repro code\r\n```\r\nimport jax.numpy as np\r\nimport numpyro\r\nimport numpyro.distributions as dist\r\nfrom numpyro.handlers import trace, seed\r\n\r\ndef model():\r\n with numpyro.plate('plate', 10):\r\n numpyro.sample('x', dist.Normal(np.zeros(10), 1))\r\n\r\ntrace(seed(model, 0)).get_trace()['x']['value'].shape\r\n```\r\nshould return `(10,)` but currently, it returns `(10, 10)`.\r\n\r\ncc @neerajprad \n", "before_files": [{"content": "from collections import namedtuple\nimport functools\n\nimport jax\nfrom jax import lax\n\nimport numpyro\nfrom numpyro.distributions.discrete import PRNGIdentity\n\n_PYRO_STACK = []\n\n\nCondIndepStackFrame = namedtuple('CondIndepStackFrame', ['name', 'dim', 'size'])\n\n\ndef apply_stack(msg):\n pointer = 0\n for pointer, handler in enumerate(reversed(_PYRO_STACK)):\n handler.process_message(msg)\n # When a Messenger sets the \"stop\" field of a message,\n # it prevents any Messengers above it on the stack from being applied.\n if msg.get(\"stop\"):\n break\n if msg['value'] is None:\n if msg['type'] == 'sample':\n msg['value'], msg['intermediates'] = msg['fn'](*msg['args'],\n sample_intermediates=True,\n **msg['kwargs'])\n else:\n msg['value'] = msg['fn'](*msg['args'], **msg['kwargs'])\n\n # A Messenger that sets msg[\"stop\"] == True also prevents application\n # of postprocess_message by Messengers above it on the stack\n # via the pointer variable from the process_message loop\n for handler in _PYRO_STACK[-pointer-1:]:\n handler.postprocess_message(msg)\n return msg\n\n\nclass Messenger(object):\n def __init__(self, fn=None):\n self.fn = fn\n functools.update_wrapper(self, fn, updated=[])\n\n def __enter__(self):\n _PYRO_STACK.append(self)\n\n def __exit__(self, *args, **kwargs):\n assert _PYRO_STACK[-1] is self\n _PYRO_STACK.pop()\n\n def process_message(self, msg):\n pass\n\n def postprocess_message(self, msg):\n pass\n\n def __call__(self, *args, **kwargs):\n with self:\n return self.fn(*args, **kwargs)\n\n\ndef sample(name, fn, 
obs=None, random_state=None, sample_shape=()):\n \"\"\"\n Returns a random sample from the stochastic function `fn`. This can have\n additional side effects when wrapped inside effect handlers like\n :class:`~numpyro.handlers.substitute`.\n\n .. note::\n By design, `sample` primitive is meant to be used inside a NumPyro model.\n Then :class:`~numpyro.handlers.seed` handler is used to inject a random\n state to `fn`. In those situations, `random_state` keyword will take no\n effect.\n\n :param str name: name of the sample site\n :param fn: Python callable\n :param numpy.ndarray obs: observed value\n :param jax.random.PRNGKey random_state: an optional random key for `fn`.\n :param sample_shape: Shape of samples to be drawn.\n :return: sample from the stochastic `fn`.\n \"\"\"\n # if there are no active Messengers, we just draw a sample and return it as expected:\n if not _PYRO_STACK:\n return fn(random_state=random_state, sample_shape=sample_shape)\n\n # Otherwise, we initialize a message...\n initial_msg = {\n 'type': 'sample',\n 'name': name,\n 'fn': fn,\n 'args': (),\n 'kwargs': {'random_state': random_state, 'sample_shape': sample_shape},\n 'value': obs,\n 'scale': 1.0,\n 'is_observed': obs is not None,\n 'intermediates': [],\n 'cond_indep_stack': [],\n }\n\n # ...and use apply_stack to send it to the Messengers\n msg = apply_stack(initial_msg)\n return msg['value']\n\n\ndef identity(x, *args, **kwargs):\n return x\n\n\ndef param(name, init_value=None, **kwargs):\n \"\"\"\n Annotate the given site as an optimizable parameter for use with\n :mod:`jax.experimental.optimizers`. For an example of how `param` statements\n can be used in inference algorithms, refer to :func:`~numpyro.svi.svi`.\n\n :param str name: name of site.\n :param numpy.ndarray init_value: initial value specified by the user. Note that\n the onus of using this to initialize the optimizer is on the user /\n inference algorithm, since there is no global parameter store in\n NumPyro.\n :return: value for the parameter. 
Unless wrapped inside a\n handler like :class:`~numpyro.handlers.substitute`, this will simply\n return the initial value.\n \"\"\"\n # if there are no active Messengers, we just draw a sample and return it as expected:\n if not _PYRO_STACK:\n return init_value\n\n # Otherwise, we initialize a message...\n initial_msg = {\n 'type': 'param',\n 'name': name,\n 'fn': identity,\n 'args': (init_value,),\n 'kwargs': kwargs,\n 'value': None,\n 'scale': 1.0,\n 'cond_indep_stack': [],\n }\n\n # ...and use apply_stack to send it to the Messengers\n msg = apply_stack(initial_msg)\n return msg['value']\n\n\ndef module(name, nn, input_shape=None):\n \"\"\"\n Declare a :mod:`~jax.experimental.stax` style neural network inside a\n model so that its parameters are registered for optimization via\n :func:`~numpyro.primitives.param` statements.\n\n :param str name: name of the module to be registered.\n :param tuple nn: a tuple of `(init_fn, apply_fn)` obtained by a :mod:`~jax.experimental.stax`\n constructor function.\n :param tuple input_shape: shape of the input taken by the\n neural network.\n :return: a `apply_fn` with bound parameters that takes an array\n as an input and returns the neural network transformed output\n array.\n \"\"\"\n module_key = name + '$params'\n nn_init, nn_apply = nn\n nn_params = param(module_key)\n if nn_params is None:\n if input_shape is None:\n raise ValueError('Valid value for `input_size` needed to initialize.')\n rng = numpyro.sample(name + '$rng', PRNGIdentity())\n _, nn_params = nn_init(rng, input_shape)\n param(module_key, nn_params)\n return jax.partial(nn_apply, nn_params)\n\n\nclass plate(Messenger):\n \"\"\"\n Construct for annotating conditionally independent variables. Within a\n `plate` context manager, `sample` sites will be automatically broadcasted to\n the size of the plate. Additionally, a scale factor might be applied by\n certain inference algorithms if `subsample_size` is specified.\n\n :param str name: Name of the plate.\n :param int size: Size of the plate.\n :param int subsample_size: Optional argument denoting the size of the mini-batch.\n This can be used to apply a scaling factor by inference algorithms. e.g.\n when computing ELBO using a mini-batch.\n :param int dim: Optional argument to specify which dimension in the tensor\n is used as the plate dim. 
If `None` (default), the leftmost available dim\n is allocated.\n \"\"\"\n def __init__(self, name, size, subsample_size=None, dim=None):\n self.name = name\n self.size = size\n self.subsample_size = size if subsample_size is None else subsample_size\n if dim is not None and dim >= 0:\n raise ValueError('dim arg must be negative.')\n self.dim = dim\n self._validate_and_set_dim()\n super(plate, self).__init__()\n\n def _validate_and_set_dim(self):\n msg = {\n 'type': 'plate',\n 'fn': identity,\n 'name': self.name,\n 'args': (None,),\n 'kwargs': {},\n 'value': None,\n 'scale': 1.0,\n 'cond_indep_stack': [],\n }\n apply_stack(msg)\n cond_indep_stack = msg['cond_indep_stack']\n occupied_dims = {f.dim for f in cond_indep_stack}\n dim = -1\n while True:\n if dim not in occupied_dims:\n break\n dim -= 1\n if self.dim is None:\n self.dim = dim\n else:\n assert self.dim not in occupied_dims\n\n @staticmethod\n def _get_batch_shape(cond_indep_stack):\n n_dims = max(-f.dim for f in cond_indep_stack)\n batch_shape = [1] * n_dims\n for f in cond_indep_stack:\n batch_shape[f.dim] = f.size\n return tuple(batch_shape)\n\n def process_message(self, msg):\n cond_indep_stack = msg['cond_indep_stack']\n frame = CondIndepStackFrame(self.name, self.dim, self.subsample_size)\n cond_indep_stack.append(frame)\n batch_shape = self._get_batch_shape(cond_indep_stack)\n if 'sample_shape' in msg['kwargs']:\n batch_shape = lax.broadcast_shapes(msg['kwargs']['sample_shape'], batch_shape)\n msg['kwargs']['sample_shape'] = batch_shape\n msg['scale'] = msg['scale'] * self.size / self.subsample_size\n", "path": "numpyro/primitives.py"}], "after_files": [{"content": "from collections import namedtuple\nimport functools\n\nimport jax\nfrom jax import lax\n\nimport numpyro\nfrom numpyro.distributions.discrete import PRNGIdentity\n\n_PYRO_STACK = []\n\n\nCondIndepStackFrame = namedtuple('CondIndepStackFrame', ['name', 'dim', 'size'])\n\n\ndef apply_stack(msg):\n pointer = 0\n for pointer, handler in enumerate(reversed(_PYRO_STACK)):\n handler.process_message(msg)\n # When a Messenger sets the \"stop\" field of a message,\n # it prevents any Messengers above it on the stack from being applied.\n if msg.get(\"stop\"):\n break\n if msg['value'] is None:\n if msg['type'] == 'sample':\n msg['value'], msg['intermediates'] = msg['fn'](*msg['args'],\n sample_intermediates=True,\n **msg['kwargs'])\n else:\n msg['value'] = msg['fn'](*msg['args'], **msg['kwargs'])\n\n # A Messenger that sets msg[\"stop\"] == True also prevents application\n # of postprocess_message by Messengers above it on the stack\n # via the pointer variable from the process_message loop\n for handler in _PYRO_STACK[-pointer-1:]:\n handler.postprocess_message(msg)\n return msg\n\n\nclass Messenger(object):\n def __init__(self, fn=None):\n self.fn = fn\n functools.update_wrapper(self, fn, updated=[])\n\n def __enter__(self):\n _PYRO_STACK.append(self)\n\n def __exit__(self, *args, **kwargs):\n assert _PYRO_STACK[-1] is self\n _PYRO_STACK.pop()\n\n def process_message(self, msg):\n pass\n\n def postprocess_message(self, msg):\n pass\n\n def __call__(self, *args, **kwargs):\n with self:\n return self.fn(*args, **kwargs)\n\n\ndef sample(name, fn, obs=None, random_state=None, sample_shape=()):\n \"\"\"\n Returns a random sample from the stochastic function `fn`. This can have\n additional side effects when wrapped inside effect handlers like\n :class:`~numpyro.handlers.substitute`.\n\n .. 
note::\n By design, `sample` primitive is meant to be used inside a NumPyro model.\n Then :class:`~numpyro.handlers.seed` handler is used to inject a random\n state to `fn`. In those situations, `random_state` keyword will take no\n effect.\n\n :param str name: name of the sample site\n :param fn: Python callable\n :param numpy.ndarray obs: observed value\n :param jax.random.PRNGKey random_state: an optional random key for `fn`.\n :param sample_shape: Shape of samples to be drawn.\n :return: sample from the stochastic `fn`.\n \"\"\"\n # if there are no active Messengers, we just draw a sample and return it as expected:\n if not _PYRO_STACK:\n return fn(random_state=random_state, sample_shape=sample_shape)\n\n # Otherwise, we initialize a message...\n initial_msg = {\n 'type': 'sample',\n 'name': name,\n 'fn': fn,\n 'args': (),\n 'kwargs': {'random_state': random_state, 'sample_shape': sample_shape},\n 'value': obs,\n 'scale': 1.0,\n 'is_observed': obs is not None,\n 'intermediates': [],\n 'cond_indep_stack': [],\n }\n\n # ...and use apply_stack to send it to the Messengers\n msg = apply_stack(initial_msg)\n return msg['value']\n\n\ndef identity(x, *args, **kwargs):\n return x\n\n\ndef param(name, init_value=None, **kwargs):\n \"\"\"\n Annotate the given site as an optimizable parameter for use with\n :mod:`jax.experimental.optimizers`. For an example of how `param` statements\n can be used in inference algorithms, refer to :func:`~numpyro.svi.svi`.\n\n :param str name: name of site.\n :param numpy.ndarray init_value: initial value specified by the user. Note that\n the onus of using this to initialize the optimizer is on the user /\n inference algorithm, since there is no global parameter store in\n NumPyro.\n :return: value for the parameter. Unless wrapped inside a\n handler like :class:`~numpyro.handlers.substitute`, this will simply\n return the initial value.\n \"\"\"\n # if there are no active Messengers, we just draw a sample and return it as expected:\n if not _PYRO_STACK:\n return init_value\n\n # Otherwise, we initialize a message...\n initial_msg = {\n 'type': 'param',\n 'name': name,\n 'fn': identity,\n 'args': (init_value,),\n 'kwargs': kwargs,\n 'value': None,\n 'scale': 1.0,\n 'cond_indep_stack': [],\n }\n\n # ...and use apply_stack to send it to the Messengers\n msg = apply_stack(initial_msg)\n return msg['value']\n\n\ndef module(name, nn, input_shape=None):\n \"\"\"\n Declare a :mod:`~jax.experimental.stax` style neural network inside a\n model so that its parameters are registered for optimization via\n :func:`~numpyro.primitives.param` statements.\n\n :param str name: name of the module to be registered.\n :param tuple nn: a tuple of `(init_fn, apply_fn)` obtained by a :mod:`~jax.experimental.stax`\n constructor function.\n :param tuple input_shape: shape of the input taken by the\n neural network.\n :return: a `apply_fn` with bound parameters that takes an array\n as an input and returns the neural network transformed output\n array.\n \"\"\"\n module_key = name + '$params'\n nn_init, nn_apply = nn\n nn_params = param(module_key)\n if nn_params is None:\n if input_shape is None:\n raise ValueError('Valid value for `input_size` needed to initialize.')\n rng = numpyro.sample(name + '$rng', PRNGIdentity())\n _, nn_params = nn_init(rng, input_shape)\n param(module_key, nn_params)\n return jax.partial(nn_apply, nn_params)\n\n\nclass plate(Messenger):\n \"\"\"\n Construct for annotating conditionally independent variables. 
Within a\n `plate` context manager, `sample` sites will be automatically broadcasted to\n the size of the plate. Additionally, a scale factor might be applied by\n certain inference algorithms if `subsample_size` is specified.\n\n :param str name: Name of the plate.\n :param int size: Size of the plate.\n :param int subsample_size: Optional argument denoting the size of the mini-batch.\n This can be used to apply a scaling factor by inference algorithms. e.g.\n when computing ELBO using a mini-batch.\n :param int dim: Optional argument to specify which dimension in the tensor\n is used as the plate dim. If `None` (default), the leftmost available dim\n is allocated.\n \"\"\"\n def __init__(self, name, size, subsample_size=None, dim=None):\n self.name = name\n self.size = size\n self.subsample_size = size if subsample_size is None else subsample_size\n if dim is not None and dim >= 0:\n raise ValueError('dim arg must be negative.')\n self.dim = dim\n self._validate_and_set_dim()\n super(plate, self).__init__()\n\n def _validate_and_set_dim(self):\n msg = {\n 'type': 'plate',\n 'fn': identity,\n 'name': self.name,\n 'args': (None,),\n 'kwargs': {},\n 'value': None,\n 'scale': 1.0,\n 'cond_indep_stack': [],\n }\n apply_stack(msg)\n cond_indep_stack = msg['cond_indep_stack']\n occupied_dims = {f.dim for f in cond_indep_stack}\n dim = -1\n while True:\n if dim not in occupied_dims:\n break\n dim -= 1\n if self.dim is None:\n self.dim = dim\n else:\n assert self.dim not in occupied_dims\n\n @staticmethod\n def _get_batch_shape(cond_indep_stack):\n n_dims = max(-f.dim for f in cond_indep_stack)\n batch_shape = [1] * n_dims\n for f in cond_indep_stack:\n batch_shape[f.dim] = f.size\n return tuple(batch_shape)\n\n def process_message(self, msg):\n cond_indep_stack = msg['cond_indep_stack']\n frame = CondIndepStackFrame(self.name, self.dim, self.subsample_size)\n cond_indep_stack.append(frame)\n expected_shape = self._get_batch_shape(cond_indep_stack)\n dist_batch_shape = msg['fn'].batch_shape if msg['type'] == 'sample' else ()\n overlap_idx = len(expected_shape) - len(dist_batch_shape)\n if overlap_idx < 0:\n raise ValueError('Expected dimensions within plate = {}, which is less than the '\n 'distribution\\'s batch shape = {}.'.format(len(expected_shape), len(dist_batch_shape)))\n trailing_shape = expected_shape[overlap_idx:]\n # e.g. distribution with batch shape (1, 5) cannot be broadcast to (5, 5)\n broadcast_shape = lax.broadcast_shapes(trailing_shape, dist_batch_shape)\n if broadcast_shape != dist_batch_shape:\n raise ValueError('Distribution batch shape = {} cannot be broadcast up to {}. '\n 'Consider using unbatched distributions.'\n .format(dist_batch_shape, broadcast_shape))\n batch_shape = expected_shape[:overlap_idx]\n if 'sample_shape' in msg['kwargs']:\n batch_shape = lax.broadcast_shapes(msg['kwargs']['sample_shape'], batch_shape)\n msg['kwargs']['sample_shape'] = batch_shape\n msg['scale'] = msg['scale'] * self.size / self.subsample_size\n", "path": "numpyro/primitives.py"}]}
| 3,005 | 358 |
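Note on the record above: the patch adds a broadcast-compatibility check to `plate.process_message`, so the issue's repro now yields the plate-sized shape instead of a doubled batch. A minimal sketch of that repro, taken from the record's issue text and assuming an early numpyro release whose API matches the patched `primitives.py`:

```python
# Repro adapted verbatim from the issue in this record; assumes an early numpyro
# version whose plate/handlers API matches the patched primitives.py above.
import jax.numpy as np
import numpyro
import numpyro.distributions as dist
from numpyro.handlers import trace, seed

def model():
    with numpyro.plate('plate', 10):
        # The distribution's batch shape (10,) already matches the plate size,
        # so after the patch the plate adds no extra sample dimension.
        numpyro.sample('x', dist.Normal(np.zeros(10), 1))

# Before the patch this printed (10, 10); with the patch it prints (10,).
# A batch shape that cannot be broadcast up to the plate size now raises
# ValueError instead of silently producing a mis-shaped sample.
print(trace(seed(model, 0)).get_trace()['x']['value'].shape)
```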
gh_patches_debug_22365
|
rasdani/github-patches
|
git_diff
|
mampfes__hacs_waste_collection_schedule-1603
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug]: BSR_DE seems not to work anymore
### I Have A Problem With:
A specific source
### What's Your Problem
Source BSR_DE is not importing anything.
This is the Path to the Calendar
https://www.bsr.de/abfuhrkalender_ajax.php?script=dynamic_iCal_ajax&abf_strasse=Chaukenstr.,%2012524%20Berlin%20(Treptow-K%C3%B6penick)&abf_hausnr=3X&tab_control=Liste&abf_config_weihnachtsbaeume=&abf_config_restmuell=on&abf_config_biogut=on&abf_config_wertstoffe=on&abf_config_laubtonne=on
this is the Ical it should get.
BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//www.bsr.de//NONSGML kigkonsult.se iCalcreator 2.41.71//
CALSCALE:GREGORIAN
UID:65616338-3738-4064-a633-326437643934
BEGIN:VEVENT
UID:32386261-6536-4130-a166-356138626438
DTSTAMP:20231228T151659Z
DESCRIPTION:Abholung Hausmüll
DTSTART;TZID=Europe/Berlin:20231228T000000
LOCATION:Chaukenstr. 3X\,
SUMMARY:Abholung Hausmüll
END:VEVENT
BEGIN:VEVENT
UID:37383565-6638-4232-a331-633133633061
DTSTAMP:20231228T151659Z
DESCRIPTION:Abholung Biogut
DTSTART;TZID=Europe/Berlin:20240102T000000
LOCATION:Chaukenstr. 3X\,
SUMMARY:Abholung Biogut
END:VEVENT
BEGIN:VEVENT
UID:30626662-3731-4963-b836-653262613233
DTSTAMP:20231228T151659Z
DESCRIPTION:Abholung Wertstoffe (Abholung durch ALBA)
DTSTART;TZID=Europe/Berlin:20240109T000000
LOCATION:Chaukenstr. 3X\,
SUMMARY:Abholung Wertstoffe (Abholung durch ALBA)
END:VEVENT
BEGIN:VEVENT
UID:64356336-6132-4464-a339-646533373961
DTSTAMP:20231228T151659Z
DESCRIPTION:Abholung Hausmüll
DTSTART;TZID=Europe/Berlin:20240110T000000
LOCATION:Chaukenstr. 3X\,
SUMMARY:Abholung Hausmüll
END:VEVENT
BEGIN:VEVENT
UID:61636636-6661-4834-b063-653566636636
DTSTAMP:20231228T151659Z
DESCRIPTION:Abholung Biogut
DTSTART;TZID=Europe/Berlin:20240115T000000
LOCATION:Chaukenstr. 3X\,
SUMMARY:Abholung Biogut
END:VEVENT
BEGIN:VEVENT
UID:32326238-3862-4533-b361-353033303935
DTSTAMP:20231228T151659Z
DESCRIPTION:Abholung Wertstoffe (Abholung durch ALBA)
DTSTART;TZID=Europe/Berlin:20240123T000000
LOCATION:Chaukenstr. 3X\,
SUMMARY:Abholung Wertstoffe (Abholung durch ALBA)
END:VEVENT
BEGIN:VEVENT
UID:30396632-3239-4764-b439-336132306661
DTSTAMP:20231228T151659Z
DESCRIPTION:Abholung Hausmüll
DTSTART;TZID=Europe/Berlin:20240124T000000
LOCATION:Chaukenstr. 3X\,
SUMMARY:Abholung Hausmüll
END:VEVENT
END:VCALENDAR
### Source (if relevant)
BSR_DE
### Logs
```Shell
no relevant logs
```
### Relevant Configuration
```YAML
- name: bsr_de
args:
abf_strasse: Chaukenstr., 12524 Berlin (Treptow-Köpenick)
abf_hausnr: 3*
```
### Checklist Source Error
- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)
- [X] Checked that the website of your service provider is still working
- [X] Tested my attributes on the service provider website (if possible)
- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on "Redownload" and choose master as version)
### Checklist Sensor Error
- [x] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)
### Required
- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.
- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py`
Content:
```
1 import urllib.parse
2
3 import requests
4 from waste_collection_schedule import Collection # type: ignore[attr-defined]
5 from waste_collection_schedule.service.ICS import ICS
6
7 TITLE = "Berliner Stadtreinigungsbetriebe"
8 DESCRIPTION = "Source for Berliner Stadtreinigungsbetriebe waste collection."
9 URL = "https://bsr.de"
10 TEST_CASES = {
11 "Bahnhofstr., 12159 Berlin (Tempelhof-Schöneberg)": {
12 "abf_strasse": "Bahnhofstr., 12159 Berlin (Tempelhof-Schöneberg)",
13 "abf_hausnr": 1,
14 },
15 "Am Ried, 13467 Berlin (Reinickendorf)": {
16 "abf_strasse": "Am Ried, 13467 Berlin (Reinickendorf)",
17 "abf_hausnr": "11G",
18 },
19 }
20
21
22 def myquote(s):
23 # bsr uses strange quoting
24 return urllib.parse.quote(s, safe=",()")
25
26
27 class Source:
28 def __init__(self, abf_strasse, abf_hausnr):
29 self._abf_strasse = abf_strasse
30 self._abf_hausnr = abf_hausnr
31 self._ics = ICS(offset=1)
32
33 def fetch(self):
34 # get cookie
35 r = requests.get("https://www.bsr.de/abfuhrkalender-20520.php")
36 cookies = r.cookies
37
38 # get street name only (without PLZ)
39 street = self._abf_strasse.split(",")[0]
40
41 # start search using string name (without PLZ)
42 args = {"script": "dynamic_search", "step": 1, "q": street}
43 r = requests.get(
44 "https://www.bsr.de/abfuhrkalender_ajax.php", params=args, cookies=cookies
45 )
46
47 # retrieve house number list
48 args = {"script": "dynamic_search", "step": 2, "q": self._abf_strasse}
49 r = requests.get(
50 "https://www.bsr.de/abfuhrkalender_ajax.php", params=args, cookies=cookies
51 )
52
53 args = {
54 "abf_strasse": street,
55 "abf_hausnr": self._abf_hausnr,
56 "tab_control": "Jahr",
57 "abf_config_weihnachtsbaeume": "",
58 "abf_config_restmuell": "on",
59 "abf_config_biogut": "on",
60 "abf_config_wertstoffe": "on",
61 "abf_config_laubtonne": "on",
62 # "abf_selectmonth": "5 2020",
63 # "abf_datepicker": "28.04.2020",
64 # "listitems":7,
65 }
66 r = requests.post(
67 "https://www.bsr.de/abfuhrkalender_ajax.php?script=dynamic_kalender_ajax",
68 data=args,
69 cookies=cookies,
70 )
71
72 args = {
73 "script": "dynamic_iCal_ajax",
74 "abf_strasse": self._abf_strasse,
75 "abf_hausnr": self._abf_hausnr,
76 "tab_control": "Jahr",
77 "abf_config_weihnachtsbaeume": "",
78 "abf_config_restmuell": "on",
79 "abf_config_biogut": "on",
80 "abf_config_wertstoffe": "on",
81 "abf_config_laubtonne": "on",
82 # "abf_selectmonth": "5 2020",
83 # "listitems":7,
84 }
85
86 # create url using private url encoding
87 encoded = map(lambda key: f"{key}={myquote(str(args[key]))}", args.keys())
88 url = "https://www.bsr.de/abfuhrkalender_ajax.php?" + "&".join(encoded)
89 r = requests.get(url, cookies=cookies)
90
91 # parse ics file
92 dates = self._ics.convert(r.text)
93
94 entries = []
95 for d in dates:
96 entries.append(Collection(d[0], d[1]))
97 return entries
98
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py
@@ -1,4 +1,5 @@
import urllib.parse
+import datetime
import requests
from waste_collection_schedule import Collection # type: ignore[attr-defined]
@@ -91,6 +92,22 @@
# parse ics file
dates = self._ics.convert(r.text)
+
+ now = datetime.datetime.now()
+ # in nov/dec already fetch a monthly ics for january
+ # as yearly ics isn't available until the 1. of january.
+ if now.month in [11, 12]:
+ args["tab_control"] = "Monat"
+ args["abf_selectmonth"] = "1 " + str(now.year + 1)
+
+ # create url using private url encoding
+ encoded = map(lambda key: f"{key}={myquote(str(args[key]))}", args.keys())
+ url = "https://www.bsr.de/abfuhrkalender_ajax.php?" + "&".join(encoded)
+ r = requests.get(url, cookies=cookies)
+
+ # parse ics file
+ dates.extend (self._ics.convert(r.text) )
+
entries = []
for d in dates:
entries.append(Collection(d[0], d[1]))
|
{"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py\n@@ -1,4 +1,5 @@\n import urllib.parse\n+import datetime\n \n import requests\n from waste_collection_schedule import Collection # type: ignore[attr-defined]\n@@ -91,6 +92,22 @@\n # parse ics file\n dates = self._ics.convert(r.text)\n \n+\n+ now = datetime.datetime.now()\n+ # in nov/dec already fetch a monthly ics for january\n+ # as yearly ics isn't available until the 1. of january.\n+ if now.month in [11, 12]:\n+ args[\"tab_control\"] = \"Monat\"\n+ args[\"abf_selectmonth\"] = \"1 \" + str(now.year + 1)\n+\n+ # create url using private url encoding\n+ encoded = map(lambda key: f\"{key}={myquote(str(args[key]))}\", args.keys())\n+ url = \"https://www.bsr.de/abfuhrkalender_ajax.php?\" + \"&\".join(encoded)\n+ r = requests.get(url, cookies=cookies)\n+\n+ # parse ics file\n+ dates.extend (self._ics.convert(r.text) )\n+\n entries = []\n for d in dates:\n entries.append(Collection(d[0], d[1]))\n", "issue": "[Bug]: BSR_DE seems not to work anymore\n### I Have A Problem With:\n\nA specific source\n\n### What's Your Problem\n\nSource BSR_DE is not importing anything.\r\n\r\nThis is the Path to the Calendar\r\nhttps://www.bsr.de/abfuhrkalender_ajax.php?script=dynamic_iCal_ajax&abf_strasse=Chaukenstr.,%2012524%20Berlin%20(Treptow-K%C3%B6penick)&abf_hausnr=3X&tab_control=Liste&abf_config_weihnachtsbaeume=&abf_config_restmuell=on&abf_config_biogut=on&abf_config_wertstoffe=on&abf_config_laubtonne=on\r\n\r\nthis is the Ical it should get.\r\n\r\nBEGIN:VCALENDAR\r\nVERSION:2.0\r\nPRODID:-//www.bsr.de//NONSGML kigkonsult.se iCalcreator 2.41.71//\r\nCALSCALE:GREGORIAN\r\nUID:65616338-3738-4064-a633-326437643934\r\nBEGIN:VEVENT\r\nUID:32386261-6536-4130-a166-356138626438\r\nDTSTAMP:20231228T151659Z\r\nDESCRIPTION:Abholung Hausm\u00fcll\r\nDTSTART;TZID=Europe/Berlin:20231228T000000\r\nLOCATION:Chaukenstr. 3X\\,\r\nSUMMARY:Abholung Hausm\u00fcll\r\nEND:VEVENT\r\nBEGIN:VEVENT\r\nUID:37383565-6638-4232-a331-633133633061\r\nDTSTAMP:20231228T151659Z\r\nDESCRIPTION:Abholung Biogut\r\nDTSTART;TZID=Europe/Berlin:20240102T000000\r\nLOCATION:Chaukenstr. 3X\\,\r\nSUMMARY:Abholung Biogut\r\nEND:VEVENT\r\nBEGIN:VEVENT\r\nUID:30626662-3731-4963-b836-653262613233\r\nDTSTAMP:20231228T151659Z\r\nDESCRIPTION:Abholung Wertstoffe (Abholung durch ALBA)\r\nDTSTART;TZID=Europe/Berlin:20240109T000000\r\nLOCATION:Chaukenstr. 3X\\,\r\nSUMMARY:Abholung Wertstoffe (Abholung durch ALBA)\r\nEND:VEVENT\r\nBEGIN:VEVENT\r\nUID:64356336-6132-4464-a339-646533373961\r\nDTSTAMP:20231228T151659Z\r\nDESCRIPTION:Abholung Hausm\u00fcll\r\nDTSTART;TZID=Europe/Berlin:20240110T000000\r\nLOCATION:Chaukenstr. 3X\\,\r\nSUMMARY:Abholung Hausm\u00fcll\r\nEND:VEVENT\r\nBEGIN:VEVENT\r\nUID:61636636-6661-4834-b063-653566636636\r\nDTSTAMP:20231228T151659Z\r\nDESCRIPTION:Abholung Biogut\r\nDTSTART;TZID=Europe/Berlin:20240115T000000\r\nLOCATION:Chaukenstr. 3X\\,\r\nSUMMARY:Abholung Biogut\r\nEND:VEVENT\r\nBEGIN:VEVENT\r\nUID:32326238-3862-4533-b361-353033303935\r\nDTSTAMP:20231228T151659Z\r\nDESCRIPTION:Abholung Wertstoffe (Abholung durch ALBA)\r\nDTSTART;TZID=Europe/Berlin:20240123T000000\r\nLOCATION:Chaukenstr. 
3X\\,\r\nSUMMARY:Abholung Wertstoffe (Abholung durch ALBA)\r\nEND:VEVENT\r\nBEGIN:VEVENT\r\nUID:30396632-3239-4764-b439-336132306661\r\nDTSTAMP:20231228T151659Z\r\nDESCRIPTION:Abholung Hausm\u00fcll\r\nDTSTART;TZID=Europe/Berlin:20240124T000000\r\nLOCATION:Chaukenstr. 3X\\,\r\nSUMMARY:Abholung Hausm\u00fcll\r\nEND:VEVENT\r\nEND:VCALENDAR\n\n### Source (if relevant)\n\nBSR_DE\n\n### Logs\n\n```Shell\nno relevant logs\n```\n\n\n### Relevant Configuration\n\n```YAML\n- name: bsr_de\r\n args:\r\n abf_strasse: Chaukenstr., 12524 Berlin (Treptow-K\u00f6penick)\r\n abf_hausnr: 3*\n```\n\n\n### Checklist Source Error\n\n- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)\n- [X] Checked that the website of your service provider is still working\n- [X] Tested my attributes on the service provider website (if possible)\n- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on \"Redownload\" and choose master as version)\n\n### Checklist Sensor Error\n\n- [x] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)\n\n### Required\n\n- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.\n- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.\n", "before_files": [{"content": "import urllib.parse\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\n\nTITLE = \"Berliner Stadtreinigungsbetriebe\"\nDESCRIPTION = \"Source for Berliner Stadtreinigungsbetriebe waste collection.\"\nURL = \"https://bsr.de\"\nTEST_CASES = {\n \"Bahnhofstr., 12159 Berlin (Tempelhof-Sch\u00f6neberg)\": {\n \"abf_strasse\": \"Bahnhofstr., 12159 Berlin (Tempelhof-Sch\u00f6neberg)\",\n \"abf_hausnr\": 1,\n },\n \"Am Ried, 13467 Berlin (Reinickendorf)\": {\n \"abf_strasse\": \"Am Ried, 13467 Berlin (Reinickendorf)\",\n \"abf_hausnr\": \"11G\",\n },\n}\n\n\ndef myquote(s):\n # bsr uses strange quoting\n return urllib.parse.quote(s, safe=\",()\")\n\n\nclass Source:\n def __init__(self, abf_strasse, abf_hausnr):\n self._abf_strasse = abf_strasse\n self._abf_hausnr = abf_hausnr\n self._ics = ICS(offset=1)\n\n def fetch(self):\n # get cookie\n r = requests.get(\"https://www.bsr.de/abfuhrkalender-20520.php\")\n cookies = r.cookies\n\n # get street name only (without PLZ)\n street = self._abf_strasse.split(\",\")[0]\n\n # start search using string name (without PLZ)\n args = {\"script\": \"dynamic_search\", \"step\": 1, \"q\": street}\n r = requests.get(\n \"https://www.bsr.de/abfuhrkalender_ajax.php\", params=args, cookies=cookies\n )\n\n # retrieve house number list\n args = {\"script\": \"dynamic_search\", \"step\": 2, \"q\": self._abf_strasse}\n r = requests.get(\n \"https://www.bsr.de/abfuhrkalender_ajax.php\", params=args, cookies=cookies\n )\n\n args = {\n \"abf_strasse\": street,\n \"abf_hausnr\": self._abf_hausnr,\n \"tab_control\": \"Jahr\",\n \"abf_config_weihnachtsbaeume\": \"\",\n \"abf_config_restmuell\": \"on\",\n \"abf_config_biogut\": \"on\",\n \"abf_config_wertstoffe\": \"on\",\n \"abf_config_laubtonne\": \"on\",\n # \"abf_selectmonth\": \"5 2020\",\n # \"abf_datepicker\": \"28.04.2020\",\n # \"listitems\":7,\n }\n r = 
requests.post(\n \"https://www.bsr.de/abfuhrkalender_ajax.php?script=dynamic_kalender_ajax\",\n data=args,\n cookies=cookies,\n )\n\n args = {\n \"script\": \"dynamic_iCal_ajax\",\n \"abf_strasse\": self._abf_strasse,\n \"abf_hausnr\": self._abf_hausnr,\n \"tab_control\": \"Jahr\",\n \"abf_config_weihnachtsbaeume\": \"\",\n \"abf_config_restmuell\": \"on\",\n \"abf_config_biogut\": \"on\",\n \"abf_config_wertstoffe\": \"on\",\n \"abf_config_laubtonne\": \"on\",\n # \"abf_selectmonth\": \"5 2020\",\n # \"listitems\":7,\n }\n\n # create url using private url encoding\n encoded = map(lambda key: f\"{key}={myquote(str(args[key]))}\", args.keys())\n url = \"https://www.bsr.de/abfuhrkalender_ajax.php?\" + \"&\".join(encoded)\n r = requests.get(url, cookies=cookies)\n\n # parse ics file\n dates = self._ics.convert(r.text)\n\n entries = []\n for d in dates:\n entries.append(Collection(d[0], d[1]))\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py"}], "after_files": [{"content": "import urllib.parse\nimport datetime\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\n\nTITLE = \"Berliner Stadtreinigungsbetriebe\"\nDESCRIPTION = \"Source for Berliner Stadtreinigungsbetriebe waste collection.\"\nURL = \"https://bsr.de\"\nTEST_CASES = {\n \"Bahnhofstr., 12159 Berlin (Tempelhof-Sch\u00f6neberg)\": {\n \"abf_strasse\": \"Bahnhofstr., 12159 Berlin (Tempelhof-Sch\u00f6neberg)\",\n \"abf_hausnr\": 1,\n },\n \"Am Ried, 13467 Berlin (Reinickendorf)\": {\n \"abf_strasse\": \"Am Ried, 13467 Berlin (Reinickendorf)\",\n \"abf_hausnr\": \"11G\",\n },\n}\n\n\ndef myquote(s):\n # bsr uses strange quoting\n return urllib.parse.quote(s, safe=\",()\")\n\n\nclass Source:\n def __init__(self, abf_strasse, abf_hausnr):\n self._abf_strasse = abf_strasse\n self._abf_hausnr = abf_hausnr\n self._ics = ICS(offset=1)\n\n def fetch(self):\n # get cookie\n r = requests.get(\"https://www.bsr.de/abfuhrkalender-20520.php\")\n cookies = r.cookies\n\n # get street name only (without PLZ)\n street = self._abf_strasse.split(\",\")[0]\n\n # start search using string name (without PLZ)\n args = {\"script\": \"dynamic_search\", \"step\": 1, \"q\": street}\n r = requests.get(\n \"https://www.bsr.de/abfuhrkalender_ajax.php\", params=args, cookies=cookies\n )\n\n # retrieve house number list\n args = {\"script\": \"dynamic_search\", \"step\": 2, \"q\": self._abf_strasse}\n r = requests.get(\n \"https://www.bsr.de/abfuhrkalender_ajax.php\", params=args, cookies=cookies\n )\n\n args = {\n \"abf_strasse\": street,\n \"abf_hausnr\": self._abf_hausnr,\n \"tab_control\": \"Jahr\",\n \"abf_config_weihnachtsbaeume\": \"\",\n \"abf_config_restmuell\": \"on\",\n \"abf_config_biogut\": \"on\",\n \"abf_config_wertstoffe\": \"on\",\n \"abf_config_laubtonne\": \"on\",\n # \"abf_selectmonth\": \"5 2020\",\n # \"abf_datepicker\": \"28.04.2020\",\n # \"listitems\":7,\n }\n r = requests.post(\n \"https://www.bsr.de/abfuhrkalender_ajax.php?script=dynamic_kalender_ajax\",\n data=args,\n cookies=cookies,\n )\n\n args = {\n \"script\": \"dynamic_iCal_ajax\",\n \"abf_strasse\": self._abf_strasse,\n \"abf_hausnr\": self._abf_hausnr,\n \"tab_control\": \"Jahr\",\n \"abf_config_weihnachtsbaeume\": \"\",\n \"abf_config_restmuell\": \"on\",\n \"abf_config_biogut\": \"on\",\n \"abf_config_wertstoffe\": \"on\",\n \"abf_config_laubtonne\": \"on\",\n # \"abf_selectmonth\": \"5 2020\",\n # \"listitems\":7,\n 
}\n\n # create url using private url encoding\n encoded = map(lambda key: f\"{key}={myquote(str(args[key]))}\", args.keys())\n url = \"https://www.bsr.de/abfuhrkalender_ajax.php?\" + \"&\".join(encoded)\n r = requests.get(url, cookies=cookies)\n\n # parse ics file\n dates = self._ics.convert(r.text)\n\n\n now = datetime.datetime.now()\n # in nov/dec already fetch a monthly ics for january\n # as yearly ics isn't available until the 1. of january.\n if now.month in [11, 12]:\n args[\"tab_control\"] = \"Monat\"\n args[\"abf_selectmonth\"] = \"1 \" + str(now.year + 1)\n\n # create url using private url encoding\n encoded = map(lambda key: f\"{key}={myquote(str(args[key]))}\", args.keys())\n url = \"https://www.bsr.de/abfuhrkalender_ajax.php?\" + \"&\".join(encoded)\n r = requests.get(url, cookies=cookies)\n\n # parse ics file\n dates.extend (self._ics.convert(r.text) )\n\n entries = []\n for d in dates:\n entries.append(Collection(d[0], d[1]))\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py"}]}
| 2,878 | 351 |
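Note on the record above: the fix works around the BSR site only publishing the yearly iCal for a new year on 1 January, so in November and December a second, month-scoped request for January is issued and merged in. A minimal sketch of that fallback, with `january_override` being a hypothetical helper name (the actual patch inlines this logic in `Source.fetch`):

```python
# Sketch only: restates the Nov/Dec fallback from the golden diff as a standalone
# helper. The helper name and signature are illustrative, not part of the source.
import datetime

def january_override(args):
    """Return a copy of the request args scoped to January of the coming year,
    or None outside November/December when the yearly iCal already suffices."""
    now = datetime.datetime.now()
    if now.month not in (11, 12):
        return None
    extra = dict(args)
    extra["tab_control"] = "Monat"                        # month view instead of "Jahr"
    extra["abf_selectmonth"] = "1 " + str(now.year + 1)   # January of next year
    return extra

# Usage mirrors the patch: fetch the yearly iCal first, then, if an override is
# returned, fetch the January iCal with the same cookies and extend the dates.
```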
gh_patches_debug_21910
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-61115
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
native symbolicated 'const' functions incorrectly trimmed
### Environment
self-hosted (https://develop.sentry.dev/self-hosted/)
### Steps to Reproduce
Have the following verbose native frame `ShellCorona::screenInvariants() const [clone .cold] (shellcorona.cpp:847)`
The event has platform:native and the project has platform:native-qt in case that makes a difference
Full json blob of the frame:
```
{
"function": "const",
"raw_function": "ShellCorona::screenInvariants() const [clone .cold]",
"symbol": "_ZNK11ShellCorona16screenInvariantsEv.cold",
"package": "/usr/bin/plasmashell",
"filename": "shellcorona.cpp",
"abs_path": "obj-x86_64-linux-gnu/shell/shell/shellcorona.cpp",
"lineno": 847,
"in_app": false,
"vars": {},
"data": {
"orig_in_app": -1,
"symbolicator_status": "symbolicated"
},
"instruction_addr": "0x558ba221ba82"
},
```
### Expected Result
Trimmed name should be something like `ShellCorona::screenInvariants (shellcorona.cpp:847)`
### Actual Result
Trimmed name is `const (shellcorona.cpp:847)`
### Product Area
Issues
### Link
_No response_
### DSN
_No response_
### Version
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sentry/stacktraces/functions.py`
Content:
```
1 from __future__ import annotations
2
3 import re
4 from typing import Any
5 from urllib.parse import urlparse
6
7 from django.core.exceptions import ValidationError
8 from django.core.validators import URLValidator
9
10 from sentry.interfaces.stacktrace import Frame
11 from sentry.stacktraces.platform import get_behavior_family_for_platform
12 from sentry.utils.safe import setdefault_path
13
14 _windecl_hash = re.compile(r"^@?(.*?)@[0-9]+$")
15 _rust_hash = re.compile(r"::h[a-z0-9]{16}$")
16 _gnu_version = re.compile(r"@@?GLIBC_([0-9.]+)$")
17 _cpp_trailer_re = re.compile(r"(\bconst\b|&)$")
18 _rust_blanket_re = re.compile(r"^([A-Z] as )")
19 _lambda_re = re.compile(
20 r"""(?x)
21 # gcc
22 (?:
23 \{
24 lambda\(.*?\)\#\d+
25 \}
26 ) |
27 # msvc
28 (?:
29 \blambda_[a-f0-9]{32}\b
30 ) |
31 # clang
32 (?:
33 \$_\d+\b
34 )
35 """
36 )
37 _anon_namespace_re = re.compile(
38 r"""(?x)
39 \?A0x[a-f0-9]{8}::
40 """
41 )
42
43
44 PAIRS = {"(": ")", "{": "}", "[": "]", "<": ">"}
45
46
47 def replace_enclosed_string(s, start, end, replacement=None):
48 if start not in s:
49 return s
50
51 depth = 0
52
53 rv = []
54 pair_start = None
55 for idx, char in enumerate(s):
56 if char == start:
57 if depth == 0:
58 pair_start = idx
59 depth += 1
60 elif char == end:
61 depth -= 1
62 if depth == 0:
63 if replacement is not None:
64 if callable(replacement):
65 rv.append(replacement(s[pair_start + 1 : idx], pair_start))
66 else:
67 rv.append(replacement)
68 elif depth == 0:
69 rv.append(char)
70
71 return "".join(rv)
72
73
74 def split_func_tokens(s):
75 buf = []
76 rv = []
77 stack = []
78 end = 0
79
80 for idx, char in enumerate(s):
81 if char in PAIRS:
82 stack.append(PAIRS[char])
83 elif stack and char == stack[-1]:
84 stack.pop()
85 if not stack:
86 buf.append(s[end : idx + 1])
87 end = idx + 1
88 elif not stack:
89 if char.isspace():
90 if buf:
91 rv.append(buf)
92 buf = []
93 else:
94 buf.append(s[end : idx + 1])
95 end = idx + 1
96
97 if buf:
98 rv.append(buf)
99
100 return ["".join(x) for x in rv]
101
102
103 def trim_function_name(function, platform, normalize_lambdas=True):
104 """Given a function value from the frame's function attribute this returns
105 a trimmed version that can be stored in `function_name`. This is only used
106 if the client did not supply a value itself already.
107 """
108 if platform == "csharp":
109 return trim_csharp_function_name(function)
110 if get_behavior_family_for_platform(platform) == "native":
111 return trim_native_function_name(function, platform, normalize_lambdas=normalize_lambdas)
112 return function
113
114
115 def trim_csharp_function_name(function):
116 """This trims off signatures from C# frames. This takes advantage of the
117 Unity not emitting any return values and using a space before the argument
118 list.
119
120 Note that there is disagreement between Unity and the main .NET SDK about
121 the function names. The Unity SDK emits the entire function with module
122 in the `function` name similar to native, the .NET SDK emits it in individual
123 parts of the frame.
124 """
125 return function.split(" (", 1)[0]
126
127
128 def trim_native_function_name(function, platform, normalize_lambdas=True):
129 if function in ("<redacted>", "<unknown>"):
130 return function
131
132 original_function = function
133 function = function.strip()
134
135 # Ensure we don't operate on objc functions
136 if function.startswith(("[", "+[", "-[")):
137 return function
138
139 # Chop off C++ trailers
140 while True:
141 match = _cpp_trailer_re.search(function)
142 if match is None:
143 break
144 function = function[: match.start()].rstrip()
145
146 # Because operator<< really screws with our balancing, so let's work
147 # around that by replacing it with a character we do not observe in
148 # `split_func_tokens` or `replace_enclosed_string`.
149 function = (
150 function.replace("operator<<", "operator⟨⟨")
151 .replace("operator<", "operator⟨")
152 .replace("operator()", "operator◯")
153 .replace("operator->", "operator⟿")
154 .replace(" -> ", " ⟿ ")
155 .replace("`anonymous namespace'", "〔anonymousnamespace〕")
156 )
157
158 # normalize C++ lambdas. This is necessary because different
159 # compilers use different rules for now to name a lambda and they are
160 # all quite inconsistent. This does not give us perfect answers to
161 # this problem but closer. In particular msvc will call a lambda
162 # something like `lambda_deadbeefeefffeeffeeff` whereas clang for
163 # instance will name it `main::$_0` which will tell us in which outer
164 # function it was declared.
165 if normalize_lambdas:
166 function = _lambda_re.sub("lambda", function)
167
168 # Normalize MSVC anonymous namespaces from inline functions. For inline
169 # functions, the compiler inconsistently renders anonymous namespaces with
170 # their hash. For regular functions, "`anonymous namespace'" is used.
171 # The regular expression matches the trailing "::" to avoid accidental
172 # replacement in mangled function names.
173 if normalize_lambdas:
174 function = _anon_namespace_re.sub("〔anonymousnamespace〕::", function)
175
176 # Remove the arguments if there is one.
177 def process_args(value, start):
178 value = value.strip()
179 if value in ("anonymous namespace", "operator"):
180 return "(%s)" % value
181 return ""
182
183 function = replace_enclosed_string(function, "(", ")", process_args)
184
185 # Resolve generic types, but special case rust which uses things like
186 # <Foo as Bar>::baz to denote traits.
187 def process_generics(value, start):
188 # Special case for lambdas
189 if value == "lambda" or _lambda_re.match(value):
190 return "<%s>" % value
191
192 if start > 0:
193 return "<T>"
194
195 # Rust special cases
196 value = _rust_blanket_re.sub("", value) # prefer trait for blanket impls
197 value = replace_enclosed_string(value, "<", ">", process_generics)
198 return value.split(" as ", 1)[0]
199
200 function = replace_enclosed_string(function, "<", ">", process_generics)
201
202 # Remove special `[clone .foo]` annotations for cloned/split functions
203 def process_brackets(value, start):
204 if value.startswith("clone ."):
205 return ""
206 return "[%s]" % value
207
208 function = replace_enclosed_string(function, "[", "]", process_brackets)
209
210 is_thunk = "thunk for " in function # swift
211
212 tokens = split_func_tokens(function)
213
214 # MSVC demangles generic operator functions with a space between the
215 # function name and the generics. Ensure that those two components both end
216 # up in the function name.
217 if len(tokens) > 1 and tokens[-1] == "<T>":
218 tokens.pop()
219 tokens[-1] += " <T>"
220
221 # find the token which is the function name. Since we chopped of C++
222 # trailers there are only two cases we care about: the token left to
223 # the -> return marker which is for instance used in Swift and if that
224 # is not found, consider the last token to be the function.
225 #
226 # ["unsigned", "int", "whatever"] -> whatever
227 # ["@objc", "whatever", "->", "int"] -> whatever
228 try:
229 func_token = tokens[tokens.index("⟿") - 1]
230 except ValueError:
231 if tokens:
232 func_token = tokens[-1]
233 else:
234 func_token = None
235
236 if func_token:
237 if func_token.startswith("@") and platform in ("cocoa", "swift"):
238 # Found a Swift attribute instead of a function name, must be an
239 # anonymous function
240 func_token = ("thunk for " if is_thunk else "") + "closure"
241 function = (
242 func_token.replace("⟨", "<")
243 .replace("◯", "()")
244 .replace("⟿", "->")
245 .replace("〔anonymousnamespace〕", "`anonymous namespace'")
246 )
247
248 # This really should never happen
249 else:
250 function = original_function
251
252 # trim off rust markers
253 function = _rust_hash.sub("", function)
254
255 # trim off gnu symbol versioning info
256 function = _gnu_version.sub("", function)
257
258 # trim off windows decl markers
259 return _windecl_hash.sub("\\1", function)
260
261
262 def get_function_name_for_frame(frame, platform=None):
263 """Given a frame object or dictionary this returns the actual function
264 name trimmed.
265 """
266 if hasattr(frame, "get_raw_data"):
267 frame = frame.get_raw_data()
268
269 # if there is a raw function, prioritize the function unchanged
270 if frame.get("raw_function"):
271 return frame.get("function")
272
273 # otherwise trim the function on demand
274 rv = frame.get("function")
275 if rv:
276 return trim_function_name(rv, frame.get("platform") or platform)
277
278
279 def get_source_link_for_frame(frame: Frame) -> str | None:
280 """If source_link points to a GitHub raw content link, process it so that
281 we can return the GitHub equivalent with the line number, and use it as a
282 stacktrace link. Otherwise, return the link as is.
283 """
284 source_link = getattr(frame, "source_link", None)
285
286 try:
287 URLValidator()(source_link)
288 except ValidationError:
289 return None
290
291 parse_result = urlparse(source_link)
292 if parse_result.netloc == "raw.githubusercontent.com":
293 path_parts = parse_result.path.split("/")
294 if path_parts[0] == "":
295 # the path starts with a "/" so the first element is empty string
296 del path_parts[0]
297
298 # at minimum, the path must have an author/org, a repo, and a file
299 if len(path_parts) >= 3:
300 source_link = "https://www.github.com/" + path_parts[0] + "/" + path_parts[1] + "/blob"
301 for remaining_part in path_parts[2:]:
302 source_link += "/" + remaining_part
303 if getattr(frame, "lineno", None):
304 source_link += "#L" + str(frame.lineno)
305 return source_link
306
307
308 def set_in_app(frame: dict[str, Any], value: bool) -> None:
309 """Set the value of in_app in the frame to the given value."""
310 orig_in_app = frame.get("in_app")
311 if orig_in_app == value:
312 return
313
314 orig_in_app = int(orig_in_app) if orig_in_app is not None else -1
315 setdefault_path(frame, "data", "orig_in_app", value=orig_in_app)
316 frame["in_app"] = value
317
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/sentry/stacktraces/functions.py b/src/sentry/stacktraces/functions.py
--- a/src/sentry/stacktraces/functions.py
+++ b/src/sentry/stacktraces/functions.py
@@ -136,6 +136,14 @@
if function.startswith(("[", "+[", "-[")):
return function
+ # Remove special `[clone .foo]` annotations for cloned/split functions
+ def process_brackets(value, start):
+ if value.startswith("clone ."):
+ return ""
+ return "[%s]" % value
+
+ function = replace_enclosed_string(function, "[", "]", process_brackets).rstrip()
+
# Chop off C++ trailers
while True:
match = _cpp_trailer_re.search(function)
@@ -199,14 +207,6 @@
function = replace_enclosed_string(function, "<", ">", process_generics)
- # Remove special `[clone .foo]` annotations for cloned/split functions
- def process_brackets(value, start):
- if value.startswith("clone ."):
- return ""
- return "[%s]" % value
-
- function = replace_enclosed_string(function, "[", "]", process_brackets)
-
is_thunk = "thunk for " in function # swift
tokens = split_func_tokens(function)
|
{"golden_diff": "diff --git a/src/sentry/stacktraces/functions.py b/src/sentry/stacktraces/functions.py\n--- a/src/sentry/stacktraces/functions.py\n+++ b/src/sentry/stacktraces/functions.py\n@@ -136,6 +136,14 @@\n if function.startswith((\"[\", \"+[\", \"-[\")):\n return function\n \n+ # Remove special `[clone .foo]` annotations for cloned/split functions\n+ def process_brackets(value, start):\n+ if value.startswith(\"clone .\"):\n+ return \"\"\n+ return \"[%s]\" % value\n+\n+ function = replace_enclosed_string(function, \"[\", \"]\", process_brackets).rstrip()\n+\n # Chop off C++ trailers\n while True:\n match = _cpp_trailer_re.search(function)\n@@ -199,14 +207,6 @@\n \n function = replace_enclosed_string(function, \"<\", \">\", process_generics)\n \n- # Remove special `[clone .foo]` annotations for cloned/split functions\n- def process_brackets(value, start):\n- if value.startswith(\"clone .\"):\n- return \"\"\n- return \"[%s]\" % value\n-\n- function = replace_enclosed_string(function, \"[\", \"]\", process_brackets)\n-\n is_thunk = \"thunk for \" in function # swift\n \n tokens = split_func_tokens(function)\n", "issue": "native symbolicated 'const' functions incorrectly trimmed\n### Environment\n\nself-hosted (https://develop.sentry.dev/self-hosted/)\n\n### Steps to Reproduce\n\nHave the following verbose native frame `ShellCorona::screenInvariants() const [clone .cold] (shellcorona.cpp:847)`\r\n\r\nThe event has platform:native and the project has platform:native-qt in case that makes a difference\r\n\r\nFull json blob of the frame:\r\n```\r\n {\r\n \"function\": \"const\",\r\n \"raw_function\": \"ShellCorona::screenInvariants() const [clone .cold]\",\r\n \"symbol\": \"_ZNK11ShellCorona16screenInvariantsEv.cold\",\r\n \"package\": \"/usr/bin/plasmashell\",\r\n \"filename\": \"shellcorona.cpp\",\r\n \"abs_path\": \"obj-x86_64-linux-gnu/shell/shell/shellcorona.cpp\",\r\n \"lineno\": 847,\r\n \"in_app\": false,\r\n \"vars\": {},\r\n \"data\": {\r\n \"orig_in_app\": -1,\r\n \"symbolicator_status\": \"symbolicated\"\r\n },\r\n \"instruction_addr\": \"0x558ba221ba82\"\r\n },\r\n```\n\n### Expected Result\n\nTrimmed name should be something like `ShellCorona::screenInvariants (shellcorona.cpp:847)`\n\n### Actual Result\n\nTrimmed name is `const (shellcorona.cpp:847)`\n\n### Product Area\n\nIssues\n\n### Link\n\n_No response_\n\n### DSN\n\n_No response_\n\n### Version\n\n_No response_\n", "before_files": [{"content": "from __future__ import annotations\n\nimport re\nfrom typing import Any\nfrom urllib.parse import urlparse\n\nfrom django.core.exceptions import ValidationError\nfrom django.core.validators import URLValidator\n\nfrom sentry.interfaces.stacktrace import Frame\nfrom sentry.stacktraces.platform import get_behavior_family_for_platform\nfrom sentry.utils.safe import setdefault_path\n\n_windecl_hash = re.compile(r\"^@?(.*?)@[0-9]+$\")\n_rust_hash = re.compile(r\"::h[a-z0-9]{16}$\")\n_gnu_version = re.compile(r\"@@?GLIBC_([0-9.]+)$\")\n_cpp_trailer_re = re.compile(r\"(\\bconst\\b|&)$\")\n_rust_blanket_re = re.compile(r\"^([A-Z] as )\")\n_lambda_re = re.compile(\n r\"\"\"(?x)\n # gcc\n (?:\n \\{\n lambda\\(.*?\\)\\#\\d+\n \\}\n ) |\n # msvc\n (?:\n \\blambda_[a-f0-9]{32}\\b\n ) |\n # clang\n (?:\n \\$_\\d+\\b\n )\n \"\"\"\n)\n_anon_namespace_re = re.compile(\n r\"\"\"(?x)\n \\?A0x[a-f0-9]{8}::\n \"\"\"\n)\n\n\nPAIRS = {\"(\": \")\", \"{\": \"}\", \"[\": \"]\", \"<\": \">\"}\n\n\ndef replace_enclosed_string(s, start, end, replacement=None):\n if start not in s:\n return s\n\n depth = 
0\n\n rv = []\n pair_start = None\n for idx, char in enumerate(s):\n if char == start:\n if depth == 0:\n pair_start = idx\n depth += 1\n elif char == end:\n depth -= 1\n if depth == 0:\n if replacement is not None:\n if callable(replacement):\n rv.append(replacement(s[pair_start + 1 : idx], pair_start))\n else:\n rv.append(replacement)\n elif depth == 0:\n rv.append(char)\n\n return \"\".join(rv)\n\n\ndef split_func_tokens(s):\n buf = []\n rv = []\n stack = []\n end = 0\n\n for idx, char in enumerate(s):\n if char in PAIRS:\n stack.append(PAIRS[char])\n elif stack and char == stack[-1]:\n stack.pop()\n if not stack:\n buf.append(s[end : idx + 1])\n end = idx + 1\n elif not stack:\n if char.isspace():\n if buf:\n rv.append(buf)\n buf = []\n else:\n buf.append(s[end : idx + 1])\n end = idx + 1\n\n if buf:\n rv.append(buf)\n\n return [\"\".join(x) for x in rv]\n\n\ndef trim_function_name(function, platform, normalize_lambdas=True):\n \"\"\"Given a function value from the frame's function attribute this returns\n a trimmed version that can be stored in `function_name`. This is only used\n if the client did not supply a value itself already.\n \"\"\"\n if platform == \"csharp\":\n return trim_csharp_function_name(function)\n if get_behavior_family_for_platform(platform) == \"native\":\n return trim_native_function_name(function, platform, normalize_lambdas=normalize_lambdas)\n return function\n\n\ndef trim_csharp_function_name(function):\n \"\"\"This trims off signatures from C# frames. This takes advantage of the\n Unity not emitting any return values and using a space before the argument\n list.\n\n Note that there is disagreement between Unity and the main .NET SDK about\n the function names. The Unity SDK emits the entire function with module\n in the `function` name similar to native, the .NET SDK emits it in individual\n parts of the frame.\n \"\"\"\n return function.split(\" (\", 1)[0]\n\n\ndef trim_native_function_name(function, platform, normalize_lambdas=True):\n if function in (\"<redacted>\", \"<unknown>\"):\n return function\n\n original_function = function\n function = function.strip()\n\n # Ensure we don't operate on objc functions\n if function.startswith((\"[\", \"+[\", \"-[\")):\n return function\n\n # Chop off C++ trailers\n while True:\n match = _cpp_trailer_re.search(function)\n if match is None:\n break\n function = function[: match.start()].rstrip()\n\n # Because operator<< really screws with our balancing, so let's work\n # around that by replacing it with a character we do not observe in\n # `split_func_tokens` or `replace_enclosed_string`.\n function = (\n function.replace(\"operator<<\", \"operator\u27e8\u27e8\")\n .replace(\"operator<\", \"operator\u27e8\")\n .replace(\"operator()\", \"operator\u25ef\")\n .replace(\"operator->\", \"operator\u27ff\")\n .replace(\" -> \", \" \u27ff \")\n .replace(\"`anonymous namespace'\", \"\u3014anonymousnamespace\u3015\")\n )\n\n # normalize C++ lambdas. This is necessary because different\n # compilers use different rules for now to name a lambda and they are\n # all quite inconsistent. This does not give us perfect answers to\n # this problem but closer. In particular msvc will call a lambda\n # something like `lambda_deadbeefeefffeeffeeff` whereas clang for\n # instance will name it `main::$_0` which will tell us in which outer\n # function it was declared.\n if normalize_lambdas:\n function = _lambda_re.sub(\"lambda\", function)\n\n # Normalize MSVC anonymous namespaces from inline functions. 
For inline\n # functions, the compiler inconsistently renders anonymous namespaces with\n # their hash. For regular functions, \"`anonymous namespace'\" is used.\n # The regular expression matches the trailing \"::\" to avoid accidental\n # replacement in mangled function names.\n if normalize_lambdas:\n function = _anon_namespace_re.sub(\"\u3014anonymousnamespace\u3015::\", function)\n\n # Remove the arguments if there is one.\n def process_args(value, start):\n value = value.strip()\n if value in (\"anonymous namespace\", \"operator\"):\n return \"(%s)\" % value\n return \"\"\n\n function = replace_enclosed_string(function, \"(\", \")\", process_args)\n\n # Resolve generic types, but special case rust which uses things like\n # <Foo as Bar>::baz to denote traits.\n def process_generics(value, start):\n # Special case for lambdas\n if value == \"lambda\" or _lambda_re.match(value):\n return \"<%s>\" % value\n\n if start > 0:\n return \"<T>\"\n\n # Rust special cases\n value = _rust_blanket_re.sub(\"\", value) # prefer trait for blanket impls\n value = replace_enclosed_string(value, \"<\", \">\", process_generics)\n return value.split(\" as \", 1)[0]\n\n function = replace_enclosed_string(function, \"<\", \">\", process_generics)\n\n # Remove special `[clone .foo]` annotations for cloned/split functions\n def process_brackets(value, start):\n if value.startswith(\"clone .\"):\n return \"\"\n return \"[%s]\" % value\n\n function = replace_enclosed_string(function, \"[\", \"]\", process_brackets)\n\n is_thunk = \"thunk for \" in function # swift\n\n tokens = split_func_tokens(function)\n\n # MSVC demangles generic operator functions with a space between the\n # function name and the generics. Ensure that those two components both end\n # up in the function name.\n if len(tokens) > 1 and tokens[-1] == \"<T>\":\n tokens.pop()\n tokens[-1] += \" <T>\"\n\n # find the token which is the function name. 
Since we chopped of C++\n # trailers there are only two cases we care about: the token left to\n # the -> return marker which is for instance used in Swift and if that\n # is not found, consider the last token to be the function.\n #\n # [\"unsigned\", \"int\", \"whatever\"] -> whatever\n # [\"@objc\", \"whatever\", \"->\", \"int\"] -> whatever\n try:\n func_token = tokens[tokens.index(\"\u27ff\") - 1]\n except ValueError:\n if tokens:\n func_token = tokens[-1]\n else:\n func_token = None\n\n if func_token:\n if func_token.startswith(\"@\") and platform in (\"cocoa\", \"swift\"):\n # Found a Swift attribute instead of a function name, must be an\n # anonymous function\n func_token = (\"thunk for \" if is_thunk else \"\") + \"closure\"\n function = (\n func_token.replace(\"\u27e8\", \"<\")\n .replace(\"\u25ef\", \"()\")\n .replace(\"\u27ff\", \"->\")\n .replace(\"\u3014anonymousnamespace\u3015\", \"`anonymous namespace'\")\n )\n\n # This really should never happen\n else:\n function = original_function\n\n # trim off rust markers\n function = _rust_hash.sub(\"\", function)\n\n # trim off gnu symbol versioning info\n function = _gnu_version.sub(\"\", function)\n\n # trim off windows decl markers\n return _windecl_hash.sub(\"\\\\1\", function)\n\n\ndef get_function_name_for_frame(frame, platform=None):\n \"\"\"Given a frame object or dictionary this returns the actual function\n name trimmed.\n \"\"\"\n if hasattr(frame, \"get_raw_data\"):\n frame = frame.get_raw_data()\n\n # if there is a raw function, prioritize the function unchanged\n if frame.get(\"raw_function\"):\n return frame.get(\"function\")\n\n # otherwise trim the function on demand\n rv = frame.get(\"function\")\n if rv:\n return trim_function_name(rv, frame.get(\"platform\") or platform)\n\n\ndef get_source_link_for_frame(frame: Frame) -> str | None:\n \"\"\"If source_link points to a GitHub raw content link, process it so that\n we can return the GitHub equivalent with the line number, and use it as a\n stacktrace link. 
Otherwise, return the link as is.\n \"\"\"\n source_link = getattr(frame, \"source_link\", None)\n\n try:\n URLValidator()(source_link)\n except ValidationError:\n return None\n\n parse_result = urlparse(source_link)\n if parse_result.netloc == \"raw.githubusercontent.com\":\n path_parts = parse_result.path.split(\"/\")\n if path_parts[0] == \"\":\n # the path starts with a \"/\" so the first element is empty string\n del path_parts[0]\n\n # at minimum, the path must have an author/org, a repo, and a file\n if len(path_parts) >= 3:\n source_link = \"https://www.github.com/\" + path_parts[0] + \"/\" + path_parts[1] + \"/blob\"\n for remaining_part in path_parts[2:]:\n source_link += \"/\" + remaining_part\n if getattr(frame, \"lineno\", None):\n source_link += \"#L\" + str(frame.lineno)\n return source_link\n\n\ndef set_in_app(frame: dict[str, Any], value: bool) -> None:\n \"\"\"Set the value of in_app in the frame to the given value.\"\"\"\n orig_in_app = frame.get(\"in_app\")\n if orig_in_app == value:\n return\n\n orig_in_app = int(orig_in_app) if orig_in_app is not None else -1\n setdefault_path(frame, \"data\", \"orig_in_app\", value=orig_in_app)\n frame[\"in_app\"] = value\n", "path": "src/sentry/stacktraces/functions.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport re\nfrom typing import Any\nfrom urllib.parse import urlparse\n\nfrom django.core.exceptions import ValidationError\nfrom django.core.validators import URLValidator\n\nfrom sentry.interfaces.stacktrace import Frame\nfrom sentry.stacktraces.platform import get_behavior_family_for_platform\nfrom sentry.utils.safe import setdefault_path\n\n_windecl_hash = re.compile(r\"^@?(.*?)@[0-9]+$\")\n_rust_hash = re.compile(r\"::h[a-z0-9]{16}$\")\n_gnu_version = re.compile(r\"@@?GLIBC_([0-9.]+)$\")\n_cpp_trailer_re = re.compile(r\"(\\bconst\\b|&)$\")\n_rust_blanket_re = re.compile(r\"^([A-Z] as )\")\n_lambda_re = re.compile(\n r\"\"\"(?x)\n # gcc\n (?:\n \\{\n lambda\\(.*?\\)\\#\\d+\n \\}\n ) |\n # msvc\n (?:\n \\blambda_[a-f0-9]{32}\\b\n ) |\n # clang\n (?:\n \\$_\\d+\\b\n )\n \"\"\"\n)\n_anon_namespace_re = re.compile(\n r\"\"\"(?x)\n \\?A0x[a-f0-9]{8}::\n \"\"\"\n)\n\n\nPAIRS = {\"(\": \")\", \"{\": \"}\", \"[\": \"]\", \"<\": \">\"}\n\n\ndef replace_enclosed_string(s, start, end, replacement=None):\n if start not in s:\n return s\n\n depth = 0\n\n rv = []\n pair_start = None\n for idx, char in enumerate(s):\n if char == start:\n if depth == 0:\n pair_start = idx\n depth += 1\n elif char == end:\n depth -= 1\n if depth == 0:\n if replacement is not None:\n if callable(replacement):\n rv.append(replacement(s[pair_start + 1 : idx], pair_start))\n else:\n rv.append(replacement)\n elif depth == 0:\n rv.append(char)\n\n return \"\".join(rv)\n\n\ndef split_func_tokens(s):\n buf = []\n rv = []\n stack = []\n end = 0\n\n for idx, char in enumerate(s):\n if char in PAIRS:\n stack.append(PAIRS[char])\n elif stack and char == stack[-1]:\n stack.pop()\n if not stack:\n buf.append(s[end : idx + 1])\n end = idx + 1\n elif not stack:\n if char.isspace():\n if buf:\n rv.append(buf)\n buf = []\n else:\n buf.append(s[end : idx + 1])\n end = idx + 1\n\n if buf:\n rv.append(buf)\n\n return [\"\".join(x) for x in rv]\n\n\ndef trim_function_name(function, platform, normalize_lambdas=True):\n \"\"\"Given a function value from the frame's function attribute this returns\n a trimmed version that can be stored in `function_name`. 
This is only used\n if the client did not supply a value itself already.\n \"\"\"\n if platform == \"csharp\":\n return trim_csharp_function_name(function)\n if get_behavior_family_for_platform(platform) == \"native\":\n return trim_native_function_name(function, platform, normalize_lambdas=normalize_lambdas)\n return function\n\n\ndef trim_csharp_function_name(function):\n \"\"\"This trims off signatures from C# frames. This takes advantage of the\n Unity not emitting any return values and using a space before the argument\n list.\n\n Note that there is disagreement between Unity and the main .NET SDK about\n the function names. The Unity SDK emits the entire function with module\n in the `function` name similar to native, the .NET SDK emits it in individual\n parts of the frame.\n \"\"\"\n return function.split(\" (\", 1)[0]\n\n\ndef trim_native_function_name(function, platform, normalize_lambdas=True):\n if function in (\"<redacted>\", \"<unknown>\"):\n return function\n\n original_function = function\n function = function.strip()\n\n # Ensure we don't operate on objc functions\n if function.startswith((\"[\", \"+[\", \"-[\")):\n return function\n\n # Remove special `[clone .foo]` annotations for cloned/split functions\n def process_brackets(value, start):\n if value.startswith(\"clone .\"):\n return \"\"\n return \"[%s]\" % value\n\n function = replace_enclosed_string(function, \"[\", \"]\", process_brackets).rstrip()\n\n # Chop off C++ trailers\n while True:\n match = _cpp_trailer_re.search(function)\n if match is None:\n break\n function = function[: match.start()].rstrip()\n\n # Because operator<< really screws with our balancing, so let's work\n # around that by replacing it with a character we do not observe in\n # `split_func_tokens` or `replace_enclosed_string`.\n function = (\n function.replace(\"operator<<\", \"operator\u27e8\u27e8\")\n .replace(\"operator<\", \"operator\u27e8\")\n .replace(\"operator()\", \"operator\u25ef\")\n .replace(\"operator->\", \"operator\u27ff\")\n .replace(\" -> \", \" \u27ff \")\n .replace(\"`anonymous namespace'\", \"\u3014anonymousnamespace\u3015\")\n )\n\n # normalize C++ lambdas. This is necessary because different\n # compilers use different rules for now to name a lambda and they are\n # all quite inconsistent. This does not give us perfect answers to\n # this problem but closer. In particular msvc will call a lambda\n # something like `lambda_deadbeefeefffeeffeeff` whereas clang for\n # instance will name it `main::$_0` which will tell us in which outer\n # function it was declared.\n if normalize_lambdas:\n function = _lambda_re.sub(\"lambda\", function)\n\n # Normalize MSVC anonymous namespaces from inline functions. For inline\n # functions, the compiler inconsistently renders anonymous namespaces with\n # their hash. 
For regular functions, \"`anonymous namespace'\" is used.\n # The regular expression matches the trailing \"::\" to avoid accidental\n # replacement in mangled function names.\n if normalize_lambdas:\n function = _anon_namespace_re.sub(\"\u3014anonymousnamespace\u3015::\", function)\n\n # Remove the arguments if there is one.\n def process_args(value, start):\n value = value.strip()\n if value in (\"anonymous namespace\", \"operator\"):\n return \"(%s)\" % value\n return \"\"\n\n function = replace_enclosed_string(function, \"(\", \")\", process_args)\n\n # Resolve generic types, but special case rust which uses things like\n # <Foo as Bar>::baz to denote traits.\n def process_generics(value, start):\n # Special case for lambdas\n if value == \"lambda\" or _lambda_re.match(value):\n return \"<%s>\" % value\n\n if start > 0:\n return \"<T>\"\n\n # Rust special cases\n value = _rust_blanket_re.sub(\"\", value) # prefer trait for blanket impls\n value = replace_enclosed_string(value, \"<\", \">\", process_generics)\n return value.split(\" as \", 1)[0]\n\n function = replace_enclosed_string(function, \"<\", \">\", process_generics)\n\n is_thunk = \"thunk for \" in function # swift\n\n tokens = split_func_tokens(function)\n\n # MSVC demangles generic operator functions with a space between the\n # function name and the generics. Ensure that those two components both end\n # up in the function name.\n if len(tokens) > 1 and tokens[-1] == \"<T>\":\n tokens.pop()\n tokens[-1] += \" <T>\"\n\n # find the token which is the function name. Since we chopped of C++\n # trailers there are only two cases we care about: the token left to\n # the -> return marker which is for instance used in Swift and if that\n # is not found, consider the last token to be the function.\n #\n # [\"unsigned\", \"int\", \"whatever\"] -> whatever\n # [\"@objc\", \"whatever\", \"->\", \"int\"] -> whatever\n try:\n func_token = tokens[tokens.index(\"\u27ff\") - 1]\n except ValueError:\n if tokens:\n func_token = tokens[-1]\n else:\n func_token = None\n\n if func_token:\n if func_token.startswith(\"@\") and platform in (\"cocoa\", \"swift\"):\n # Found a Swift attribute instead of a function name, must be an\n # anonymous function\n func_token = (\"thunk for \" if is_thunk else \"\") + \"closure\"\n function = (\n func_token.replace(\"\u27e8\", \"<\")\n .replace(\"\u25ef\", \"()\")\n .replace(\"\u27ff\", \"->\")\n .replace(\"\u3014anonymousnamespace\u3015\", \"`anonymous namespace'\")\n )\n\n # This really should never happen\n else:\n function = original_function\n\n # trim off rust markers\n function = _rust_hash.sub(\"\", function)\n\n # trim off gnu symbol versioning info\n function = _gnu_version.sub(\"\", function)\n\n # trim off windows decl markers\n return _windecl_hash.sub(\"\\\\1\", function)\n\n\ndef get_function_name_for_frame(frame, platform=None):\n \"\"\"Given a frame object or dictionary this returns the actual function\n name trimmed.\n \"\"\"\n if hasattr(frame, \"get_raw_data\"):\n frame = frame.get_raw_data()\n\n # if there is a raw function, prioritize the function unchanged\n if frame.get(\"raw_function\"):\n return frame.get(\"function\")\n\n # otherwise trim the function on demand\n rv = frame.get(\"function\")\n if rv:\n return trim_function_name(rv, frame.get(\"platform\") or platform)\n\n\ndef get_source_link_for_frame(frame: Frame) -> str | None:\n \"\"\"If source_link points to a GitHub raw content link, process it so that\n we can return the GitHub equivalent with the line number, and use it as a\n 
stacktrace link. Otherwise, return the link as is.\n \"\"\"\n source_link = getattr(frame, \"source_link\", None)\n\n try:\n URLValidator()(source_link)\n except ValidationError:\n return None\n\n parse_result = urlparse(source_link)\n if parse_result.netloc == \"raw.githubusercontent.com\":\n path_parts = parse_result.path.split(\"/\")\n if path_parts[0] == \"\":\n # the path starts with a \"/\" so the first element is empty string\n del path_parts[0]\n\n # at minimum, the path must have an author/org, a repo, and a file\n if len(path_parts) >= 3:\n source_link = \"https://www.github.com/\" + path_parts[0] + \"/\" + path_parts[1] + \"/blob\"\n for remaining_part in path_parts[2:]:\n source_link += \"/\" + remaining_part\n if getattr(frame, \"lineno\", None):\n source_link += \"#L\" + str(frame.lineno)\n return source_link\n\n\ndef set_in_app(frame: dict[str, Any], value: bool) -> None:\n \"\"\"Set the value of in_app in the frame to the given value.\"\"\"\n orig_in_app = frame.get(\"in_app\")\n if orig_in_app == value:\n return\n\n orig_in_app = int(orig_in_app) if orig_in_app is not None else -1\n setdefault_path(frame, \"data\", \"orig_in_app\", value=orig_in_app)\n frame[\"in_app\"] = value\n", "path": "src/sentry/stacktraces/functions.py"}]}
| 3,977 | 303 |