| problem_id | source | task_type | in_source_id | prompt | golden_diff | verification_info | num_tokens | num_tokens_diff |
|---|---|---|---|---|---|---|---|---|
| stringlengths 18-22 | stringclasses 1 value | stringclasses 1 value | stringlengths 13-58 | stringlengths 1.1k-25.4k | stringlengths 145-5.13k | stringlengths 582-39.1k | int64 271-4.1k | int64 47-1.02k |

problem_id: gh_patches_debug_11726
source: rasdani/github-patches
task_type: git_diff
in_source_id: mdn__kuma-5842
prompt:
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
scrape_document with trailing slash fails
```
▶ docker-compose exec web ./manage.py scrape_document https://developer.mozilla.org/en-US/docs/Web/CSS/ --depth=2
/usr/local/lib/python2.7/site-packages/django/utils/encoding.py:6: ImportWarning: Not importing directory '/app/locale': missing __init__.py
import locale
/usr/local/lib/python2.7/site-packages/django/template/backends/jinja2.py:6: ImportWarning: Not importing directory '/app/jinja2': missing __init__.py
import jinja2
/app/kuma/core/i18n.py:18: ImportWarning: Not importing directory '/app/kuma/core/jinja2': missing __init__.py
from jinja2 import nodes
/usr/local/lib/python2.7/site-packages/decorator_include.py:20: RemovedInDjango20Warning: Importing from django.core.urlresolvers is deprecated in favor of django.urls.
from django.core.urlresolvers import RegexURLPattern as URLPattern, RegexURLResolver as URLResolver
/usr/local/lib/python2.7/site-packages/soapbox/templatetags/soapbox.py:9: RemovedInDjango20Warning: assignment_tag() is deprecated. Use simple_tag() instead
@register.assignment_tag(takes_context=True)
INFO: Scrape progress, cycle 1: 1 Initializing, 1 Gathering Requirements
INFO: Scrape progress, cycle 2: 3 Initializing, 1 Gathering Requirements, 1 Done
Traceback (most recent call last):
File "./manage.py", line 10, in <module>
execute_from_command_line(sys.argv)
File "/usr/local/lib/python2.7/site-packages/django/core/management/__init__.py", line 364, in execute_from_command_line
utility.execute()
File "/usr/local/lib/python2.7/site-packages/django/core/management/__init__.py", line 356, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/usr/local/lib/python2.7/site-packages/django/core/management/base.py", line 283, in run_from_argv
self.execute(*args, **cmd_options)
File "/usr/local/lib/python2.7/site-packages/django/core/management/base.py", line 330, in execute
output = self.handle(*args, **options)
File "/app/kuma/scrape/management/commands/scrape_document.py", line 47, in handle
scraper.scrape()
File "/app/kuma/scrape/scraper.py", line 182, in scrape
dependencies = source.gather(self.requester, self.storage)
File "/app/kuma/scrape/sources/base.py", line 192, in gather
has_prereqs, data = self.load_prereqs(requester, storage)
File "/app/kuma/scrape/sources/document_children.py", line 31, in load_prereqs
data = response.json()
File "/usr/local/lib/python2.7/site-packages/requests/models.py", line 897, in json
return complexjson.loads(self.text, **kwargs)
File "/usr/local/lib/python2.7/site-packages/simplejson/__init__.py", line 516, in loads
return _default_decoder.decode(s)
File "/usr/local/lib/python2.7/site-packages/simplejson/decoder.py", line 370, in decode
obj, end = self.raw_decode(s)
File "/usr/local/lib/python2.7/site-packages/simplejson/decoder.py", line 400, in raw_decode
return self.scan_once(s, idx=_w(s, idx).end())
simplejson.scanner.JSONDecodeError: Expecting value: line 4 column 1 (char 3)
```
Note that if I retry without the trailing slash things work. I.e. `docker-compose exec web ./manage.py scrape_document https://developer.mozilla.org/en-US/docs/Web/CSS --depth=2`
Somewhere in there it assumes that the response is JSON when in fact the response status code is not OKish.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kuma/scrape/management/commands/__init__.py`
Content:
```
1 """Common methods for scraping management commands."""
2 import logging
3 from argparse import ArgumentTypeError
4
5 from django.core.management.base import BaseCommand
6 from django.utils.six.moves.urllib.parse import urlparse
7
8 from kuma.scrape.scraper import Scraper
9
10
11 class ScrapeCommand(BaseCommand):
12 """Common base class for scraping management commands."""
13 def make_scraper(self, **options):
14 """Create a Scraper instance for management commands."""
15 return Scraper(**options)
16
17 def parse_url_or_path(self, url_or_path):
18 if url_or_path.startswith('http'):
19 bits = urlparse(url_or_path)
20 host = bits.netloc
21 path = bits.path
22 ssl = (bits.scheme == 'https')
23 else:
24 host = 'wiki.developer.mozilla.org'
25 ssl = True
26 path = url_or_path
27 return host, ssl, path
28
29 def setup_logging(self, verbosity):
30 """Update logger for desired verbosity."""
31
32 if verbosity == 0:
33 level = logging.WARNING
34 elif verbosity == 1: # default
35 level = logging.INFO
36 elif verbosity >= 2:
37 level = logging.DEBUG
38
39 formatter = logging.Formatter('%(levelname)s: %(message)s')
40 console = logging.StreamHandler(self.stderr)
41 console.setLevel(level)
42 console.setFormatter(formatter)
43 logger = logging.getLogger('kuma.scraper')
44 logger.setLevel(level)
45 logger.addHandler(console)
46 logger.propagate = False
47
48 def int_all_type(self, value):
49 """A command argument that can take an integer or 'all'."""
50 if value.strip().lower() == 'all':
51 return 'all'
52 try:
53 as_int = int(value)
54 except ValueError:
55 msg = "%r should be 'all' or an integer" % value
56 raise ArgumentTypeError(msg)
57 return as_int
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
golden_diff:
diff --git a/kuma/scrape/management/commands/__init__.py b/kuma/scrape/management/commands/__init__.py
--- a/kuma/scrape/management/commands/__init__.py
+++ b/kuma/scrape/management/commands/__init__.py
@@ -18,12 +18,12 @@
if url_or_path.startswith('http'):
bits = urlparse(url_or_path)
host = bits.netloc
- path = bits.path
+ path = bits.path.rstrip('/')
ssl = (bits.scheme == 'https')
else:
host = 'wiki.developer.mozilla.org'
ssl = True
- path = url_or_path
+ path = url_or_path.rstrip('/')
return host, ssl, path
def setup_logging(self, verbosity):
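
A minimal sketch of the normalization this golden diff introduces, using Python's standard `urllib.parse` in place of the Django `six` shim imported by the patched file:

```python
# Sketch only: applies the rstrip('/') fix from parse_url_or_path() so that
# both URL forms from the issue resolve to the same document path.
from urllib.parse import urlparse

for url in (
    "https://developer.mozilla.org/en-US/docs/Web/CSS/",   # trailing slash: previously failed
    "https://developer.mozilla.org/en-US/docs/Web/CSS",    # no trailing slash: always worked
):
    bits = urlparse(url)
    host = bits.netloc
    ssl = bits.scheme == "https"
    path = bits.path.rstrip("/")
    print(host, ssl, path)  # the path is '/en-US/docs/Web/CSS' in both iterations
```

With the trailing slash left in place, the scraper requested a children URL that did not return JSON, which is what surfaced as the `JSONDecodeError` in the traceback above.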
verification_info:
{"golden_diff": "diff --git a/kuma/scrape/management/commands/__init__.py b/kuma/scrape/management/commands/__init__.py\n--- a/kuma/scrape/management/commands/__init__.py\n+++ b/kuma/scrape/management/commands/__init__.py\n@@ -18,12 +18,12 @@\n if url_or_path.startswith('http'):\n bits = urlparse(url_or_path)\n host = bits.netloc\n- path = bits.path\n+ path = bits.path.rstrip('/')\n ssl = (bits.scheme == 'https')\n else:\n host = 'wiki.developer.mozilla.org'\n ssl = True\n- path = url_or_path\n+ path = url_or_path.rstrip('/')\n return host, ssl, path\n \n def setup_logging(self, verbosity):\n", "issue": "scrape_document with trailing slash fails\n```\r\n\u25b6 docker-compose exec web ./manage.py scrape_document https://developer.mozilla.org/en-US/docs/Web/CSS/ --depth=2\r\n/usr/local/lib/python2.7/site-packages/django/utils/encoding.py:6: ImportWarning: Not importing directory '/app/locale': missing __init__.py\r\n import locale\r\n/usr/local/lib/python2.7/site-packages/django/template/backends/jinja2.py:6: ImportWarning: Not importing directory '/app/jinja2': missing __init__.py\r\n import jinja2\r\n/app/kuma/core/i18n.py:18: ImportWarning: Not importing directory '/app/kuma/core/jinja2': missing __init__.py\r\n from jinja2 import nodes\r\n/usr/local/lib/python2.7/site-packages/decorator_include.py:20: RemovedInDjango20Warning: Importing from django.core.urlresolvers is deprecated in favor of django.urls.\r\n from django.core.urlresolvers import RegexURLPattern as URLPattern, RegexURLResolver as URLResolver\r\n/usr/local/lib/python2.7/site-packages/soapbox/templatetags/soapbox.py:9: RemovedInDjango20Warning: assignment_tag() is deprecated. Use simple_tag() instead\r\n @register.assignment_tag(takes_context=True)\r\nINFO: Scrape progress, cycle 1: 1 Initializing, 1 Gathering Requirements\r\nINFO: Scrape progress, cycle 2: 3 Initializing, 1 Gathering Requirements, 1 Done\r\nTraceback (most recent call last):\r\n File \"./manage.py\", line 10, in <module>\r\n execute_from_command_line(sys.argv)\r\n File \"/usr/local/lib/python2.7/site-packages/django/core/management/__init__.py\", line 364, in execute_from_command_line\r\n utility.execute()\r\n File \"/usr/local/lib/python2.7/site-packages/django/core/management/__init__.py\", line 356, in execute\r\n self.fetch_command(subcommand).run_from_argv(self.argv)\r\n File \"/usr/local/lib/python2.7/site-packages/django/core/management/base.py\", line 283, in run_from_argv\r\n self.execute(*args, **cmd_options)\r\n File \"/usr/local/lib/python2.7/site-packages/django/core/management/base.py\", line 330, in execute\r\n output = self.handle(*args, **options)\r\n File \"/app/kuma/scrape/management/commands/scrape_document.py\", line 47, in handle\r\n scraper.scrape()\r\n File \"/app/kuma/scrape/scraper.py\", line 182, in scrape\r\n dependencies = source.gather(self.requester, self.storage)\r\n File \"/app/kuma/scrape/sources/base.py\", line 192, in gather\r\n has_prereqs, data = self.load_prereqs(requester, storage)\r\n File \"/app/kuma/scrape/sources/document_children.py\", line 31, in load_prereqs\r\n data = response.json()\r\n File \"/usr/local/lib/python2.7/site-packages/requests/models.py\", line 897, in json\r\n return complexjson.loads(self.text, **kwargs)\r\n File \"/usr/local/lib/python2.7/site-packages/simplejson/__init__.py\", line 516, in loads\r\n return _default_decoder.decode(s)\r\n File \"/usr/local/lib/python2.7/site-packages/simplejson/decoder.py\", line 370, in decode\r\n obj, end = self.raw_decode(s)\r\n File 
\"/usr/local/lib/python2.7/site-packages/simplejson/decoder.py\", line 400, in raw_decode\r\n return self.scan_once(s, idx=_w(s, idx).end())\r\nsimplejson.scanner.JSONDecodeError: Expecting value: line 4 column 1 (char 3)\r\n```\r\n\r\nNote that if I retry without the trailing slash things work. I.e. `docker-compose exec web ./manage.py scrape_document https://developer.mozilla.org/en-US/docs/Web/CSS --depth=2`\r\n\r\nSomewhere in there it assumes that the response is JSON when in fact the response status code is not OKish. \nscrape_document with trailing slash fails\n```\r\n\u25b6 docker-compose exec web ./manage.py scrape_document https://developer.mozilla.org/en-US/docs/Web/CSS/ --depth=2\r\n/usr/local/lib/python2.7/site-packages/django/utils/encoding.py:6: ImportWarning: Not importing directory '/app/locale': missing __init__.py\r\n import locale\r\n/usr/local/lib/python2.7/site-packages/django/template/backends/jinja2.py:6: ImportWarning: Not importing directory '/app/jinja2': missing __init__.py\r\n import jinja2\r\n/app/kuma/core/i18n.py:18: ImportWarning: Not importing directory '/app/kuma/core/jinja2': missing __init__.py\r\n from jinja2 import nodes\r\n/usr/local/lib/python2.7/site-packages/decorator_include.py:20: RemovedInDjango20Warning: Importing from django.core.urlresolvers is deprecated in favor of django.urls.\r\n from django.core.urlresolvers import RegexURLPattern as URLPattern, RegexURLResolver as URLResolver\r\n/usr/local/lib/python2.7/site-packages/soapbox/templatetags/soapbox.py:9: RemovedInDjango20Warning: assignment_tag() is deprecated. Use simple_tag() instead\r\n @register.assignment_tag(takes_context=True)\r\nINFO: Scrape progress, cycle 1: 1 Initializing, 1 Gathering Requirements\r\nINFO: Scrape progress, cycle 2: 3 Initializing, 1 Gathering Requirements, 1 Done\r\nTraceback (most recent call last):\r\n File \"./manage.py\", line 10, in <module>\r\n execute_from_command_line(sys.argv)\r\n File \"/usr/local/lib/python2.7/site-packages/django/core/management/__init__.py\", line 364, in execute_from_command_line\r\n utility.execute()\r\n File \"/usr/local/lib/python2.7/site-packages/django/core/management/__init__.py\", line 356, in execute\r\n self.fetch_command(subcommand).run_from_argv(self.argv)\r\n File \"/usr/local/lib/python2.7/site-packages/django/core/management/base.py\", line 283, in run_from_argv\r\n self.execute(*args, **cmd_options)\r\n File \"/usr/local/lib/python2.7/site-packages/django/core/management/base.py\", line 330, in execute\r\n output = self.handle(*args, **options)\r\n File \"/app/kuma/scrape/management/commands/scrape_document.py\", line 47, in handle\r\n scraper.scrape()\r\n File \"/app/kuma/scrape/scraper.py\", line 182, in scrape\r\n dependencies = source.gather(self.requester, self.storage)\r\n File \"/app/kuma/scrape/sources/base.py\", line 192, in gather\r\n has_prereqs, data = self.load_prereqs(requester, storage)\r\n File \"/app/kuma/scrape/sources/document_children.py\", line 31, in load_prereqs\r\n data = response.json()\r\n File \"/usr/local/lib/python2.7/site-packages/requests/models.py\", line 897, in json\r\n return complexjson.loads(self.text, **kwargs)\r\n File \"/usr/local/lib/python2.7/site-packages/simplejson/__init__.py\", line 516, in loads\r\n return _default_decoder.decode(s)\r\n File \"/usr/local/lib/python2.7/site-packages/simplejson/decoder.py\", line 370, in decode\r\n obj, end = self.raw_decode(s)\r\n File \"/usr/local/lib/python2.7/site-packages/simplejson/decoder.py\", line 400, in raw_decode\r\n return 
self.scan_once(s, idx=_w(s, idx).end())\r\nsimplejson.scanner.JSONDecodeError: Expecting value: line 4 column 1 (char 3)\r\n```\r\n\r\nNote that if I retry without the trailing slash things work. I.e. `docker-compose exec web ./manage.py scrape_document https://developer.mozilla.org/en-US/docs/Web/CSS --depth=2`\r\n\r\nSomewhere in there it assumes that the response is JSON when in fact the response status code is not OKish. \n", "before_files": [{"content": "\"\"\"Common methods for scraping management commands.\"\"\"\nimport logging\nfrom argparse import ArgumentTypeError\n\nfrom django.core.management.base import BaseCommand\nfrom django.utils.six.moves.urllib.parse import urlparse\n\nfrom kuma.scrape.scraper import Scraper\n\n\nclass ScrapeCommand(BaseCommand):\n \"\"\"Common base class for scraping management commands.\"\"\"\n def make_scraper(self, **options):\n \"\"\"Create a Scraper instance for management commands.\"\"\"\n return Scraper(**options)\n\n def parse_url_or_path(self, url_or_path):\n if url_or_path.startswith('http'):\n bits = urlparse(url_or_path)\n host = bits.netloc\n path = bits.path\n ssl = (bits.scheme == 'https')\n else:\n host = 'wiki.developer.mozilla.org'\n ssl = True\n path = url_or_path\n return host, ssl, path\n\n def setup_logging(self, verbosity):\n \"\"\"Update logger for desired verbosity.\"\"\"\n\n if verbosity == 0:\n level = logging.WARNING\n elif verbosity == 1: # default\n level = logging.INFO\n elif verbosity >= 2:\n level = logging.DEBUG\n\n formatter = logging.Formatter('%(levelname)s: %(message)s')\n console = logging.StreamHandler(self.stderr)\n console.setLevel(level)\n console.setFormatter(formatter)\n logger = logging.getLogger('kuma.scraper')\n logger.setLevel(level)\n logger.addHandler(console)\n logger.propagate = False\n\n def int_all_type(self, value):\n \"\"\"A command argument that can take an integer or 'all'.\"\"\"\n if value.strip().lower() == 'all':\n return 'all'\n try:\n as_int = int(value)\n except ValueError:\n msg = \"%r should be 'all' or an integer\" % value\n raise ArgumentTypeError(msg)\n return as_int\n", "path": "kuma/scrape/management/commands/__init__.py"}], "after_files": [{"content": "\"\"\"Common methods for scraping management commands.\"\"\"\nimport logging\nfrom argparse import ArgumentTypeError\n\nfrom django.core.management.base import BaseCommand\nfrom django.utils.six.moves.urllib.parse import urlparse\n\nfrom kuma.scrape.scraper import Scraper\n\n\nclass ScrapeCommand(BaseCommand):\n \"\"\"Common base class for scraping management commands.\"\"\"\n def make_scraper(self, **options):\n \"\"\"Create a Scraper instance for management commands.\"\"\"\n return Scraper(**options)\n\n def parse_url_or_path(self, url_or_path):\n if url_or_path.startswith('http'):\n bits = urlparse(url_or_path)\n host = bits.netloc\n path = bits.path.rstrip('/')\n ssl = (bits.scheme == 'https')\n else:\n host = 'wiki.developer.mozilla.org'\n ssl = True\n path = url_or_path.rstrip('/')\n return host, ssl, path\n\n def setup_logging(self, verbosity):\n \"\"\"Update logger for desired verbosity.\"\"\"\n\n if verbosity == 0:\n level = logging.WARNING\n elif verbosity == 1: # default\n level = logging.INFO\n elif verbosity >= 2:\n level = logging.DEBUG\n\n formatter = logging.Formatter('%(levelname)s: %(message)s')\n console = logging.StreamHandler(self.stderr)\n console.setLevel(level)\n console.setFormatter(formatter)\n logger = logging.getLogger('kuma.scraper')\n logger.setLevel(level)\n logger.addHandler(console)\n logger.propagate 
= False\n\n def int_all_type(self, value):\n \"\"\"A command argument that can take an integer or 'all'.\"\"\"\n if value.strip().lower() == 'all':\n return 'all'\n try:\n as_int = int(value)\n except ValueError:\n msg = \"%r should be 'all' or an integer\" % value\n raise ArgumentTypeError(msg)\n return as_int\n", "path": "kuma/scrape/management/commands/__init__.py"}]}
| 2,554 | 174 |
problem_id: gh_patches_debug_29416
source: rasdani/github-patches
task_type: git_diff
in_source_id: paperless-ngx__paperless-ngx-1557
prompt:
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] storage_path is missing in manifest.json
### Description
there is no "model": "documents.storage_path" in manifest.json. I found it only in db.sqlite
### Steps to reproduce
open export/manifest.json after `docker compose exec webserver document_exporter ../export -f`
### Webserver logs
_No response_
### Paperless-ngx version
1.8.0
### Host OS
Raspberry Pi 4/arm64
### Installation method
Docker - official image
### Browser
_No response_
### Configuration changes
_No response_
### Other
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/documents/management/commands/document_exporter.py`
Content:
```
1 import hashlib
2 import json
3 import os
4 import shutil
5 import time
6
7 import tqdm
8 from django.conf import settings
9 from django.contrib.auth.models import Group
10 from django.contrib.auth.models import User
11 from django.core import serializers
12 from django.core.management.base import BaseCommand
13 from django.core.management.base import CommandError
14 from django.db import transaction
15 from documents.models import Comment
16 from documents.models import Correspondent
17 from documents.models import Document
18 from documents.models import DocumentType
19 from documents.models import SavedView
20 from documents.models import SavedViewFilterRule
21 from documents.models import Tag
22 from documents.models import UiSettings
23 from documents.settings import EXPORTER_ARCHIVE_NAME
24 from documents.settings import EXPORTER_FILE_NAME
25 from documents.settings import EXPORTER_THUMBNAIL_NAME
26 from filelock import FileLock
27 from paperless import version
28 from paperless.db import GnuPG
29 from paperless_mail.models import MailAccount
30 from paperless_mail.models import MailRule
31
32 from ...file_handling import delete_empty_directories
33 from ...file_handling import generate_filename
34
35
36 class Command(BaseCommand):
37
38 help = """
39 Decrypt and rename all files in our collection into a given target
40 directory. And include a manifest file containing document data for
41 easy import.
42 """.replace(
43 " ",
44 "",
45 )
46
47 def add_arguments(self, parser):
48 parser.add_argument("target")
49
50 parser.add_argument(
51 "-c",
52 "--compare-checksums",
53 default=False,
54 action="store_true",
55 help="Compare file checksums when determining whether to export "
56 "a file or not. If not specified, file size and time "
57 "modified is used instead.",
58 )
59
60 parser.add_argument(
61 "-f",
62 "--use-filename-format",
63 default=False,
64 action="store_true",
65 help="Use PAPERLESS_FILENAME_FORMAT for storing files in the "
66 "export directory, if configured.",
67 )
68
69 parser.add_argument(
70 "-d",
71 "--delete",
72 default=False,
73 action="store_true",
74 help="After exporting, delete files in the export directory that "
75 "do not belong to the current export, such as files from "
76 "deleted documents.",
77 )
78 parser.add_argument(
79 "--no-progress-bar",
80 default=False,
81 action="store_true",
82 help="If set, the progress bar will not be shown",
83 )
84
85 def __init__(self, *args, **kwargs):
86 BaseCommand.__init__(self, *args, **kwargs)
87 self.target = None
88 self.files_in_export_dir = []
89 self.exported_files = []
90 self.compare_checksums = False
91 self.use_filename_format = False
92 self.delete = False
93
94 def handle(self, *args, **options):
95
96 self.target = options["target"]
97 self.compare_checksums = options["compare_checksums"]
98 self.use_filename_format = options["use_filename_format"]
99 self.delete = options["delete"]
100
101 if not os.path.exists(self.target):
102 raise CommandError("That path doesn't exist")
103
104 if not os.access(self.target, os.W_OK):
105 raise CommandError("That path doesn't appear to be writable")
106
107 with FileLock(settings.MEDIA_LOCK):
108 self.dump(options["no_progress_bar"])
109
110 def dump(self, progress_bar_disable=False):
111 # 1. Take a snapshot of what files exist in the current export folder
112 for root, dirs, files in os.walk(self.target):
113 self.files_in_export_dir.extend(
114 map(lambda f: os.path.abspath(os.path.join(root, f)), files),
115 )
116
117 # 2. Create manifest, containing all correspondents, types, tags,
118 # documents and ui_settings
119 with transaction.atomic():
120 manifest = json.loads(
121 serializers.serialize("json", Correspondent.objects.all()),
122 )
123
124 manifest += json.loads(serializers.serialize("json", Tag.objects.all()))
125
126 manifest += json.loads(
127 serializers.serialize("json", DocumentType.objects.all()),
128 )
129
130 manifest += json.loads(
131 serializers.serialize("json", Comment.objects.all()),
132 )
133
134 documents = Document.objects.order_by("id")
135 document_map = {d.pk: d for d in documents}
136 document_manifest = json.loads(serializers.serialize("json", documents))
137 manifest += document_manifest
138
139 manifest += json.loads(
140 serializers.serialize("json", MailAccount.objects.all()),
141 )
142
143 manifest += json.loads(
144 serializers.serialize("json", MailRule.objects.all()),
145 )
146
147 manifest += json.loads(
148 serializers.serialize("json", SavedView.objects.all()),
149 )
150
151 manifest += json.loads(
152 serializers.serialize("json", SavedViewFilterRule.objects.all()),
153 )
154
155 manifest += json.loads(serializers.serialize("json", Group.objects.all()))
156
157 manifest += json.loads(serializers.serialize("json", User.objects.all()))
158
159 manifest += json.loads(
160 serializers.serialize("json", UiSettings.objects.all()),
161 )
162
163 # 3. Export files from each document
164 for index, document_dict in tqdm.tqdm(
165 enumerate(document_manifest),
166 total=len(document_manifest),
167 disable=progress_bar_disable,
168 ):
169 # 3.1. store files unencrypted
170 document_dict["fields"]["storage_type"] = Document.STORAGE_TYPE_UNENCRYPTED
171
172 document = document_map[document_dict["pk"]]
173
174 # 3.2. generate a unique filename
175 filename_counter = 0
176 while True:
177 if self.use_filename_format:
178 base_name = generate_filename(
179 document,
180 counter=filename_counter,
181 append_gpg=False,
182 )
183 else:
184 base_name = document.get_public_filename(counter=filename_counter)
185
186 if base_name not in self.exported_files:
187 self.exported_files.append(base_name)
188 break
189 else:
190 filename_counter += 1
191
192 # 3.3. write filenames into manifest
193 original_name = base_name
194 original_target = os.path.join(self.target, original_name)
195 document_dict[EXPORTER_FILE_NAME] = original_name
196
197 thumbnail_name = base_name + "-thumbnail.webp"
198 thumbnail_target = os.path.join(self.target, thumbnail_name)
199 document_dict[EXPORTER_THUMBNAIL_NAME] = thumbnail_name
200
201 if document.has_archive_version:
202 archive_name = base_name + "-archive.pdf"
203 archive_target = os.path.join(self.target, archive_name)
204 document_dict[EXPORTER_ARCHIVE_NAME] = archive_name
205 else:
206 archive_target = None
207
208 # 3.4. write files to target folder
209 t = int(time.mktime(document.created.timetuple()))
210 if document.storage_type == Document.STORAGE_TYPE_GPG:
211
212 os.makedirs(os.path.dirname(original_target), exist_ok=True)
213 with open(original_target, "wb") as f:
214 with document.source_file as out_file:
215 f.write(GnuPG.decrypted(out_file))
216 os.utime(original_target, times=(t, t))
217
218 os.makedirs(os.path.dirname(thumbnail_target), exist_ok=True)
219 with open(thumbnail_target, "wb") as f:
220 with document.thumbnail_file as out_file:
221 f.write(GnuPG.decrypted(out_file))
222 os.utime(thumbnail_target, times=(t, t))
223
224 if archive_target:
225 os.makedirs(os.path.dirname(archive_target), exist_ok=True)
226 with open(archive_target, "wb") as f:
227 with document.archive_path as out_file:
228 f.write(GnuPG.decrypted(out_file))
229 os.utime(archive_target, times=(t, t))
230 else:
231 self.check_and_copy(
232 document.source_path,
233 document.checksum,
234 original_target,
235 )
236
237 self.check_and_copy(document.thumbnail_path, None, thumbnail_target)
238
239 if archive_target:
240 self.check_and_copy(
241 document.archive_path,
242 document.archive_checksum,
243 archive_target,
244 )
245
246 # 4.1 write manifest to target folder
247 manifest_path = os.path.abspath(os.path.join(self.target, "manifest.json"))
248
249 with open(manifest_path, "w") as f:
250 json.dump(manifest, f, indent=2)
251
252 # 4.2 write version information to target folder
253 version_path = os.path.abspath(os.path.join(self.target, "version.json"))
254
255 with open(version_path, "w") as f:
256 json.dump({"version": version.__full_version_str__}, f, indent=2)
257
258 if self.delete:
259 # 5. Remove files which we did not explicitly export in this run
260
261 if manifest_path in self.files_in_export_dir:
262 self.files_in_export_dir.remove(manifest_path)
263
264 for f in self.files_in_export_dir:
265 os.remove(f)
266
267 delete_empty_directories(
268 os.path.abspath(os.path.dirname(f)),
269 os.path.abspath(self.target),
270 )
271
272 def check_and_copy(self, source, source_checksum, target):
273 if os.path.abspath(target) in self.files_in_export_dir:
274 self.files_in_export_dir.remove(os.path.abspath(target))
275
276 perform_copy = False
277
278 if os.path.exists(target):
279 source_stat = os.stat(source)
280 target_stat = os.stat(target)
281 if self.compare_checksums and source_checksum:
282 with open(target, "rb") as f:
283 target_checksum = hashlib.md5(f.read()).hexdigest()
284 perform_copy = target_checksum != source_checksum
285 elif source_stat.st_mtime != target_stat.st_mtime:
286 perform_copy = True
287 elif source_stat.st_size != target_stat.st_size:
288 perform_copy = True
289 else:
290 # Copy if it does not exist
291 perform_copy = True
292
293 if perform_copy:
294 os.makedirs(os.path.dirname(target), exist_ok=True)
295 shutil.copy2(source, target)
296
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
golden_diff:
diff --git a/src/documents/management/commands/document_exporter.py b/src/documents/management/commands/document_exporter.py
--- a/src/documents/management/commands/document_exporter.py
+++ b/src/documents/management/commands/document_exporter.py
@@ -18,6 +18,7 @@
from documents.models import DocumentType
from documents.models import SavedView
from documents.models import SavedViewFilterRule
+from documents.models import StoragePath
from documents.models import Tag
from documents.models import UiSettings
from documents.settings import EXPORTER_ARCHIVE_NAME
@@ -114,8 +115,8 @@
map(lambda f: os.path.abspath(os.path.join(root, f)), files),
)
- # 2. Create manifest, containing all correspondents, types, tags,
- # documents and ui_settings
+ # 2. Create manifest, containing all correspondents, types, tags, storage paths
+ # comments, documents and ui_settings
with transaction.atomic():
manifest = json.loads(
serializers.serialize("json", Correspondent.objects.all()),
@@ -127,6 +128,10 @@
serializers.serialize("json", DocumentType.objects.all()),
)
+ manifest += json.loads(
+ serializers.serialize("json", StoragePath.objects.all()),
+ )
+
manifest += json.loads(
serializers.serialize("json", Comment.objects.all()),
)
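
As a companion to the diff, a hypothetical sketch of the serialization pattern it extends; it assumes a configured paperless Django environment where `documents.models` is importable, exactly as in `document_exporter.py` above:

```python
# Sketch only: mirrors how document_exporter builds its manifest, adding
# StoragePath the same way the golden diff does. Must run inside the
# paperless Django project with settings configured.
import json

from django.core import serializers
from documents.models import StoragePath

manifest = []
manifest += json.loads(serializers.serialize("json", StoragePath.objects.all()))

# Each serialized entry records its model label and fields, so storage paths
# now appear in manifest.json rather than existing only in the database.
print(json.dumps(manifest, indent=2))
```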
verification_info:
{"golden_diff": "diff --git a/src/documents/management/commands/document_exporter.py b/src/documents/management/commands/document_exporter.py\n--- a/src/documents/management/commands/document_exporter.py\n+++ b/src/documents/management/commands/document_exporter.py\n@@ -18,6 +18,7 @@\n from documents.models import DocumentType\n from documents.models import SavedView\n from documents.models import SavedViewFilterRule\n+from documents.models import StoragePath\n from documents.models import Tag\n from documents.models import UiSettings\n from documents.settings import EXPORTER_ARCHIVE_NAME\n@@ -114,8 +115,8 @@\n map(lambda f: os.path.abspath(os.path.join(root, f)), files),\n )\n \n- # 2. Create manifest, containing all correspondents, types, tags,\n- # documents and ui_settings\n+ # 2. Create manifest, containing all correspondents, types, tags, storage paths\n+ # comments, documents and ui_settings\n with transaction.atomic():\n manifest = json.loads(\n serializers.serialize(\"json\", Correspondent.objects.all()),\n@@ -127,6 +128,10 @@\n serializers.serialize(\"json\", DocumentType.objects.all()),\n )\n \n+ manifest += json.loads(\n+ serializers.serialize(\"json\", StoragePath.objects.all()),\n+ )\n+\n manifest += json.loads(\n serializers.serialize(\"json\", Comment.objects.all()),\n )\n", "issue": "[BUG] storage_path is missing in manifest.json\n### Description\n\nthere is no \"model\": \"documents.storage_path\" in manifest.json. I found it only in db.sqlite\n\n### Steps to reproduce\n\nopen export/manifest.json after `docker compose exec webserver document_exporter ../export -f`\n\n### Webserver logs\n\n_No response_\n\n### Paperless-ngx version\n\n1.8.0\n\n### Host OS\n\nRaspberry Pi 4/arm64\n\n### Installation method\n\nDocker - official image\n\n### Browser\n\n_No response_\n\n### Configuration changes\n\n_No response_\n\n### Other\n\n_No response_\n", "before_files": [{"content": "import hashlib\nimport json\nimport os\nimport shutil\nimport time\n\nimport tqdm\nfrom django.conf import settings\nfrom django.contrib.auth.models import Group\nfrom django.contrib.auth.models import User\nfrom django.core import serializers\nfrom django.core.management.base import BaseCommand\nfrom django.core.management.base import CommandError\nfrom django.db import transaction\nfrom documents.models import Comment\nfrom documents.models import Correspondent\nfrom documents.models import Document\nfrom documents.models import DocumentType\nfrom documents.models import SavedView\nfrom documents.models import SavedViewFilterRule\nfrom documents.models import Tag\nfrom documents.models import UiSettings\nfrom documents.settings import EXPORTER_ARCHIVE_NAME\nfrom documents.settings import EXPORTER_FILE_NAME\nfrom documents.settings import EXPORTER_THUMBNAIL_NAME\nfrom filelock import FileLock\nfrom paperless import version\nfrom paperless.db import GnuPG\nfrom paperless_mail.models import MailAccount\nfrom paperless_mail.models import MailRule\n\nfrom ...file_handling import delete_empty_directories\nfrom ...file_handling import generate_filename\n\n\nclass Command(BaseCommand):\n\n help = \"\"\"\n Decrypt and rename all files in our collection into a given target\n directory. 
And include a manifest file containing document data for\n easy import.\n \"\"\".replace(\n \" \",\n \"\",\n )\n\n def add_arguments(self, parser):\n parser.add_argument(\"target\")\n\n parser.add_argument(\n \"-c\",\n \"--compare-checksums\",\n default=False,\n action=\"store_true\",\n help=\"Compare file checksums when determining whether to export \"\n \"a file or not. If not specified, file size and time \"\n \"modified is used instead.\",\n )\n\n parser.add_argument(\n \"-f\",\n \"--use-filename-format\",\n default=False,\n action=\"store_true\",\n help=\"Use PAPERLESS_FILENAME_FORMAT for storing files in the \"\n \"export directory, if configured.\",\n )\n\n parser.add_argument(\n \"-d\",\n \"--delete\",\n default=False,\n action=\"store_true\",\n help=\"After exporting, delete files in the export directory that \"\n \"do not belong to the current export, such as files from \"\n \"deleted documents.\",\n )\n parser.add_argument(\n \"--no-progress-bar\",\n default=False,\n action=\"store_true\",\n help=\"If set, the progress bar will not be shown\",\n )\n\n def __init__(self, *args, **kwargs):\n BaseCommand.__init__(self, *args, **kwargs)\n self.target = None\n self.files_in_export_dir = []\n self.exported_files = []\n self.compare_checksums = False\n self.use_filename_format = False\n self.delete = False\n\n def handle(self, *args, **options):\n\n self.target = options[\"target\"]\n self.compare_checksums = options[\"compare_checksums\"]\n self.use_filename_format = options[\"use_filename_format\"]\n self.delete = options[\"delete\"]\n\n if not os.path.exists(self.target):\n raise CommandError(\"That path doesn't exist\")\n\n if not os.access(self.target, os.W_OK):\n raise CommandError(\"That path doesn't appear to be writable\")\n\n with FileLock(settings.MEDIA_LOCK):\n self.dump(options[\"no_progress_bar\"])\n\n def dump(self, progress_bar_disable=False):\n # 1. Take a snapshot of what files exist in the current export folder\n for root, dirs, files in os.walk(self.target):\n self.files_in_export_dir.extend(\n map(lambda f: os.path.abspath(os.path.join(root, f)), files),\n )\n\n # 2. Create manifest, containing all correspondents, types, tags,\n # documents and ui_settings\n with transaction.atomic():\n manifest = json.loads(\n serializers.serialize(\"json\", Correspondent.objects.all()),\n )\n\n manifest += json.loads(serializers.serialize(\"json\", Tag.objects.all()))\n\n manifest += json.loads(\n serializers.serialize(\"json\", DocumentType.objects.all()),\n )\n\n manifest += json.loads(\n serializers.serialize(\"json\", Comment.objects.all()),\n )\n\n documents = Document.objects.order_by(\"id\")\n document_map = {d.pk: d for d in documents}\n document_manifest = json.loads(serializers.serialize(\"json\", documents))\n manifest += document_manifest\n\n manifest += json.loads(\n serializers.serialize(\"json\", MailAccount.objects.all()),\n )\n\n manifest += json.loads(\n serializers.serialize(\"json\", MailRule.objects.all()),\n )\n\n manifest += json.loads(\n serializers.serialize(\"json\", SavedView.objects.all()),\n )\n\n manifest += json.loads(\n serializers.serialize(\"json\", SavedViewFilterRule.objects.all()),\n )\n\n manifest += json.loads(serializers.serialize(\"json\", Group.objects.all()))\n\n manifest += json.loads(serializers.serialize(\"json\", User.objects.all()))\n\n manifest += json.loads(\n serializers.serialize(\"json\", UiSettings.objects.all()),\n )\n\n # 3. 
Export files from each document\n for index, document_dict in tqdm.tqdm(\n enumerate(document_manifest),\n total=len(document_manifest),\n disable=progress_bar_disable,\n ):\n # 3.1. store files unencrypted\n document_dict[\"fields\"][\"storage_type\"] = Document.STORAGE_TYPE_UNENCRYPTED\n\n document = document_map[document_dict[\"pk\"]]\n\n # 3.2. generate a unique filename\n filename_counter = 0\n while True:\n if self.use_filename_format:\n base_name = generate_filename(\n document,\n counter=filename_counter,\n append_gpg=False,\n )\n else:\n base_name = document.get_public_filename(counter=filename_counter)\n\n if base_name not in self.exported_files:\n self.exported_files.append(base_name)\n break\n else:\n filename_counter += 1\n\n # 3.3. write filenames into manifest\n original_name = base_name\n original_target = os.path.join(self.target, original_name)\n document_dict[EXPORTER_FILE_NAME] = original_name\n\n thumbnail_name = base_name + \"-thumbnail.webp\"\n thumbnail_target = os.path.join(self.target, thumbnail_name)\n document_dict[EXPORTER_THUMBNAIL_NAME] = thumbnail_name\n\n if document.has_archive_version:\n archive_name = base_name + \"-archive.pdf\"\n archive_target = os.path.join(self.target, archive_name)\n document_dict[EXPORTER_ARCHIVE_NAME] = archive_name\n else:\n archive_target = None\n\n # 3.4. write files to target folder\n t = int(time.mktime(document.created.timetuple()))\n if document.storage_type == Document.STORAGE_TYPE_GPG:\n\n os.makedirs(os.path.dirname(original_target), exist_ok=True)\n with open(original_target, \"wb\") as f:\n with document.source_file as out_file:\n f.write(GnuPG.decrypted(out_file))\n os.utime(original_target, times=(t, t))\n\n os.makedirs(os.path.dirname(thumbnail_target), exist_ok=True)\n with open(thumbnail_target, \"wb\") as f:\n with document.thumbnail_file as out_file:\n f.write(GnuPG.decrypted(out_file))\n os.utime(thumbnail_target, times=(t, t))\n\n if archive_target:\n os.makedirs(os.path.dirname(archive_target), exist_ok=True)\n with open(archive_target, \"wb\") as f:\n with document.archive_path as out_file:\n f.write(GnuPG.decrypted(out_file))\n os.utime(archive_target, times=(t, t))\n else:\n self.check_and_copy(\n document.source_path,\n document.checksum,\n original_target,\n )\n\n self.check_and_copy(document.thumbnail_path, None, thumbnail_target)\n\n if archive_target:\n self.check_and_copy(\n document.archive_path,\n document.archive_checksum,\n archive_target,\n )\n\n # 4.1 write manifest to target folder\n manifest_path = os.path.abspath(os.path.join(self.target, \"manifest.json\"))\n\n with open(manifest_path, \"w\") as f:\n json.dump(manifest, f, indent=2)\n\n # 4.2 write version information to target folder\n version_path = os.path.abspath(os.path.join(self.target, \"version.json\"))\n\n with open(version_path, \"w\") as f:\n json.dump({\"version\": version.__full_version_str__}, f, indent=2)\n\n if self.delete:\n # 5. 
Remove files which we did not explicitly export in this run\n\n if manifest_path in self.files_in_export_dir:\n self.files_in_export_dir.remove(manifest_path)\n\n for f in self.files_in_export_dir:\n os.remove(f)\n\n delete_empty_directories(\n os.path.abspath(os.path.dirname(f)),\n os.path.abspath(self.target),\n )\n\n def check_and_copy(self, source, source_checksum, target):\n if os.path.abspath(target) in self.files_in_export_dir:\n self.files_in_export_dir.remove(os.path.abspath(target))\n\n perform_copy = False\n\n if os.path.exists(target):\n source_stat = os.stat(source)\n target_stat = os.stat(target)\n if self.compare_checksums and source_checksum:\n with open(target, \"rb\") as f:\n target_checksum = hashlib.md5(f.read()).hexdigest()\n perform_copy = target_checksum != source_checksum\n elif source_stat.st_mtime != target_stat.st_mtime:\n perform_copy = True\n elif source_stat.st_size != target_stat.st_size:\n perform_copy = True\n else:\n # Copy if it does not exist\n perform_copy = True\n\n if perform_copy:\n os.makedirs(os.path.dirname(target), exist_ok=True)\n shutil.copy2(source, target)\n", "path": "src/documents/management/commands/document_exporter.py"}], "after_files": [{"content": "import hashlib\nimport json\nimport os\nimport shutil\nimport time\n\nimport tqdm\nfrom django.conf import settings\nfrom django.contrib.auth.models import Group\nfrom django.contrib.auth.models import User\nfrom django.core import serializers\nfrom django.core.management.base import BaseCommand\nfrom django.core.management.base import CommandError\nfrom django.db import transaction\nfrom documents.models import Comment\nfrom documents.models import Correspondent\nfrom documents.models import Document\nfrom documents.models import DocumentType\nfrom documents.models import SavedView\nfrom documents.models import SavedViewFilterRule\nfrom documents.models import StoragePath\nfrom documents.models import Tag\nfrom documents.models import UiSettings\nfrom documents.settings import EXPORTER_ARCHIVE_NAME\nfrom documents.settings import EXPORTER_FILE_NAME\nfrom documents.settings import EXPORTER_THUMBNAIL_NAME\nfrom filelock import FileLock\nfrom paperless import version\nfrom paperless.db import GnuPG\nfrom paperless_mail.models import MailAccount\nfrom paperless_mail.models import MailRule\n\nfrom ...file_handling import delete_empty_directories\nfrom ...file_handling import generate_filename\n\n\nclass Command(BaseCommand):\n\n help = \"\"\"\n Decrypt and rename all files in our collection into a given target\n directory. And include a manifest file containing document data for\n easy import.\n \"\"\".replace(\n \" \",\n \"\",\n )\n\n def add_arguments(self, parser):\n parser.add_argument(\"target\")\n\n parser.add_argument(\n \"-c\",\n \"--compare-checksums\",\n default=False,\n action=\"store_true\",\n help=\"Compare file checksums when determining whether to export \"\n \"a file or not. 
If not specified, file size and time \"\n \"modified is used instead.\",\n )\n\n parser.add_argument(\n \"-f\",\n \"--use-filename-format\",\n default=False,\n action=\"store_true\",\n help=\"Use PAPERLESS_FILENAME_FORMAT for storing files in the \"\n \"export directory, if configured.\",\n )\n\n parser.add_argument(\n \"-d\",\n \"--delete\",\n default=False,\n action=\"store_true\",\n help=\"After exporting, delete files in the export directory that \"\n \"do not belong to the current export, such as files from \"\n \"deleted documents.\",\n )\n parser.add_argument(\n \"--no-progress-bar\",\n default=False,\n action=\"store_true\",\n help=\"If set, the progress bar will not be shown\",\n )\n\n def __init__(self, *args, **kwargs):\n BaseCommand.__init__(self, *args, **kwargs)\n self.target = None\n self.files_in_export_dir = []\n self.exported_files = []\n self.compare_checksums = False\n self.use_filename_format = False\n self.delete = False\n\n def handle(self, *args, **options):\n\n self.target = options[\"target\"]\n self.compare_checksums = options[\"compare_checksums\"]\n self.use_filename_format = options[\"use_filename_format\"]\n self.delete = options[\"delete\"]\n\n if not os.path.exists(self.target):\n raise CommandError(\"That path doesn't exist\")\n\n if not os.access(self.target, os.W_OK):\n raise CommandError(\"That path doesn't appear to be writable\")\n\n with FileLock(settings.MEDIA_LOCK):\n self.dump(options[\"no_progress_bar\"])\n\n def dump(self, progress_bar_disable=False):\n # 1. Take a snapshot of what files exist in the current export folder\n for root, dirs, files in os.walk(self.target):\n self.files_in_export_dir.extend(\n map(lambda f: os.path.abspath(os.path.join(root, f)), files),\n )\n\n # 2. Create manifest, containing all correspondents, types, tags, storage paths\n # comments, documents and ui_settings\n with transaction.atomic():\n manifest = json.loads(\n serializers.serialize(\"json\", Correspondent.objects.all()),\n )\n\n manifest += json.loads(serializers.serialize(\"json\", Tag.objects.all()))\n\n manifest += json.loads(\n serializers.serialize(\"json\", DocumentType.objects.all()),\n )\n\n manifest += json.loads(\n serializers.serialize(\"json\", StoragePath.objects.all()),\n )\n\n manifest += json.loads(\n serializers.serialize(\"json\", Comment.objects.all()),\n )\n\n documents = Document.objects.order_by(\"id\")\n document_map = {d.pk: d for d in documents}\n document_manifest = json.loads(serializers.serialize(\"json\", documents))\n manifest += document_manifest\n\n manifest += json.loads(\n serializers.serialize(\"json\", MailAccount.objects.all()),\n )\n\n manifest += json.loads(\n serializers.serialize(\"json\", MailRule.objects.all()),\n )\n\n manifest += json.loads(\n serializers.serialize(\"json\", SavedView.objects.all()),\n )\n\n manifest += json.loads(\n serializers.serialize(\"json\", SavedViewFilterRule.objects.all()),\n )\n\n manifest += json.loads(serializers.serialize(\"json\", Group.objects.all()))\n\n manifest += json.loads(serializers.serialize(\"json\", User.objects.all()))\n\n manifest += json.loads(\n serializers.serialize(\"json\", UiSettings.objects.all()),\n )\n\n # 3. Export files from each document\n for index, document_dict in tqdm.tqdm(\n enumerate(document_manifest),\n total=len(document_manifest),\n disable=progress_bar_disable,\n ):\n # 3.1. store files unencrypted\n document_dict[\"fields\"][\"storage_type\"] = Document.STORAGE_TYPE_UNENCRYPTED\n\n document = document_map[document_dict[\"pk\"]]\n\n # 3.2. 
generate a unique filename\n filename_counter = 0\n while True:\n if self.use_filename_format:\n base_name = generate_filename(\n document,\n counter=filename_counter,\n append_gpg=False,\n )\n else:\n base_name = document.get_public_filename(counter=filename_counter)\n\n if base_name not in self.exported_files:\n self.exported_files.append(base_name)\n break\n else:\n filename_counter += 1\n\n # 3.3. write filenames into manifest\n original_name = base_name\n original_target = os.path.join(self.target, original_name)\n document_dict[EXPORTER_FILE_NAME] = original_name\n\n thumbnail_name = base_name + \"-thumbnail.webp\"\n thumbnail_target = os.path.join(self.target, thumbnail_name)\n document_dict[EXPORTER_THUMBNAIL_NAME] = thumbnail_name\n\n if document.has_archive_version:\n archive_name = base_name + \"-archive.pdf\"\n archive_target = os.path.join(self.target, archive_name)\n document_dict[EXPORTER_ARCHIVE_NAME] = archive_name\n else:\n archive_target = None\n\n # 3.4. write files to target folder\n t = int(time.mktime(document.created.timetuple()))\n if document.storage_type == Document.STORAGE_TYPE_GPG:\n\n os.makedirs(os.path.dirname(original_target), exist_ok=True)\n with open(original_target, \"wb\") as f:\n with document.source_file as out_file:\n f.write(GnuPG.decrypted(out_file))\n os.utime(original_target, times=(t, t))\n\n os.makedirs(os.path.dirname(thumbnail_target), exist_ok=True)\n with open(thumbnail_target, \"wb\") as f:\n with document.thumbnail_file as out_file:\n f.write(GnuPG.decrypted(out_file))\n os.utime(thumbnail_target, times=(t, t))\n\n if archive_target:\n os.makedirs(os.path.dirname(archive_target), exist_ok=True)\n with open(archive_target, \"wb\") as f:\n with document.archive_path as out_file:\n f.write(GnuPG.decrypted(out_file))\n os.utime(archive_target, times=(t, t))\n else:\n self.check_and_copy(\n document.source_path,\n document.checksum,\n original_target,\n )\n\n self.check_and_copy(document.thumbnail_path, None, thumbnail_target)\n\n if archive_target:\n self.check_and_copy(\n document.archive_path,\n document.archive_checksum,\n archive_target,\n )\n\n # 4.1 write manifest to target folder\n manifest_path = os.path.abspath(os.path.join(self.target, \"manifest.json\"))\n\n with open(manifest_path, \"w\") as f:\n json.dump(manifest, f, indent=2)\n\n # 4.2 write version information to target folder\n version_path = os.path.abspath(os.path.join(self.target, \"version.json\"))\n\n with open(version_path, \"w\") as f:\n json.dump({\"version\": version.__full_version_str__}, f, indent=2)\n\n if self.delete:\n # 5. 
Remove files which we did not explicitly export in this run\n\n if manifest_path in self.files_in_export_dir:\n self.files_in_export_dir.remove(manifest_path)\n\n for f in self.files_in_export_dir:\n os.remove(f)\n\n delete_empty_directories(\n os.path.abspath(os.path.dirname(f)),\n os.path.abspath(self.target),\n )\n\n def check_and_copy(self, source, source_checksum, target):\n if os.path.abspath(target) in self.files_in_export_dir:\n self.files_in_export_dir.remove(os.path.abspath(target))\n\n perform_copy = False\n\n if os.path.exists(target):\n source_stat = os.stat(source)\n target_stat = os.stat(target)\n if self.compare_checksums and source_checksum:\n with open(target, \"rb\") as f:\n target_checksum = hashlib.md5(f.read()).hexdigest()\n perform_copy = target_checksum != source_checksum\n elif source_stat.st_mtime != target_stat.st_mtime:\n perform_copy = True\n elif source_stat.st_size != target_stat.st_size:\n perform_copy = True\n else:\n # Copy if it does not exist\n perform_copy = True\n\n if perform_copy:\n os.makedirs(os.path.dirname(target), exist_ok=True)\n shutil.copy2(source, target)\n", "path": "src/documents/management/commands/document_exporter.py"}]}
| 3,239 | 300 |
problem_id: gh_patches_debug_15487
source: rasdani/github-patches
task_type: git_diff
in_source_id: mkdocs__mkdocs-3511
prompt:
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Building MkDocs' documentation
When I began working on #3493 I needed to be able to run the dev server with MkDocs' own documentation, which was more difficult than it should have been.
First, let me say that I always work from a venv, and created a new one to start my work. In the past, one could simply do `pip install -r requirements/docs.txt` and then `mkdocs serve` and it worked. But I had to jump through a lot of hoops and learn new tools to get things working this time.
To be clear, I am not suggesting that the old tools should be brought back. Nor am I suggesting that my preferred tools be used. However, I could find no documentation about how to proceed. Eventually, I did find documentation that hatch is used for tests, and looking at `pyproject.toml` I could see a config for a hatch `docs` env. But not having ever used that tool before, it took me multiple tries (and searches) to work out how to even use that env. But, I'm getting ahead of myself...
After realizing that there were no requirements.txt files, I next looked for optional dependencies defined in `pyproject.toml`. I was hoping to maybe do `pip install .[docs]` (`.` rather than `markdown` because I was working from the working tree of the git repo). When I determined that that wasn't an option, I began looking into the hatch options. Finally in some random question in some forum I found an explanation of how to run the `shell` subcommand with an alternate env: `hatch -e docs shell`.
And then I could finally run `mkdocs serve`. Except that I got an error about missing `po` files, which is weird because I am using English, which should work without any translations being defined. Finally, after generating `po` and `mo` files, I could run the dev server and begin my work.
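
In shell form, a condensed sketch of that sequence; the two commands are quoted from the issue, and the catalog-generation step is only summarized in a comment because the exact command is not stated:

```bash
# Assumes a fresh virtualenv inside a checkout of the MkDocs repository.
hatch -e docs shell   # enter hatch's 'docs' environment defined in pyproject.toml
# ...generate the translation catalogs (.po/.mo) here; the issue says this was
# needed before the dev server would start, but does not give the command.
mkdocs serve          # run the dev server against MkDocs' own documentation
```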
All of this led me to believe that the current maintainers are not ever running the dev server with MkDocs documentation. And I also could not find any automations for deploying the documentation, so I couldn't even use that as a reference.
Again, I am not being critical of the tool choices. I see that a switch was made from tox to hatch. That's fine if that is the tool that the maintainers want to use. But for an occasional contributor, I would prefer to not need to learn these tools. I would prefer to be able to use standard Python tools that work with any project. Or if I do need to use your tool of choice, then I would expect the specific commands I would need to use to all be documented clearly.
I could submit a PR which updated the documentation, but I'm not sure what the recommended best practices are here. I am simply bringing this to the attention of the maintainers with the hopes that more consideration will be given to this in the future.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/hooks.py`
Content:
```
1 import re
2 from pathlib import Path
3
4 from mkdocs.config.defaults import MkDocsConfig
5 from mkdocs.structure.nav import Page
6
7
8 def _get_language_of_translation_file(path: Path) -> str:
9 with path.open(encoding='utf-8') as f:
10 translation_line = f.readline()
11 m = re.search('^# (.+) translations ', translation_line)
12 assert m
13 return m[1]
14
15
16 def on_page_markdown(markdown: str, page: Page, config: MkDocsConfig, **kwargs):
17 if page.file.src_uri == 'user-guide/choosing-your-theme.md':
18 here = Path(config.config_file_path).parent
19
20 def replacement(m: re.Match) -> str:
21 lines = []
22 for d in sorted(here.glob(m[2])):
23 lang = _get_language_of_translation_file(Path(d, 'LC_MESSAGES', 'messages.po'))
24 lines.append(f'{m[1]}`{d.name}`: {lang}')
25 return '\n'.join(lines)
26
27 return re.sub(
28 r'^( *\* )\(see the list of existing directories `(.+)`\)$',
29 replacement,
30 markdown,
31 flags=re.MULTILINE,
32 )
33
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/hooks.py b/docs/hooks.py
--- a/docs/hooks.py
+++ b/docs/hooks.py
@@ -1,8 +1,12 @@
+from __future__ import annotations
+
import re
from pathlib import Path
+from typing import TYPE_CHECKING
-from mkdocs.config.defaults import MkDocsConfig
-from mkdocs.structure.nav import Page
+if TYPE_CHECKING:
+ from mkdocs.config.defaults import MkDocsConfig
+ from mkdocs.structure.nav import Page
def _get_language_of_translation_file(path: Path) -> str:
@@ -13,7 +17,7 @@
return m[1]
-def on_page_markdown(markdown: str, page: Page, config: MkDocsConfig, **kwargs):
+def on_page_markdown(markdown: str, page: Page, config: MkDocsConfig, **kwargs) -> str | None:
if page.file.src_uri == 'user-guide/choosing-your-theme.md':
here = Path(config.config_file_path).parent
|
{"golden_diff": "diff --git a/docs/hooks.py b/docs/hooks.py\n--- a/docs/hooks.py\n+++ b/docs/hooks.py\n@@ -1,8 +1,12 @@\n+from __future__ import annotations\n+\n import re\n from pathlib import Path\n+from typing import TYPE_CHECKING\n \n-from mkdocs.config.defaults import MkDocsConfig\n-from mkdocs.structure.nav import Page\n+if TYPE_CHECKING:\n+ from mkdocs.config.defaults import MkDocsConfig\n+ from mkdocs.structure.nav import Page\n \n \n def _get_language_of_translation_file(path: Path) -> str:\n@@ -13,7 +17,7 @@\n return m[1]\n \n \n-def on_page_markdown(markdown: str, page: Page, config: MkDocsConfig, **kwargs):\n+def on_page_markdown(markdown: str, page: Page, config: MkDocsConfig, **kwargs) -> str | None:\n if page.file.src_uri == 'user-guide/choosing-your-theme.md':\n here = Path(config.config_file_path).parent\n", "issue": "Building MkDocs' documentation\nWhen I began working on #3493 I needed to be able to run the dev server with MkDocs' own documentation, which was more difficult than it should have been.\r\n\r\nFirst, let me say that I always work from a venv, and created a new one to start my work. In the past, one could simply do `pip install -r requirements/docs.txt` and then `mkdocs serve` and it worked. But I had to jump through a lot of hoops and learn new tools to get things working this time.\r\n\r\nTo be clear, I am not suggesting that the old tools should be brought back. Nor am I suggesting that my preferred tools be used. However, I could find no documentation about how to proceed. Eventually, I did find documentation that hatch is used for tests, and looking at `pyproject.toml` I could see a config for a hatch `docs` env. But not having ever used that tool before, it took be multiple tries (and searches) to work out how to even use that env. But, I'm getting ahead of myself...\r\n\r\nAfter realizing that there were no requirements.txt files, I next looked for optional dependencies defined in `pyproject.toml`. I was hoping to maybe do `pip install .[docs]` (`.` rather than `markdown` because I was working from the working tree of the git repo). When I determined that that wasn't an option, I began looking into the hatch options. Finally in some random question in some forum I found an explanation of how to run the `shell` subcommand with an alternate env: `hatch -e docs shell`.\r\n\r\nAnd then I could finally run `mkdocs serve`. Except that I got an error about missing `po` files, which is weird because I am using English, which should work without any translations being defined. Finally, after generating `po` and `mo` files, I could run the dev server and begin my work.\r\n\r\nAll of this led me to believe that the current maintainers are not ever running the dev server with MkDocs documentation. And I also could not find any automations for deploying the documentation, so I couldn't even use that as a reference.\r\n\r\nAgain, I am not being critical of the tool choices. I see that a switch was made from tox to hatch. That's fine if that is the tool that the maintainers what to use. But for an occasional contributor, I would prefer to not need to learn these tools. I would prefer to be able to use standard Python tools that work with any project. Or if I do need to use your tool of choice, then I would expect the specific commands I would need to use to all be documented clearly.\r\n\r\nI could submit a PR which updated the documentation, but I'm not sure what the recommended best practices are here. 
I am simply bringing this to the attention of the maintainers with the hopes that more consideration will be given to this in the future.\n", "before_files": [{"content": "import re\nfrom pathlib import Path\n\nfrom mkdocs.config.defaults import MkDocsConfig\nfrom mkdocs.structure.nav import Page\n\n\ndef _get_language_of_translation_file(path: Path) -> str:\n with path.open(encoding='utf-8') as f:\n translation_line = f.readline()\n m = re.search('^# (.+) translations ', translation_line)\n assert m\n return m[1]\n\n\ndef on_page_markdown(markdown: str, page: Page, config: MkDocsConfig, **kwargs):\n if page.file.src_uri == 'user-guide/choosing-your-theme.md':\n here = Path(config.config_file_path).parent\n\n def replacement(m: re.Match) -> str:\n lines = []\n for d in sorted(here.glob(m[2])):\n lang = _get_language_of_translation_file(Path(d, 'LC_MESSAGES', 'messages.po'))\n lines.append(f'{m[1]}`{d.name}`: {lang}')\n return '\\n'.join(lines)\n\n return re.sub(\n r'^( *\\* )\\(see the list of existing directories `(.+)`\\)$',\n replacement,\n markdown,\n flags=re.MULTILINE,\n )\n", "path": "docs/hooks.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport re\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING\n\nif TYPE_CHECKING:\n from mkdocs.config.defaults import MkDocsConfig\n from mkdocs.structure.nav import Page\n\n\ndef _get_language_of_translation_file(path: Path) -> str:\n with path.open(encoding='utf-8') as f:\n translation_line = f.readline()\n m = re.search('^# (.+) translations ', translation_line)\n assert m\n return m[1]\n\n\ndef on_page_markdown(markdown: str, page: Page, config: MkDocsConfig, **kwargs) -> str | None:\n if page.file.src_uri == 'user-guide/choosing-your-theme.md':\n here = Path(config.config_file_path).parent\n\n def replacement(m: re.Match) -> str:\n lines = []\n for d in sorted(here.glob(m[2])):\n lang = _get_language_of_translation_file(Path(d, 'LC_MESSAGES', 'messages.po'))\n lines.append(f'{m[1]}`{d.name}`: {lang}')\n return '\\n'.join(lines)\n\n return re.sub(\n r'^( *\\* )\\(see the list of existing directories `(.+)`\\)$',\n replacement,\n markdown,\n flags=re.MULTILINE,\n )\n", "path": "docs/hooks.py"}]}
| 1,179 | 220 |
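For context, the docs/hooks.py patch above combines `from __future__ import annotations` (PEP 563) with `typing.TYPE_CHECKING`, so the MkDocs classes are only needed by static type checkers and are never imported at runtime. The following is a minimal, runnable sketch of that pattern rather than MkDocs code; `pathlib.Path` stands in for the `MkDocsConfig`/`Page` imports purely to keep the example self-contained.

```python
# Sketch of the TYPE_CHECKING pattern used in the docs/hooks.py patch.
# 'pathlib.Path' is an illustrative stand-in for the MkDocsConfig/Page imports.
from __future__ import annotations  # annotations are stored as strings and evaluated lazily

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Seen only by static type checkers (mypy, pyright); skipped at runtime,
    # so importing this module does not require the annotated dependency.
    from pathlib import Path


def describe(path: Path) -> str | None:
    # The 'Path' annotation is just a string at runtime, so no runtime import is needed here.
    return str(path) if path is not None else None


if __name__ == "__main__":
    import pathlib

    print(describe(pathlib.Path(".")))  # prints "."
```

Under this pattern a type checker still resolves the real types, while importing the module at runtime no longer pulls in the annotated dependency, which appears to be the point of the guard added in the golden diff.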
gh_patches_debug_27740
|
rasdani/github-patches
|
git_diff
|
facebookresearch__nevergrad-188
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Documentation is out of date for 0.2.0
Trying to run examples like:
https://github.com/facebookresearch/nevergrad/blob/master/docs/machinelearning.md
gives errors.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nevergrad/instrumentation/variables.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
2 # This source code is licensed under the MIT license found in the
3 # LICENSE file in the root directory of this source tree.
4
5 from typing import List, Optional, TypeVar, Union, Sequence, Any
6 import numpy as np
7 from . import discretization
8 from ..common.typetools import ArrayLike
9 from . import transforms
10 from . import utils
11
12
13 X = TypeVar("X")
14
15
16 class SoftmaxCategorical(utils.Variable[X]):
17 """Discrete set of n values transformed to a n-dim continuous variable.
18 Each of the dimension encodes a weight for a value, and the softmax of weights
19 provide probabilities for each possible value. A random value is sampled from
20 this distribution.
21
22 Parameter
23 ---------
24 possibilities: list
25 a list of possible values for the variable
26
27 Note
28 ----
29 Since the chosen value is drawn randomly, the use of this variable makes deterministic
30 functions become stochastic, hence "adding noise"
31 """
32
33 def __init__(self, possibilities: List[X], deterministic: bool = False) -> None:
34 self.deterministic = deterministic
35 self.possibilities = list(possibilities)
36
37 @property
38 def dimension(self) -> int:
39 return len(self.possibilities)
40
41 def data_to_argument(self, data: ArrayLike, deterministic: bool = False) -> X:
42 assert len(data) == len(self.possibilities)
43 deterministic = deterministic | self.deterministic
44 index = int(discretization.softmax_discretization(data, len(self.possibilities), deterministic=deterministic)[0])
45 return self.possibilities[index]
46
47 def argument_to_data(self, arg: X) -> ArrayLike:
48 assert arg in self.possibilities, f'{arg} not in allowed values: {self.possibilities}'
49 return discretization.inverse_softmax_discretization(self.possibilities.index(arg), len(self.possibilities))
50
51 def get_summary(self, data: ArrayLike) -> str:
52 output = self.data_to_argument(data, deterministic=True)
53 probas = discretization.softmax_probas(np.array(data, copy=False))
54 proba_str = ", ".join([f'"{s}": {round(100 * p)}%' for s, p in zip(self.possibilities, probas)])
55 return f"Value {output}, from data: {data} yielding probas: {proba_str}"
56
57 def _short_repr(self) -> str:
58 return "SC({}|{})".format(",".join([str(x) for x in self.possibilities]), int(self.deterministic))
59
60
61 class OrderedDiscrete(SoftmaxCategorical[X]):
62 """Discrete list of n values transformed to a 1-dim discontinuous variable.
63 A gaussian input yields a uniform distribution on the list of variables.
64
65 Parameter
66 ---------
67 possibilities: list
68 a list of possible values for the variable
69
70 Note
71 ----
72 The variables are assumed to be ordered.
73 """
74
75 @property
76 def dimension(self) -> int:
77 return 1
78
79 def data_to_argument(self, data: ArrayLike, deterministic: bool = False) -> X: # pylint: disable=arguments-differ, unused-argument
80 assert len(data) == 1
81 index = discretization.threshold_discretization(data, arity=len(self.possibilities))[0]
82 return self.possibilities[index]
83
84 def argument_to_data(self, arg: X) -> ArrayLike:
85 assert arg in self.possibilities, f'{arg} not in allowed values: {self.possibilities}'
86 index = self.possibilities.index(arg)
87 return discretization.inverse_threshold_discretization([index], len(self.possibilities))
88
89 def _short_repr(self) -> str:
90 return "OD({})".format(",".join([str(x) for x in self.possibilities]))
91
92
93 Y = Union[float, np.ndarray]
94
95
96 class Gaussian(utils.Variable[Y]):
97 """Gaussian variable with a mean and a standard deviation, and
98 possibly a shape (when using directly in Python)
99 The output will simply be mean + std * data
100 """
101
102 def __init__(self, mean: float, std: float, shape: Optional[Sequence[int]] = None) -> None:
103 self.mean = mean
104 self.std = std
105 self.shape = shape
106
107 @property
108 def dimension(self) -> int:
109 return 1 if self.shape is None else int(np.prod(self.shape))
110
111 def data_to_argument(self, data: ArrayLike, deterministic: bool = True) -> Y:
112 assert len(data) == self.dimension
113 x = data[0] if self.shape is None else np.reshape(data, self.shape)
114 return self.std * x + self.mean
115
116 def argument_to_data(self, arg: Y) -> ArrayLike:
117 return [(arg - self.mean) / self.std]
118
119 def _short_repr(self) -> str:
120 return f"G({self.mean},{self.std})"
121
122
123 class _Constant(utils.Variable[X]):
124 """Fake variable so that constant variables can fit into the
125 pipeline.
126 """
127
128 def __init__(self, value: X) -> None:
129 self.value = value
130
131 @classmethod
132 def convert_non_instrument(cls, x: Union[X, utils.Variable[X]]) -> utils.Variable[X]:
133 return x if isinstance(x, utils.Variable) else cls(x)
134
135 @property
136 def dimension(self) -> int:
137 return 0
138
139 def data_to_argument(self, data: ArrayLike, deterministic: bool = False) -> X: # pylint: disable=unused-argument
140 return self.value
141
142 def argument_to_data(self, arg: X) -> ArrayLike:
143 assert arg == self.value, f'{arg} != {self.value}'
144 return []
145
146 def get_summary(self, data: ArrayLike) -> str:
147 raise RuntimeError("Constant summary should not be called")
148
149 def _short_repr(self) -> str:
150 return f"{self.value}"
151
152
153 class Array(utils.Variable[Y]):
154 """Array variable of a given shape, on which several transforms can be applied.
155
156 Parameters
157 ----------
158 *dims: int
159 dimensions of the array (elements of shape)
160
161 Note
162 ----
163 Interesting methods (which can be chained):
164 - asfloat(): converts the array into a float (only for arrays with 1 element)
165 - with_transform(transform): apply a transform to the array
166 - affined(a, b): applies a*x+b
167 - bounded(min_val, max_val, transform="tanh"): applies a transform ("tanh" or "arctan")
168 so that output values are in range [min_val, max_val]
169 - exponentiated(base, coeff): applies base**(coeff * x)
170 """
171
172 def __init__(self, *dims: int) -> None:
173 self.transforms: List[Any] = []
174 self.shape = tuple(dims)
175 self._asfloat = False
176
177 @property
178 def dimension(self) -> int:
179 return int(np.prod(self.shape))
180
181 def data_to_argument(self, data: ArrayLike, deterministic: bool = False) -> Y: # pylint: disable=unused-argument
182 assert len(data) == self.dimension
183 array = np.array(data, copy=False)
184 for transf in self.transforms:
185 array = transf.forward(array)
186 if self._asfloat:
187 return float(array[0])
188 return array.reshape(self.shape)
189
190 def argument_to_data(self, arg: Y) -> np.ndarray:
191 if self._asfloat:
192 output = np.array([arg])
193 else:
194 output = np.array(arg, copy=False).ravel()
195 for transf in reversed(self.transforms):
196 output = transf.backward(output)
197 return output
198
199 def _short_repr(self) -> str:
200 dims = ",".join(str(d) for d in self.shape)
201 transf = "" if not self.transforms else (",[" + ",".join(f"{t:short}" for t in self.transforms) + "]")
202 fl = "" if not self._asfloat else "f"
203 return f"A({dims}{transf}){fl}"
204
205 def asfloat(self) -> 'Array':
206 if self.dimension != 1:
207 raise RuntimeError("Only Arrays with 1 element can be cast to float")
208 self._asfloat = True
209 return self
210
211 def with_transform(self, transform: transforms.Transform) -> 'Array':
212 self.transforms.append(transform)
213 return self
214
215 def exponentiated(self, base: float, coeff: float) -> 'Array':
216 """Exponentiation transform base ** (coeff * x)
217 This can for instance be used for to get a logarithmicly distruted values 10**(-[1, 2, 3]).
218
219 Parameters
220 ----------
221 base: float
222 coeff: float
223 """
224 return self.with_transform(transforms.Exponentiate(base=base, coeff=coeff))
225
226 def affined(self, a: float, b: float = 0.) -> 'Array':
227 """Affine transform a * x + b
228
229 Parameters
230 ----------
231 a: float
232 b: float
233 """
234 return self.with_transform(transforms.Affine(a=a, b=b))
235
236 def bounded(self, min_val: float, max_val: float, transform: str = "tanh") -> 'Array':
237 """Bounds all real values into [min_val, max_val] using a tanh transform.
238 Beware, tanh goes very fast to its limits.
239
240 Parameters
241 ----------
242 min_val: float
243 max_val: float
244 transform: str
245 either "tanh" or "arctan" (note that "tanh" reaches the boundaries really quickly,
246 while "arctan" is much softer)
247 """
248 if transform not in ["tanh", "arctan"]:
249 raise ValueError("Only 'tanh' and 'arctan' are allowed as transform")
250 Transf = transforms.ArctanBound if transform == "arctan" else transforms.TanhBound
251 return self.with_transform(Transf(min_val=min_val, max_val=max_val))
252
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nevergrad/instrumentation/variables.py b/nevergrad/instrumentation/variables.py
--- a/nevergrad/instrumentation/variables.py
+++ b/nevergrad/instrumentation/variables.py
@@ -58,7 +58,7 @@
return "SC({}|{})".format(",".join([str(x) for x in self.possibilities]), int(self.deterministic))
-class OrderedDiscrete(SoftmaxCategorical[X]):
+class OrderedDiscrete(utils.Variable[X]):
"""Discrete list of n values transformed to a 1-dim discontinuous variable.
A gaussian input yields a uniform distribution on the list of variables.
@@ -72,6 +72,9 @@
The variables are assumed to be ordered.
"""
+ def __init__(self, possibilities: List[X]) -> None:
+ self.possibilities = list(possibilities)
+
@property
def dimension(self) -> int:
return 1
@@ -108,7 +111,7 @@
def dimension(self) -> int:
return 1 if self.shape is None else int(np.prod(self.shape))
- def data_to_argument(self, data: ArrayLike, deterministic: bool = True) -> Y:
+ def data_to_argument(self, data: ArrayLike, deterministic: bool = True) -> Y: # pylint: disable=unused-argument
assert len(data) == self.dimension
x = data[0] if self.shape is None else np.reshape(data, self.shape)
return self.std * x + self.mean
|
{"golden_diff": "diff --git a/nevergrad/instrumentation/variables.py b/nevergrad/instrumentation/variables.py\n--- a/nevergrad/instrumentation/variables.py\n+++ b/nevergrad/instrumentation/variables.py\n@@ -58,7 +58,7 @@\n return \"SC({}|{})\".format(\",\".join([str(x) for x in self.possibilities]), int(self.deterministic))\n \n \n-class OrderedDiscrete(SoftmaxCategorical[X]):\n+class OrderedDiscrete(utils.Variable[X]):\n \"\"\"Discrete list of n values transformed to a 1-dim discontinuous variable.\n A gaussian input yields a uniform distribution on the list of variables.\n \n@@ -72,6 +72,9 @@\n The variables are assumed to be ordered.\n \"\"\"\n \n+ def __init__(self, possibilities: List[X]) -> None:\n+ self.possibilities = list(possibilities)\n+\n @property\n def dimension(self) -> int:\n return 1\n@@ -108,7 +111,7 @@\n def dimension(self) -> int:\n return 1 if self.shape is None else int(np.prod(self.shape))\n \n- def data_to_argument(self, data: ArrayLike, deterministic: bool = True) -> Y:\n+ def data_to_argument(self, data: ArrayLike, deterministic: bool = True) -> Y: # pylint: disable=unused-argument\n assert len(data) == self.dimension\n x = data[0] if self.shape is None else np.reshape(data, self.shape)\n return self.std * x + self.mean\n", "issue": "Documentation is out of date for 0.2.0\nTrying to run examples like:\r\n\r\nhttps://github.com/facebookresearch/nevergrad/blob/master/docs/machinelearning.md\r\n\r\ngives errors.\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom typing import List, Optional, TypeVar, Union, Sequence, Any\nimport numpy as np\nfrom . import discretization\nfrom ..common.typetools import ArrayLike\nfrom . import transforms\nfrom . import utils\n\n\nX = TypeVar(\"X\")\n\n\nclass SoftmaxCategorical(utils.Variable[X]):\n \"\"\"Discrete set of n values transformed to a n-dim continuous variable.\n Each of the dimension encodes a weight for a value, and the softmax of weights\n provide probabilities for each possible value. 
A random value is sampled from\n this distribution.\n\n Parameter\n ---------\n possibilities: list\n a list of possible values for the variable\n\n Note\n ----\n Since the chosen value is drawn randomly, the use of this variable makes deterministic\n functions become stochastic, hence \"adding noise\"\n \"\"\"\n\n def __init__(self, possibilities: List[X], deterministic: bool = False) -> None:\n self.deterministic = deterministic\n self.possibilities = list(possibilities)\n\n @property\n def dimension(self) -> int:\n return len(self.possibilities)\n\n def data_to_argument(self, data: ArrayLike, deterministic: bool = False) -> X:\n assert len(data) == len(self.possibilities)\n deterministic = deterministic | self.deterministic\n index = int(discretization.softmax_discretization(data, len(self.possibilities), deterministic=deterministic)[0])\n return self.possibilities[index]\n\n def argument_to_data(self, arg: X) -> ArrayLike:\n assert arg in self.possibilities, f'{arg} not in allowed values: {self.possibilities}'\n return discretization.inverse_softmax_discretization(self.possibilities.index(arg), len(self.possibilities))\n\n def get_summary(self, data: ArrayLike) -> str:\n output = self.data_to_argument(data, deterministic=True)\n probas = discretization.softmax_probas(np.array(data, copy=False))\n proba_str = \", \".join([f'\"{s}\": {round(100 * p)}%' for s, p in zip(self.possibilities, probas)])\n return f\"Value {output}, from data: {data} yielding probas: {proba_str}\"\n\n def _short_repr(self) -> str:\n return \"SC({}|{})\".format(\",\".join([str(x) for x in self.possibilities]), int(self.deterministic))\n\n\nclass OrderedDiscrete(SoftmaxCategorical[X]):\n \"\"\"Discrete list of n values transformed to a 1-dim discontinuous variable.\n A gaussian input yields a uniform distribution on the list of variables.\n\n Parameter\n ---------\n possibilities: list\n a list of possible values for the variable\n\n Note\n ----\n The variables are assumed to be ordered.\n \"\"\"\n\n @property\n def dimension(self) -> int:\n return 1\n\n def data_to_argument(self, data: ArrayLike, deterministic: bool = False) -> X: # pylint: disable=arguments-differ, unused-argument\n assert len(data) == 1\n index = discretization.threshold_discretization(data, arity=len(self.possibilities))[0]\n return self.possibilities[index]\n\n def argument_to_data(self, arg: X) -> ArrayLike:\n assert arg in self.possibilities, f'{arg} not in allowed values: {self.possibilities}'\n index = self.possibilities.index(arg)\n return discretization.inverse_threshold_discretization([index], len(self.possibilities))\n\n def _short_repr(self) -> str:\n return \"OD({})\".format(\",\".join([str(x) for x in self.possibilities]))\n\n\nY = Union[float, np.ndarray]\n\n\nclass Gaussian(utils.Variable[Y]):\n \"\"\"Gaussian variable with a mean and a standard deviation, and\n possibly a shape (when using directly in Python)\n The output will simply be mean + std * data\n \"\"\"\n\n def __init__(self, mean: float, std: float, shape: Optional[Sequence[int]] = None) -> None:\n self.mean = mean\n self.std = std\n self.shape = shape\n\n @property\n def dimension(self) -> int:\n return 1 if self.shape is None else int(np.prod(self.shape))\n\n def data_to_argument(self, data: ArrayLike, deterministic: bool = True) -> Y:\n assert len(data) == self.dimension\n x = data[0] if self.shape is None else np.reshape(data, self.shape)\n return self.std * x + self.mean\n\n def argument_to_data(self, arg: Y) -> ArrayLike:\n return [(arg - self.mean) / 
self.std]\n\n def _short_repr(self) -> str:\n return f\"G({self.mean},{self.std})\"\n\n\nclass _Constant(utils.Variable[X]):\n \"\"\"Fake variable so that constant variables can fit into the\n pipeline.\n \"\"\"\n\n def __init__(self, value: X) -> None:\n self.value = value\n\n @classmethod\n def convert_non_instrument(cls, x: Union[X, utils.Variable[X]]) -> utils.Variable[X]:\n return x if isinstance(x, utils.Variable) else cls(x)\n\n @property\n def dimension(self) -> int:\n return 0\n\n def data_to_argument(self, data: ArrayLike, deterministic: bool = False) -> X: # pylint: disable=unused-argument\n return self.value\n\n def argument_to_data(self, arg: X) -> ArrayLike:\n assert arg == self.value, f'{arg} != {self.value}'\n return []\n\n def get_summary(self, data: ArrayLike) -> str:\n raise RuntimeError(\"Constant summary should not be called\")\n\n def _short_repr(self) -> str:\n return f\"{self.value}\"\n\n\nclass Array(utils.Variable[Y]):\n \"\"\"Array variable of a given shape, on which several transforms can be applied.\n\n Parameters\n ----------\n *dims: int\n dimensions of the array (elements of shape)\n\n Note\n ----\n Interesting methods (which can be chained):\n - asfloat(): converts the array into a float (only for arrays with 1 element)\n - with_transform(transform): apply a transform to the array\n - affined(a, b): applies a*x+b\n - bounded(min_val, max_val, transform=\"tanh\"): applies a transform (\"tanh\" or \"arctan\")\n so that output values are in range [min_val, max_val]\n - exponentiated(base, coeff): applies base**(coeff * x)\n \"\"\"\n\n def __init__(self, *dims: int) -> None:\n self.transforms: List[Any] = []\n self.shape = tuple(dims)\n self._asfloat = False\n\n @property\n def dimension(self) -> int:\n return int(np.prod(self.shape))\n\n def data_to_argument(self, data: ArrayLike, deterministic: bool = False) -> Y: # pylint: disable=unused-argument\n assert len(data) == self.dimension\n array = np.array(data, copy=False)\n for transf in self.transforms:\n array = transf.forward(array)\n if self._asfloat:\n return float(array[0])\n return array.reshape(self.shape)\n\n def argument_to_data(self, arg: Y) -> np.ndarray:\n if self._asfloat:\n output = np.array([arg])\n else:\n output = np.array(arg, copy=False).ravel()\n for transf in reversed(self.transforms):\n output = transf.backward(output)\n return output\n\n def _short_repr(self) -> str:\n dims = \",\".join(str(d) for d in self.shape)\n transf = \"\" if not self.transforms else (\",[\" + \",\".join(f\"{t:short}\" for t in self.transforms) + \"]\")\n fl = \"\" if not self._asfloat else \"f\"\n return f\"A({dims}{transf}){fl}\"\n\n def asfloat(self) -> 'Array':\n if self.dimension != 1:\n raise RuntimeError(\"Only Arrays with 1 element can be cast to float\")\n self._asfloat = True\n return self\n\n def with_transform(self, transform: transforms.Transform) -> 'Array':\n self.transforms.append(transform)\n return self\n\n def exponentiated(self, base: float, coeff: float) -> 'Array':\n \"\"\"Exponentiation transform base ** (coeff * x)\n This can for instance be used for to get a logarithmicly distruted values 10**(-[1, 2, 3]).\n\n Parameters\n ----------\n base: float\n coeff: float\n \"\"\"\n return self.with_transform(transforms.Exponentiate(base=base, coeff=coeff))\n\n def affined(self, a: float, b: float = 0.) 
-> 'Array':\n \"\"\"Affine transform a * x + b\n\n Parameters\n ----------\n a: float\n b: float\n \"\"\"\n return self.with_transform(transforms.Affine(a=a, b=b))\n\n def bounded(self, min_val: float, max_val: float, transform: str = \"tanh\") -> 'Array':\n \"\"\"Bounds all real values into [min_val, max_val] using a tanh transform.\n Beware, tanh goes very fast to its limits.\n\n Parameters\n ----------\n min_val: float\n max_val: float\n transform: str\n either \"tanh\" or \"arctan\" (note that \"tanh\" reaches the boundaries really quickly,\n while \"arctan\" is much softer)\n \"\"\"\n if transform not in [\"tanh\", \"arctan\"]:\n raise ValueError(\"Only 'tanh' and 'arctan' are allowed as transform\")\n Transf = transforms.ArctanBound if transform == \"arctan\" else transforms.TanhBound\n return self.with_transform(Transf(min_val=min_val, max_val=max_val))\n", "path": "nevergrad/instrumentation/variables.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom typing import List, Optional, TypeVar, Union, Sequence, Any\nimport numpy as np\nfrom . import discretization\nfrom ..common.typetools import ArrayLike\nfrom . import transforms\nfrom . import utils\n\n\nX = TypeVar(\"X\")\n\n\nclass SoftmaxCategorical(utils.Variable[X]):\n \"\"\"Discrete set of n values transformed to a n-dim continuous variable.\n Each of the dimension encodes a weight for a value, and the softmax of weights\n provide probabilities for each possible value. A random value is sampled from\n this distribution.\n\n Parameter\n ---------\n possibilities: list\n a list of possible values for the variable\n\n Note\n ----\n Since the chosen value is drawn randomly, the use of this variable makes deterministic\n functions become stochastic, hence \"adding noise\"\n \"\"\"\n\n def __init__(self, possibilities: List[X], deterministic: bool = False) -> None:\n self.deterministic = deterministic\n self.possibilities = list(possibilities)\n\n @property\n def dimension(self) -> int:\n return len(self.possibilities)\n\n def data_to_argument(self, data: ArrayLike, deterministic: bool = False) -> X:\n assert len(data) == len(self.possibilities)\n deterministic = deterministic | self.deterministic\n index = int(discretization.softmax_discretization(data, len(self.possibilities), deterministic=deterministic)[0])\n return self.possibilities[index]\n\n def argument_to_data(self, arg: X) -> ArrayLike:\n assert arg in self.possibilities, f'{arg} not in allowed values: {self.possibilities}'\n return discretization.inverse_softmax_discretization(self.possibilities.index(arg), len(self.possibilities))\n\n def get_summary(self, data: ArrayLike) -> str:\n output = self.data_to_argument(data, deterministic=True)\n probas = discretization.softmax_probas(np.array(data, copy=False))\n proba_str = \", \".join([f'\"{s}\": {round(100 * p)}%' for s, p in zip(self.possibilities, probas)])\n return f\"Value {output}, from data: {data} yielding probas: {proba_str}\"\n\n def _short_repr(self) -> str:\n return \"SC({}|{})\".format(\",\".join([str(x) for x in self.possibilities]), int(self.deterministic))\n\n\nclass OrderedDiscrete(utils.Variable[X]):\n \"\"\"Discrete list of n values transformed to a 1-dim discontinuous variable.\n A gaussian input yields a uniform distribution on the list of variables.\n\n Parameter\n ---------\n possibilities: list\n a list of possible 
values for the variable\n\n Note\n ----\n The variables are assumed to be ordered.\n \"\"\"\n\n def __init__(self, possibilities: List[X]) -> None:\n self.possibilities = list(possibilities)\n\n @property\n def dimension(self) -> int:\n return 1\n\n def data_to_argument(self, data: ArrayLike, deterministic: bool = False) -> X: # pylint: disable=arguments-differ, unused-argument\n assert len(data) == 1\n index = discretization.threshold_discretization(data, arity=len(self.possibilities))[0]\n return self.possibilities[index]\n\n def argument_to_data(self, arg: X) -> ArrayLike:\n assert arg in self.possibilities, f'{arg} not in allowed values: {self.possibilities}'\n index = self.possibilities.index(arg)\n return discretization.inverse_threshold_discretization([index], len(self.possibilities))\n\n def _short_repr(self) -> str:\n return \"OD({})\".format(\",\".join([str(x) for x in self.possibilities]))\n\n\nY = Union[float, np.ndarray]\n\n\nclass Gaussian(utils.Variable[Y]):\n \"\"\"Gaussian variable with a mean and a standard deviation, and\n possibly a shape (when using directly in Python)\n The output will simply be mean + std * data\n \"\"\"\n\n def __init__(self, mean: float, std: float, shape: Optional[Sequence[int]] = None) -> None:\n self.mean = mean\n self.std = std\n self.shape = shape\n\n @property\n def dimension(self) -> int:\n return 1 if self.shape is None else int(np.prod(self.shape))\n\n def data_to_argument(self, data: ArrayLike, deterministic: bool = True) -> Y: # pylint: disable=unused-argument\n assert len(data) == self.dimension\n x = data[0] if self.shape is None else np.reshape(data, self.shape)\n return self.std * x + self.mean\n\n def argument_to_data(self, arg: Y) -> ArrayLike:\n return [(arg - self.mean) / self.std]\n\n def _short_repr(self) -> str:\n return f\"G({self.mean},{self.std})\"\n\n\nclass _Constant(utils.Variable[X]):\n \"\"\"Fake variable so that constant variables can fit into the\n pipeline.\n \"\"\"\n\n def __init__(self, value: X) -> None:\n self.value = value\n\n @classmethod\n def convert_non_instrument(cls, x: Union[X, utils.Variable[X]]) -> utils.Variable[X]:\n return x if isinstance(x, utils.Variable) else cls(x)\n\n @property\n def dimension(self) -> int:\n return 0\n\n def data_to_argument(self, data: ArrayLike, deterministic: bool = False) -> X: # pylint: disable=unused-argument\n return self.value\n\n def argument_to_data(self, arg: X) -> ArrayLike:\n assert arg == self.value, f'{arg} != {self.value}'\n return []\n\n def get_summary(self, data: ArrayLike) -> str:\n raise RuntimeError(\"Constant summary should not be called\")\n\n def _short_repr(self) -> str:\n return f\"{self.value}\"\n\n\nclass Array(utils.Variable[Y]):\n \"\"\"Array variable of a given shape, on which several transforms can be applied.\n\n Parameters\n ----------\n *dims: int\n dimensions of the array (elements of shape)\n\n Note\n ----\n Interesting methods (which can be chained):\n - asfloat(): converts the array into a float (only for arrays with 1 element)\n - with_transform(transform): apply a transform to the array\n - affined(a, b): applies a*x+b\n - bounded(min_val, max_val, transform=\"tanh\"): applies a transform (\"tanh\" or \"arctan\")\n so that output values are in range [min_val, max_val]\n - exponentiated(base, coeff): applies base**(coeff * x)\n \"\"\"\n\n def __init__(self, *dims: int) -> None:\n self.transforms: List[Any] = []\n self.shape = tuple(dims)\n self._asfloat = False\n\n @property\n def dimension(self) -> int:\n return 
int(np.prod(self.shape))\n\n def data_to_argument(self, data: ArrayLike, deterministic: bool = False) -> Y: # pylint: disable=unused-argument\n assert len(data) == self.dimension\n array = np.array(data, copy=False)\n for transf in self.transforms:\n array = transf.forward(array)\n if self._asfloat:\n return float(array[0])\n return array.reshape(self.shape)\n\n def argument_to_data(self, arg: Y) -> np.ndarray:\n if self._asfloat:\n output = np.array([arg])\n else:\n output = np.array(arg, copy=False).ravel()\n for transf in reversed(self.transforms):\n output = transf.backward(output)\n return output\n\n def _short_repr(self) -> str:\n dims = \",\".join(str(d) for d in self.shape)\n transf = \"\" if not self.transforms else (\",[\" + \",\".join(f\"{t:short}\" for t in self.transforms) + \"]\")\n fl = \"\" if not self._asfloat else \"f\"\n return f\"A({dims}{transf}){fl}\"\n\n def asfloat(self) -> 'Array':\n if self.dimension != 1:\n raise RuntimeError(\"Only Arrays with 1 element can be cast to float\")\n self._asfloat = True\n return self\n\n def with_transform(self, transform: transforms.Transform) -> 'Array':\n self.transforms.append(transform)\n return self\n\n def exponentiated(self, base: float, coeff: float) -> 'Array':\n \"\"\"Exponentiation transform base ** (coeff * x)\n This can for instance be used for to get a logarithmicly distruted values 10**(-[1, 2, 3]).\n\n Parameters\n ----------\n base: float\n coeff: float\n \"\"\"\n return self.with_transform(transforms.Exponentiate(base=base, coeff=coeff))\n\n def affined(self, a: float, b: float = 0.) -> 'Array':\n \"\"\"Affine transform a * x + b\n\n Parameters\n ----------\n a: float\n b: float\n \"\"\"\n return self.with_transform(transforms.Affine(a=a, b=b))\n\n def bounded(self, min_val: float, max_val: float, transform: str = \"tanh\") -> 'Array':\n \"\"\"Bounds all real values into [min_val, max_val] using a tanh transform.\n Beware, tanh goes very fast to its limits.\n\n Parameters\n ----------\n min_val: float\n max_val: float\n transform: str\n either \"tanh\" or \"arctan\" (note that \"tanh\" reaches the boundaries really quickly,\n while \"arctan\" is much softer)\n \"\"\"\n if transform not in [\"tanh\", \"arctan\"]:\n raise ValueError(\"Only 'tanh' and 'arctan' are allowed as transform\")\n Transf = transforms.ArctanBound if transform == \"arctan\" else transforms.TanhBound\n return self.with_transform(Transf(min_val=min_val, max_val=max_val))\n", "path": "nevergrad/instrumentation/variables.py"}]}
| 3,171 | 347 |
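As background for the nevergrad patch above: the `OrderedDiscrete` docstring states that a gaussian input yields a uniform distribution on the list of variables, and the golden diff mainly stops `OrderedDiscrete` from inheriting `SoftmaxCategorical`'s constructor (which expects a `deterministic` flag) by giving it a plain `__init__(self, possibilities)`. The sketch below only illustrates the gaussian-to-uniform mapping idea with the standard normal CDF; it is not nevergrad's `discretization` module, and the function name and bucket logic are assumptions made for the example.

```python
# Illustration of mapping a 1-dim gaussian sample uniformly onto an ordered list,
# which is the behaviour OrderedDiscrete's docstring describes. Not nevergrad code.
from statistics import NormalDist
from typing import List, TypeVar

T = TypeVar("T")


def threshold_choice(x: float, possibilities: List[T]) -> T:
    # For x ~ N(0, 1), NormalDist().cdf(x) is uniform on (0, 1),
    # so equal-width buckets give every value the same probability.
    u = NormalDist().cdf(x)
    index = min(int(u * len(possibilities)), len(possibilities) - 1)
    return possibilities[index]


if __name__ == "__main__":
    print(threshold_choice(0.0, ["low", "medium", "high"]))   # "medium"
    print(threshold_choice(-2.0, ["low", "medium", "high"]))  # "low"
```

With three possibilities, samples near zero land in the middle bucket and samples in the tails land at the ends, each value being chosen with probability one third overall.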
gh_patches_debug_33209
|
rasdani/github-patches
|
git_diff
|
freqtrade__freqtrade-4697
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Docker container as non root user
The current docker container uses the root user by default. This causes permission problems on the host side. For example, I can't edit files in bind-mounted folders without sudo. 
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `freqtrade/commands/build_config_commands.py`
Content:
```
1 import logging
2 import secrets
3 from pathlib import Path
4 from typing import Any, Dict, List
5
6 from questionary import Separator, prompt
7
8 from freqtrade.constants import UNLIMITED_STAKE_AMOUNT
9 from freqtrade.exceptions import OperationalException
10 from freqtrade.exchange import MAP_EXCHANGE_CHILDCLASS, available_exchanges
11 from freqtrade.misc import render_template
12
13
14 logger = logging.getLogger(__name__)
15
16
17 def validate_is_int(val):
18 try:
19 _ = int(val)
20 return True
21 except Exception:
22 return False
23
24
25 def validate_is_float(val):
26 try:
27 _ = float(val)
28 return True
29 except Exception:
30 return False
31
32
33 def ask_user_overwrite(config_path: Path) -> bool:
34 questions = [
35 {
36 "type": "confirm",
37 "name": "overwrite",
38 "message": f"File {config_path} already exists. Overwrite?",
39 "default": False,
40 },
41 ]
42 answers = prompt(questions)
43 return answers['overwrite']
44
45
46 def ask_user_config() -> Dict[str, Any]:
47 """
48 Ask user a few questions to build the configuration.
49 Interactive questions built using https://github.com/tmbo/questionary
50 :returns: Dict with keys to put into template
51 """
52 questions: List[Dict[str, Any]] = [
53 {
54 "type": "confirm",
55 "name": "dry_run",
56 "message": "Do you want to enable Dry-run (simulated trades)?",
57 "default": True,
58 },
59 {
60 "type": "text",
61 "name": "stake_currency",
62 "message": "Please insert your stake currency:",
63 "default": 'BTC',
64 },
65 {
66 "type": "text",
67 "name": "stake_amount",
68 "message": "Please insert your stake amount:",
69 "default": "0.01",
70 "validate": lambda val: val == UNLIMITED_STAKE_AMOUNT or validate_is_float(val),
71 },
72 {
73 "type": "text",
74 "name": "max_open_trades",
75 "message": f"Please insert max_open_trades (Integer or '{UNLIMITED_STAKE_AMOUNT}'):",
76 "default": "3",
77 "validate": lambda val: val == UNLIMITED_STAKE_AMOUNT or validate_is_int(val)
78 },
79 {
80 "type": "text",
81 "name": "timeframe",
82 "message": "Please insert your desired timeframe (e.g. 5m):",
83 "default": "5m",
84 },
85 {
86 "type": "text",
87 "name": "fiat_display_currency",
88 "message": "Please insert your display Currency (for reporting):",
89 "default": 'USD',
90 },
91 {
92 "type": "select",
93 "name": "exchange_name",
94 "message": "Select exchange",
95 "choices": [
96 "binance",
97 "binanceus",
98 "bittrex",
99 "kraken",
100 "ftx",
101 Separator(),
102 "other",
103 ],
104 },
105 {
106 "type": "autocomplete",
107 "name": "exchange_name",
108 "message": "Type your exchange name (Must be supported by ccxt)",
109 "choices": available_exchanges(),
110 "when": lambda x: x["exchange_name"] == 'other'
111 },
112 {
113 "type": "password",
114 "name": "exchange_key",
115 "message": "Insert Exchange Key",
116 "when": lambda x: not x['dry_run']
117 },
118 {
119 "type": "password",
120 "name": "exchange_secret",
121 "message": "Insert Exchange Secret",
122 "when": lambda x: not x['dry_run']
123 },
124 {
125 "type": "confirm",
126 "name": "telegram",
127 "message": "Do you want to enable Telegram?",
128 "default": False,
129 },
130 {
131 "type": "password",
132 "name": "telegram_token",
133 "message": "Insert Telegram token",
134 "when": lambda x: x['telegram']
135 },
136 {
137 "type": "text",
138 "name": "telegram_chat_id",
139 "message": "Insert Telegram chat id",
140 "when": lambda x: x['telegram']
141 },
142 {
143 "type": "confirm",
144 "name": "api_server",
145 "message": "Do you want to enable the Rest API (includes FreqUI)?",
146 "default": False,
147 },
148 {
149 "type": "text",
150 "name": "api_server_listen_addr",
151 "message": "Insert Api server Listen Address (best left untouched default!)",
152 "default": "127.0.0.1",
153 "when": lambda x: x['api_server']
154 },
155 {
156 "type": "text",
157 "name": "api_server_username",
158 "message": "Insert api-server username",
159 "default": "freqtrader",
160 "when": lambda x: x['api_server']
161 },
162 {
163 "type": "text",
164 "name": "api_server_password",
165 "message": "Insert api-server password",
166 "when": lambda x: x['api_server']
167 },
168 ]
169 answers = prompt(questions)
170
171 if not answers:
172 # Interrupted questionary sessions return an empty dict.
173 raise OperationalException("User interrupted interactive questions.")
174
175 # Force JWT token to be a random string
176 answers['api_server_jwt_key'] = secrets.token_hex()
177
178 return answers
179
180
181 def deploy_new_config(config_path: Path, selections: Dict[str, Any]) -> None:
182 """
183 Applies selections to the template and writes the result to config_path
184 :param config_path: Path object for new config file. Should not exist yet
185 :param selecions: Dict containing selections taken by the user.
186 """
187 from jinja2.exceptions import TemplateNotFound
188 try:
189 exchange_template = MAP_EXCHANGE_CHILDCLASS.get(
190 selections['exchange_name'], selections['exchange_name'])
191
192 selections['exchange'] = render_template(
193 templatefile=f"subtemplates/exchange_{exchange_template}.j2",
194 arguments=selections
195 )
196 except TemplateNotFound:
197 selections['exchange'] = render_template(
198 templatefile="subtemplates/exchange_generic.j2",
199 arguments=selections
200 )
201
202 config_text = render_template(templatefile='base_config.json.j2',
203 arguments=selections)
204
205 logger.info(f"Writing config to `{config_path}`.")
206 logger.info(
207 "Please make sure to check the configuration contents and adjust settings to your needs.")
208
209 config_path.write_text(config_text)
210
211
212 def start_new_config(args: Dict[str, Any]) -> None:
213 """
214 Create a new strategy from a template
215 Asking the user questions to fill out the templateaccordingly.
216 """
217
218 config_path = Path(args['config'][0])
219 if config_path.exists():
220 overwrite = ask_user_overwrite(config_path)
221 if overwrite:
222 config_path.unlink()
223 else:
224 raise OperationalException(
225 f"Configuration file `{config_path}` already exists. "
226 "Please delete it or use a different configuration file name.")
227 selections = ask_user_config()
228 deploy_new_config(config_path, selections)
229
```
Path: `freqtrade/configuration/directory_operations.py`
Content:
```
1 import logging
2 import shutil
3 from pathlib import Path
4 from typing import Any, Dict, Optional
5
6 from freqtrade.constants import USER_DATA_FILES
7 from freqtrade.exceptions import OperationalException
8
9
10 logger = logging.getLogger(__name__)
11
12
13 def create_datadir(config: Dict[str, Any], datadir: Optional[str] = None) -> Path:
14
15 folder = Path(datadir) if datadir else Path(f"{config['user_data_dir']}/data")
16 if not datadir:
17 # set datadir
18 exchange_name = config.get('exchange', {}).get('name').lower()
19 folder = folder.joinpath(exchange_name)
20
21 if not folder.is_dir():
22 folder.mkdir(parents=True)
23 logger.info(f'Created data directory: {datadir}')
24 return folder
25
26
27 def create_userdata_dir(directory: str, create_dir: bool = False) -> Path:
28 """
29 Create userdata directory structure.
30 if create_dir is True, then the parent-directory will be created if it does not exist.
31 Sub-directories will always be created if the parent directory exists.
32 Raises OperationalException if given a non-existing directory.
33 :param directory: Directory to check
34 :param create_dir: Create directory if it does not exist.
35 :return: Path object containing the directory
36 """
37 sub_dirs = ["backtest_results", "data", "hyperopts", "hyperopt_results", "logs",
38 "notebooks", "plot", "strategies", ]
39 folder = Path(directory)
40 if not folder.is_dir():
41 if create_dir:
42 folder.mkdir(parents=True)
43 logger.info(f'Created user-data directory: {folder}')
44 else:
45 raise OperationalException(
46 f"Directory `{folder}` does not exist. "
47 "Please use `freqtrade create-userdir` to create a user directory")
48
49 # Create required subdirectories
50 for f in sub_dirs:
51 subfolder = folder / f
52 if not subfolder.is_dir():
53 subfolder.mkdir(parents=False)
54 return folder
55
56
57 def copy_sample_files(directory: Path, overwrite: bool = False) -> None:
58 """
59 Copy files from templates to User data directory.
60 :param directory: Directory to copy data to
61 :param overwrite: Overwrite existing sample files
62 """
63 if not directory.is_dir():
64 raise OperationalException(f"Directory `{directory}` does not exist.")
65 sourcedir = Path(__file__).parents[1] / "templates"
66 for source, target in USER_DATA_FILES.items():
67 targetdir = directory / target
68 if not targetdir.is_dir():
69 raise OperationalException(f"Directory `{targetdir}` does not exist.")
70 targetfile = targetdir / source
71 if targetfile.exists():
72 if not overwrite:
73 logger.warning(f"File `{targetfile}` exists already, not deploying sample file.")
74 continue
75 logger.warning(f"File `{targetfile}` exists already, overwriting.")
76 shutil.copy(str(sourcedir / source), str(targetfile))
77
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/freqtrade/commands/build_config_commands.py b/freqtrade/commands/build_config_commands.py
--- a/freqtrade/commands/build_config_commands.py
+++ b/freqtrade/commands/build_config_commands.py
@@ -5,6 +5,7 @@
from questionary import Separator, prompt
+from freqtrade.configuration.directory_operations import chown_user_directory
from freqtrade.constants import UNLIMITED_STAKE_AMOUNT
from freqtrade.exceptions import OperationalException
from freqtrade.exchange import MAP_EXCHANGE_CHILDCLASS, available_exchanges
@@ -216,6 +217,7 @@
"""
config_path = Path(args['config'][0])
+ chown_user_directory(config_path.parent)
if config_path.exists():
overwrite = ask_user_overwrite(config_path)
if overwrite:
diff --git a/freqtrade/configuration/directory_operations.py b/freqtrade/configuration/directory_operations.py
--- a/freqtrade/configuration/directory_operations.py
+++ b/freqtrade/configuration/directory_operations.py
@@ -24,6 +24,21 @@
return folder
+def chown_user_directory(directory: Path) -> None:
+ """
+ Use Sudo to change permissions of the home-directory if necessary
+ Only applies when running in docker!
+ """
+ import os
+ if os.environ.get('FT_APP_ENV') == 'docker':
+ try:
+ import subprocess
+ subprocess.check_output(
+ ['sudo', 'chown', '-R', 'ftuser:', str(directory.resolve())])
+ except Exception:
+ logger.warning(f"Could not chown {directory}")
+
+
def create_userdata_dir(directory: str, create_dir: bool = False) -> Path:
"""
Create userdata directory structure.
@@ -37,6 +52,7 @@
sub_dirs = ["backtest_results", "data", "hyperopts", "hyperopt_results", "logs",
"notebooks", "plot", "strategies", ]
folder = Path(directory)
+ chown_user_directory(folder)
if not folder.is_dir():
if create_dir:
folder.mkdir(parents=True)
|
{"golden_diff": "diff --git a/freqtrade/commands/build_config_commands.py b/freqtrade/commands/build_config_commands.py\n--- a/freqtrade/commands/build_config_commands.py\n+++ b/freqtrade/commands/build_config_commands.py\n@@ -5,6 +5,7 @@\n \n from questionary import Separator, prompt\n \n+from freqtrade.configuration.directory_operations import chown_user_directory\n from freqtrade.constants import UNLIMITED_STAKE_AMOUNT\n from freqtrade.exceptions import OperationalException\n from freqtrade.exchange import MAP_EXCHANGE_CHILDCLASS, available_exchanges\n@@ -216,6 +217,7 @@\n \"\"\"\n \n config_path = Path(args['config'][0])\n+ chown_user_directory(config_path.parent)\n if config_path.exists():\n overwrite = ask_user_overwrite(config_path)\n if overwrite:\ndiff --git a/freqtrade/configuration/directory_operations.py b/freqtrade/configuration/directory_operations.py\n--- a/freqtrade/configuration/directory_operations.py\n+++ b/freqtrade/configuration/directory_operations.py\n@@ -24,6 +24,21 @@\n return folder\n \n \n+def chown_user_directory(directory: Path) -> None:\n+ \"\"\"\n+ Use Sudo to change permissions of the home-directory if necessary\n+ Only applies when running in docker!\n+ \"\"\"\n+ import os\n+ if os.environ.get('FT_APP_ENV') == 'docker':\n+ try:\n+ import subprocess\n+ subprocess.check_output(\n+ ['sudo', 'chown', '-R', 'ftuser:', str(directory.resolve())])\n+ except Exception:\n+ logger.warning(f\"Could not chown {directory}\")\n+\n+\n def create_userdata_dir(directory: str, create_dir: bool = False) -> Path:\n \"\"\"\n Create userdata directory structure.\n@@ -37,6 +52,7 @@\n sub_dirs = [\"backtest_results\", \"data\", \"hyperopts\", \"hyperopt_results\", \"logs\",\n \"notebooks\", \"plot\", \"strategies\", ]\n folder = Path(directory)\n+ chown_user_directory(folder)\n if not folder.is_dir():\n if create_dir:\n folder.mkdir(parents=True)\n", "issue": "Docker container as non root user\nThe current docker container uses root user by default. This caused permission problems from the host side. For example, I can't edit files from binded folders without sudo. \n", "before_files": [{"content": "import logging\nimport secrets\nfrom pathlib import Path\nfrom typing import Any, Dict, List\n\nfrom questionary import Separator, prompt\n\nfrom freqtrade.constants import UNLIMITED_STAKE_AMOUNT\nfrom freqtrade.exceptions import OperationalException\nfrom freqtrade.exchange import MAP_EXCHANGE_CHILDCLASS, available_exchanges\nfrom freqtrade.misc import render_template\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef validate_is_int(val):\n try:\n _ = int(val)\n return True\n except Exception:\n return False\n\n\ndef validate_is_float(val):\n try:\n _ = float(val)\n return True\n except Exception:\n return False\n\n\ndef ask_user_overwrite(config_path: Path) -> bool:\n questions = [\n {\n \"type\": \"confirm\",\n \"name\": \"overwrite\",\n \"message\": f\"File {config_path} already exists. 
Overwrite?\",\n \"default\": False,\n },\n ]\n answers = prompt(questions)\n return answers['overwrite']\n\n\ndef ask_user_config() -> Dict[str, Any]:\n \"\"\"\n Ask user a few questions to build the configuration.\n Interactive questions built using https://github.com/tmbo/questionary\n :returns: Dict with keys to put into template\n \"\"\"\n questions: List[Dict[str, Any]] = [\n {\n \"type\": \"confirm\",\n \"name\": \"dry_run\",\n \"message\": \"Do you want to enable Dry-run (simulated trades)?\",\n \"default\": True,\n },\n {\n \"type\": \"text\",\n \"name\": \"stake_currency\",\n \"message\": \"Please insert your stake currency:\",\n \"default\": 'BTC',\n },\n {\n \"type\": \"text\",\n \"name\": \"stake_amount\",\n \"message\": \"Please insert your stake amount:\",\n \"default\": \"0.01\",\n \"validate\": lambda val: val == UNLIMITED_STAKE_AMOUNT or validate_is_float(val),\n },\n {\n \"type\": \"text\",\n \"name\": \"max_open_trades\",\n \"message\": f\"Please insert max_open_trades (Integer or '{UNLIMITED_STAKE_AMOUNT}'):\",\n \"default\": \"3\",\n \"validate\": lambda val: val == UNLIMITED_STAKE_AMOUNT or validate_is_int(val)\n },\n {\n \"type\": \"text\",\n \"name\": \"timeframe\",\n \"message\": \"Please insert your desired timeframe (e.g. 5m):\",\n \"default\": \"5m\",\n },\n {\n \"type\": \"text\",\n \"name\": \"fiat_display_currency\",\n \"message\": \"Please insert your display Currency (for reporting):\",\n \"default\": 'USD',\n },\n {\n \"type\": \"select\",\n \"name\": \"exchange_name\",\n \"message\": \"Select exchange\",\n \"choices\": [\n \"binance\",\n \"binanceus\",\n \"bittrex\",\n \"kraken\",\n \"ftx\",\n Separator(),\n \"other\",\n ],\n },\n {\n \"type\": \"autocomplete\",\n \"name\": \"exchange_name\",\n \"message\": \"Type your exchange name (Must be supported by ccxt)\",\n \"choices\": available_exchanges(),\n \"when\": lambda x: x[\"exchange_name\"] == 'other'\n },\n {\n \"type\": \"password\",\n \"name\": \"exchange_key\",\n \"message\": \"Insert Exchange Key\",\n \"when\": lambda x: not x['dry_run']\n },\n {\n \"type\": \"password\",\n \"name\": \"exchange_secret\",\n \"message\": \"Insert Exchange Secret\",\n \"when\": lambda x: not x['dry_run']\n },\n {\n \"type\": \"confirm\",\n \"name\": \"telegram\",\n \"message\": \"Do you want to enable Telegram?\",\n \"default\": False,\n },\n {\n \"type\": \"password\",\n \"name\": \"telegram_token\",\n \"message\": \"Insert Telegram token\",\n \"when\": lambda x: x['telegram']\n },\n {\n \"type\": \"text\",\n \"name\": \"telegram_chat_id\",\n \"message\": \"Insert Telegram chat id\",\n \"when\": lambda x: x['telegram']\n },\n {\n \"type\": \"confirm\",\n \"name\": \"api_server\",\n \"message\": \"Do you want to enable the Rest API (includes FreqUI)?\",\n \"default\": False,\n },\n {\n \"type\": \"text\",\n \"name\": \"api_server_listen_addr\",\n \"message\": \"Insert Api server Listen Address (best left untouched default!)\",\n \"default\": \"127.0.0.1\",\n \"when\": lambda x: x['api_server']\n },\n {\n \"type\": \"text\",\n \"name\": \"api_server_username\",\n \"message\": \"Insert api-server username\",\n \"default\": \"freqtrader\",\n \"when\": lambda x: x['api_server']\n },\n {\n \"type\": \"text\",\n \"name\": \"api_server_password\",\n \"message\": \"Insert api-server password\",\n \"when\": lambda x: x['api_server']\n },\n ]\n answers = prompt(questions)\n\n if not answers:\n # Interrupted questionary sessions return an empty dict.\n raise OperationalException(\"User interrupted interactive questions.\")\n\n # 
Force JWT token to be a random string\n answers['api_server_jwt_key'] = secrets.token_hex()\n\n return answers\n\n\ndef deploy_new_config(config_path: Path, selections: Dict[str, Any]) -> None:\n \"\"\"\n Applies selections to the template and writes the result to config_path\n :param config_path: Path object for new config file. Should not exist yet\n :param selecions: Dict containing selections taken by the user.\n \"\"\"\n from jinja2.exceptions import TemplateNotFound\n try:\n exchange_template = MAP_EXCHANGE_CHILDCLASS.get(\n selections['exchange_name'], selections['exchange_name'])\n\n selections['exchange'] = render_template(\n templatefile=f\"subtemplates/exchange_{exchange_template}.j2\",\n arguments=selections\n )\n except TemplateNotFound:\n selections['exchange'] = render_template(\n templatefile=\"subtemplates/exchange_generic.j2\",\n arguments=selections\n )\n\n config_text = render_template(templatefile='base_config.json.j2',\n arguments=selections)\n\n logger.info(f\"Writing config to `{config_path}`.\")\n logger.info(\n \"Please make sure to check the configuration contents and adjust settings to your needs.\")\n\n config_path.write_text(config_text)\n\n\ndef start_new_config(args: Dict[str, Any]) -> None:\n \"\"\"\n Create a new strategy from a template\n Asking the user questions to fill out the templateaccordingly.\n \"\"\"\n\n config_path = Path(args['config'][0])\n if config_path.exists():\n overwrite = ask_user_overwrite(config_path)\n if overwrite:\n config_path.unlink()\n else:\n raise OperationalException(\n f\"Configuration file `{config_path}` already exists. \"\n \"Please delete it or use a different configuration file name.\")\n selections = ask_user_config()\n deploy_new_config(config_path, selections)\n", "path": "freqtrade/commands/build_config_commands.py"}, {"content": "import logging\nimport shutil\nfrom pathlib import Path\nfrom typing import Any, Dict, Optional\n\nfrom freqtrade.constants import USER_DATA_FILES\nfrom freqtrade.exceptions import OperationalException\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef create_datadir(config: Dict[str, Any], datadir: Optional[str] = None) -> Path:\n\n folder = Path(datadir) if datadir else Path(f\"{config['user_data_dir']}/data\")\n if not datadir:\n # set datadir\n exchange_name = config.get('exchange', {}).get('name').lower()\n folder = folder.joinpath(exchange_name)\n\n if not folder.is_dir():\n folder.mkdir(parents=True)\n logger.info(f'Created data directory: {datadir}')\n return folder\n\n\ndef create_userdata_dir(directory: str, create_dir: bool = False) -> Path:\n \"\"\"\n Create userdata directory structure.\n if create_dir is True, then the parent-directory will be created if it does not exist.\n Sub-directories will always be created if the parent directory exists.\n Raises OperationalException if given a non-existing directory.\n :param directory: Directory to check\n :param create_dir: Create directory if it does not exist.\n :return: Path object containing the directory\n \"\"\"\n sub_dirs = [\"backtest_results\", \"data\", \"hyperopts\", \"hyperopt_results\", \"logs\",\n \"notebooks\", \"plot\", \"strategies\", ]\n folder = Path(directory)\n if not folder.is_dir():\n if create_dir:\n folder.mkdir(parents=True)\n logger.info(f'Created user-data directory: {folder}')\n else:\n raise OperationalException(\n f\"Directory `{folder}` does not exist. 
\"\n \"Please use `freqtrade create-userdir` to create a user directory\")\n\n # Create required subdirectories\n for f in sub_dirs:\n subfolder = folder / f\n if not subfolder.is_dir():\n subfolder.mkdir(parents=False)\n return folder\n\n\ndef copy_sample_files(directory: Path, overwrite: bool = False) -> None:\n \"\"\"\n Copy files from templates to User data directory.\n :param directory: Directory to copy data to\n :param overwrite: Overwrite existing sample files\n \"\"\"\n if not directory.is_dir():\n raise OperationalException(f\"Directory `{directory}` does not exist.\")\n sourcedir = Path(__file__).parents[1] / \"templates\"\n for source, target in USER_DATA_FILES.items():\n targetdir = directory / target\n if not targetdir.is_dir():\n raise OperationalException(f\"Directory `{targetdir}` does not exist.\")\n targetfile = targetdir / source\n if targetfile.exists():\n if not overwrite:\n logger.warning(f\"File `{targetfile}` exists already, not deploying sample file.\")\n continue\n logger.warning(f\"File `{targetfile}` exists already, overwriting.\")\n shutil.copy(str(sourcedir / source), str(targetfile))\n", "path": "freqtrade/configuration/directory_operations.py"}], "after_files": [{"content": "import logging\nimport secrets\nfrom pathlib import Path\nfrom typing import Any, Dict, List\n\nfrom questionary import Separator, prompt\n\nfrom freqtrade.configuration.directory_operations import chown_user_directory\nfrom freqtrade.constants import UNLIMITED_STAKE_AMOUNT\nfrom freqtrade.exceptions import OperationalException\nfrom freqtrade.exchange import MAP_EXCHANGE_CHILDCLASS, available_exchanges\nfrom freqtrade.misc import render_template\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef validate_is_int(val):\n try:\n _ = int(val)\n return True\n except Exception:\n return False\n\n\ndef validate_is_float(val):\n try:\n _ = float(val)\n return True\n except Exception:\n return False\n\n\ndef ask_user_overwrite(config_path: Path) -> bool:\n questions = [\n {\n \"type\": \"confirm\",\n \"name\": \"overwrite\",\n \"message\": f\"File {config_path} already exists. Overwrite?\",\n \"default\": False,\n },\n ]\n answers = prompt(questions)\n return answers['overwrite']\n\n\ndef ask_user_config() -> Dict[str, Any]:\n \"\"\"\n Ask user a few questions to build the configuration.\n Interactive questions built using https://github.com/tmbo/questionary\n :returns: Dict with keys to put into template\n \"\"\"\n questions: List[Dict[str, Any]] = [\n {\n \"type\": \"confirm\",\n \"name\": \"dry_run\",\n \"message\": \"Do you want to enable Dry-run (simulated trades)?\",\n \"default\": True,\n },\n {\n \"type\": \"text\",\n \"name\": \"stake_currency\",\n \"message\": \"Please insert your stake currency:\",\n \"default\": 'BTC',\n },\n {\n \"type\": \"text\",\n \"name\": \"stake_amount\",\n \"message\": \"Please insert your stake amount:\",\n \"default\": \"0.01\",\n \"validate\": lambda val: val == UNLIMITED_STAKE_AMOUNT or validate_is_float(val),\n },\n {\n \"type\": \"text\",\n \"name\": \"max_open_trades\",\n \"message\": f\"Please insert max_open_trades (Integer or '{UNLIMITED_STAKE_AMOUNT}'):\",\n \"default\": \"3\",\n \"validate\": lambda val: val == UNLIMITED_STAKE_AMOUNT or validate_is_int(val)\n },\n {\n \"type\": \"text\",\n \"name\": \"timeframe\",\n \"message\": \"Please insert your desired timeframe (e.g. 
5m):\",\n \"default\": \"5m\",\n },\n {\n \"type\": \"text\",\n \"name\": \"fiat_display_currency\",\n \"message\": \"Please insert your display Currency (for reporting):\",\n \"default\": 'USD',\n },\n {\n \"type\": \"select\",\n \"name\": \"exchange_name\",\n \"message\": \"Select exchange\",\n \"choices\": [\n \"binance\",\n \"binanceus\",\n \"bittrex\",\n \"kraken\",\n \"ftx\",\n Separator(),\n \"other\",\n ],\n },\n {\n \"type\": \"autocomplete\",\n \"name\": \"exchange_name\",\n \"message\": \"Type your exchange name (Must be supported by ccxt)\",\n \"choices\": available_exchanges(),\n \"when\": lambda x: x[\"exchange_name\"] == 'other'\n },\n {\n \"type\": \"password\",\n \"name\": \"exchange_key\",\n \"message\": \"Insert Exchange Key\",\n \"when\": lambda x: not x['dry_run']\n },\n {\n \"type\": \"password\",\n \"name\": \"exchange_secret\",\n \"message\": \"Insert Exchange Secret\",\n \"when\": lambda x: not x['dry_run']\n },\n {\n \"type\": \"confirm\",\n \"name\": \"telegram\",\n \"message\": \"Do you want to enable Telegram?\",\n \"default\": False,\n },\n {\n \"type\": \"password\",\n \"name\": \"telegram_token\",\n \"message\": \"Insert Telegram token\",\n \"when\": lambda x: x['telegram']\n },\n {\n \"type\": \"text\",\n \"name\": \"telegram_chat_id\",\n \"message\": \"Insert Telegram chat id\",\n \"when\": lambda x: x['telegram']\n },\n {\n \"type\": \"confirm\",\n \"name\": \"api_server\",\n \"message\": \"Do you want to enable the Rest API (includes FreqUI)?\",\n \"default\": False,\n },\n {\n \"type\": \"text\",\n \"name\": \"api_server_listen_addr\",\n \"message\": \"Insert Api server Listen Address (best left untouched default!)\",\n \"default\": \"127.0.0.1\",\n \"when\": lambda x: x['api_server']\n },\n {\n \"type\": \"text\",\n \"name\": \"api_server_username\",\n \"message\": \"Insert api-server username\",\n \"default\": \"freqtrader\",\n \"when\": lambda x: x['api_server']\n },\n {\n \"type\": \"text\",\n \"name\": \"api_server_password\",\n \"message\": \"Insert api-server password\",\n \"when\": lambda x: x['api_server']\n },\n ]\n answers = prompt(questions)\n\n if not answers:\n # Interrupted questionary sessions return an empty dict.\n raise OperationalException(\"User interrupted interactive questions.\")\n\n # Force JWT token to be a random string\n answers['api_server_jwt_key'] = secrets.token_hex()\n\n return answers\n\n\ndef deploy_new_config(config_path: Path, selections: Dict[str, Any]) -> None:\n \"\"\"\n Applies selections to the template and writes the result to config_path\n :param config_path: Path object for new config file. 
Should not exist yet\n :param selecions: Dict containing selections taken by the user.\n \"\"\"\n from jinja2.exceptions import TemplateNotFound\n try:\n exchange_template = MAP_EXCHANGE_CHILDCLASS.get(\n selections['exchange_name'], selections['exchange_name'])\n\n selections['exchange'] = render_template(\n templatefile=f\"subtemplates/exchange_{exchange_template}.j2\",\n arguments=selections\n )\n except TemplateNotFound:\n selections['exchange'] = render_template(\n templatefile=\"subtemplates/exchange_generic.j2\",\n arguments=selections\n )\n\n config_text = render_template(templatefile='base_config.json.j2',\n arguments=selections)\n\n logger.info(f\"Writing config to `{config_path}`.\")\n logger.info(\n \"Please make sure to check the configuration contents and adjust settings to your needs.\")\n\n config_path.write_text(config_text)\n\n\ndef start_new_config(args: Dict[str, Any]) -> None:\n \"\"\"\n Create a new strategy from a template\n Asking the user questions to fill out the templateaccordingly.\n \"\"\"\n\n config_path = Path(args['config'][0])\n chown_user_directory(config_path.parent)\n if config_path.exists():\n overwrite = ask_user_overwrite(config_path)\n if overwrite:\n config_path.unlink()\n else:\n raise OperationalException(\n f\"Configuration file `{config_path}` already exists. \"\n \"Please delete it or use a different configuration file name.\")\n selections = ask_user_config()\n deploy_new_config(config_path, selections)\n", "path": "freqtrade/commands/build_config_commands.py"}, {"content": "import logging\nimport shutil\nfrom pathlib import Path\nfrom typing import Any, Dict, Optional\n\nfrom freqtrade.constants import USER_DATA_FILES\nfrom freqtrade.exceptions import OperationalException\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef create_datadir(config: Dict[str, Any], datadir: Optional[str] = None) -> Path:\n\n folder = Path(datadir) if datadir else Path(f\"{config['user_data_dir']}/data\")\n if not datadir:\n # set datadir\n exchange_name = config.get('exchange', {}).get('name').lower()\n folder = folder.joinpath(exchange_name)\n\n if not folder.is_dir():\n folder.mkdir(parents=True)\n logger.info(f'Created data directory: {datadir}')\n return folder\n\n\ndef chown_user_directory(directory: Path) -> None:\n \"\"\"\n Use Sudo to change permissions of the home-directory if necessary\n Only applies when running in docker!\n \"\"\"\n import os\n if os.environ.get('FT_APP_ENV') == 'docker':\n try:\n import subprocess\n subprocess.check_output(\n ['sudo', 'chown', '-R', 'ftuser:', str(directory.resolve())])\n except Exception:\n logger.warning(f\"Could not chown {directory}\")\n\n\ndef create_userdata_dir(directory: str, create_dir: bool = False) -> Path:\n \"\"\"\n Create userdata directory structure.\n if create_dir is True, then the parent-directory will be created if it does not exist.\n Sub-directories will always be created if the parent directory exists.\n Raises OperationalException if given a non-existing directory.\n :param directory: Directory to check\n :param create_dir: Create directory if it does not exist.\n :return: Path object containing the directory\n \"\"\"\n sub_dirs = [\"backtest_results\", \"data\", \"hyperopts\", \"hyperopt_results\", \"logs\",\n \"notebooks\", \"plot\", \"strategies\", ]\n folder = Path(directory)\n chown_user_directory(folder)\n if not folder.is_dir():\n if create_dir:\n folder.mkdir(parents=True)\n logger.info(f'Created user-data directory: {folder}')\n else:\n raise OperationalException(\n f\"Directory 
`{folder}` does not exist. \"\n \"Please use `freqtrade create-userdir` to create a user directory\")\n\n # Create required subdirectories\n for f in sub_dirs:\n subfolder = folder / f\n if not subfolder.is_dir():\n subfolder.mkdir(parents=False)\n return folder\n\n\ndef copy_sample_files(directory: Path, overwrite: bool = False) -> None:\n \"\"\"\n Copy files from templates to User data directory.\n :param directory: Directory to copy data to\n :param overwrite: Overwrite existing sample files\n \"\"\"\n if not directory.is_dir():\n raise OperationalException(f\"Directory `{directory}` does not exist.\")\n sourcedir = Path(__file__).parents[1] / \"templates\"\n for source, target in USER_DATA_FILES.items():\n targetdir = directory / target\n if not targetdir.is_dir():\n raise OperationalException(f\"Directory `{targetdir}` does not exist.\")\n targetfile = targetdir / source\n if targetfile.exists():\n if not overwrite:\n logger.warning(f\"File `{targetfile}` exists already, not deploying sample file.\")\n continue\n logger.warning(f\"File `{targetfile}` exists already, overwriting.\")\n shutil.copy(str(sourcedir / source), str(targetfile))\n", "path": "freqtrade/configuration/directory_operations.py"}]}
| 3,235 | 462 |
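A note on the freqtrade record that ends above: its patch introduces a `chown_user_directory` helper and calls it from `create_userdata_dir` and `start_new_config`, so that a docker-mounted user-data directory owned by root is re-owned before freqtrade writes to it. A minimal, stand-alone sketch of that pattern — assuming, as the patched file does, that the docker image sets `FT_APP_ENV=docker` and allows a passwordless `sudo chown` for the `ftuser` account — looks like this:

```python
import logging
import os
import subprocess
from pathlib import Path

logger = logging.getLogger(__name__)


def chown_user_directory(directory: Path) -> None:
    """Re-own a directory tree when running inside the freqtrade docker image.

    Outside of docker (FT_APP_ENV != 'docker') this is a no-op, so local
    installations are never touched by sudo.
    """
    if os.environ.get("FT_APP_ENV") == "docker":
        try:
            # -R recurses into sub-directories; any failure is only logged,
            # mirroring the broad try/except in the patched module.
            subprocess.check_output(
                ["sudo", "chown", "-R", "ftuser:", str(directory.resolve())]
            )
        except Exception:
            logger.warning(f"Could not chown {directory}")
```

Calling the helper before the `is_dir()` check in `create_userdata_dir` means the ownership fix also applies when the directory already exists and only its sub-directories still need to be created.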
| gh_patches_debug_300 | rasdani/github-patches | git_diff | mlcommons__GaNDLF-477 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add histology exception tests
**Is your feature request related to a problem? Please describe.**
Currently, the histology inference pipeline contains a lot of exceptions, but they aren't being tested.
**Describe the solution you'd like**
See title.
**Describe alternatives you've considered**
N.A.
**Additional context**
N.A.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 """The setup script."""
4
5
6 import os
7 from setuptools import setup, find_packages
8 from setuptools.command.install import install
9 from setuptools.command.develop import develop
10 from setuptools.command.egg_info import egg_info
11
12 with open("README.md") as readme_file:
13 readme = readme_file.read()
14
15
16 def git_submodule_update():
17 ## submodule update
18 os.system("git submodule update --init --recursive")
19
20
21 class CustomInstallCommand(install):
22 def run(self):
23 install.run(self)
24 git_submodule_update()
25
26
27 class CustomDevelopCommand(develop):
28 def run(self):
29 develop.run(self)
30 git_submodule_update()
31
32
33 class CustomEggInfoCommand(egg_info):
34 def run(self):
35 egg_info.run(self)
36 git_submodule_update()
37
38
39 # read version.py
40 import sys, re
41
42 try:
43 filepath = "GANDLF/version.py"
44 version_file = open(filepath)
45 (__version__,) = re.findall('__version__ = "(.*)"', version_file.read())
46
47 except Exception as error:
48 __version__ = "0.0.1"
49 sys.stderr.write("Warning: Could not open '%s' due %s\n" % (filepath, error))
50
51 requirements = [
52 "black",
53 "numpy==1.22.0",
54 "scipy",
55 "SimpleITK!=2.0.*",
56 "torchvision",
57 "tqdm",
58 "torchio==0.18.57",
59 "pandas",
60 "pylint",
61 "scikit-learn>=0.23.2",
62 "scikit-image>=0.19.1",
63 'pickle5>=0.0.11; python_version < "3.8.0"',
64 "setuptools",
65 "seaborn",
66 "pyyaml",
67 "tiffslide",
68 "matplotlib",
69 "requests>=2.25.0",
70 "pyvips",
71 "pytest",
72 "coverage",
73 "pytest-cov",
74 "psutil",
75 "medcam",
76 "opencv-python",
77 "torchmetrics==0.5.1", # newer versions have changed api for f1 invocation
78 "OpenPatchMiner==0.1.8",
79 "zarr==2.10.3",
80 "pydicom",
81 "onnx",
82 "torchinfo==1.7.0",
83 ]
84
85 # pytorch doesn't have LTS support on OSX - https://github.com/CBICA/GaNDLF/issues/389
86 if sys.platform == "darwin":
87 requirements.append("torch==1.9.0")
88 else:
89 requirements.append("torch==1.8.2")
90
91 setup(
92 name="GANDLF",
93 version=__version__,
94 author="Jose Agraz, Vinayak Ahluwalia, Bhakti Baheti, Spyridon Bakas, Ujjwal Baid, Megh Bhalerao, Brandon Edwards, Karol Gotkowski, Caleb Grenko, Orhun Güley, Ibrahim Ethem Hamamci, Sarthak Pati, Micah Sheller, Juliia Skobleva, Siddhesh Thakur, Spiros Thermos", # alphabetical order
95 author_email="[email protected]",
96 python_requires=">=3.7",
97 packages=find_packages(),
98 cmdclass={ # this ensures git_submodule_update is called during install
99 "install": CustomInstallCommand,
100 "develop": CustomDevelopCommand,
101 "egg_info": CustomEggInfoCommand,
102 },
103 scripts=[
104 "gandlf_run",
105 "gandlf_constructCSV",
106 "gandlf_collectStats",
107 "gandlf_patchMiner",
108 "gandlf_preprocess",
109 "gandlf_anonymizer",
110 "gandlf_verifyInstall",
111 ],
112 classifiers=[
113 "Development Status :: 3 - Alpha",
114 "Intended Audience :: Science/Research",
115 "License :: OSI Approved :: BSD License",
116 "Natural Language :: English",
117 "Operating System :: OS Independent",
118 "Programming Language :: Python :: 3.7",
119 "Programming Language :: Python :: 3.8",
120 "Programming Language :: Python :: 3.9",
121 "Topic :: Scientific/Engineering :: Medical Science Apps",
122 ],
123 description=(
124 "PyTorch-based framework that handles segmentation/regression/classification using various DL architectures for medical imaging."
125 ),
126 install_requires=requirements,
127 license="BSD-3-Clause License",
128 long_description=readme,
129 long_description_content_type="text/markdown",
130 include_package_data=True,
131 keywords="semantic, segmentation, regression, classification, data-augmentation, medical-imaging",
132 zip_safe=False,
133 )
134
135 ## windows vips installation
136 if os.name == "nt": # proceed for windows
137 from pathlib import Path
138
139 # download and extract if main dll is absent
140 if not Path("./vips/vips-dev-8.10/bin/libvips-42.dll").exists():
141 print("Downloading and extracting VIPS for Windows")
142 url = "https://github.com/libvips/libvips/releases/download/v8.10.2/vips-dev-w64-all-8.10.2.zip"
143 zip_to_extract = "./vips.zip"
144 import urllib.request, zipfile
145
146 urllib.request.urlretrieve(url, zip_to_extract)
147 z = zipfile.ZipFile(zip_to_extract)
148 z.extractall("./vips")
149 z.close()
150 os.remove(zip_to_extract)
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -67,7 +67,7 @@
"tiffslide",
"matplotlib",
"requests>=2.25.0",
- "pyvips",
+ "pyvips==2.2.1",
"pytest",
"coverage",
"pytest-cov",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -67,7 +67,7 @@\n \"tiffslide\",\n \"matplotlib\",\n \"requests>=2.25.0\",\n- \"pyvips\",\n+ \"pyvips==2.2.1\",\n \"pytest\",\n \"coverage\",\n \"pytest-cov\",\n", "issue": "Add histology exception tests\n**Is your feature request related to a problem? Please describe.**\r\nCurrently, the histology inference pipeline contains a lot of exceptions, but they aren't being tested.\r\n\r\n**Describe the solution you'd like**\r\nSee title.\r\n\r\n**Describe alternatives you've considered**\r\nN.A.\r\n\r\n**Additional context**\r\nN.A.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"The setup script.\"\"\"\n\n\nimport os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.install import install\nfrom setuptools.command.develop import develop\nfrom setuptools.command.egg_info import egg_info\n\nwith open(\"README.md\") as readme_file:\n readme = readme_file.read()\n\n\ndef git_submodule_update():\n ## submodule update\n os.system(\"git submodule update --init --recursive\")\n\n\nclass CustomInstallCommand(install):\n def run(self):\n install.run(self)\n git_submodule_update()\n\n\nclass CustomDevelopCommand(develop):\n def run(self):\n develop.run(self)\n git_submodule_update()\n\n\nclass CustomEggInfoCommand(egg_info):\n def run(self):\n egg_info.run(self)\n git_submodule_update()\n\n\n# read version.py\nimport sys, re\n\ntry:\n filepath = \"GANDLF/version.py\"\n version_file = open(filepath)\n (__version__,) = re.findall('__version__ = \"(.*)\"', version_file.read())\n\nexcept Exception as error:\n __version__ = \"0.0.1\"\n sys.stderr.write(\"Warning: Could not open '%s' due %s\\n\" % (filepath, error))\n\nrequirements = [\n \"black\",\n \"numpy==1.22.0\",\n \"scipy\",\n \"SimpleITK!=2.0.*\",\n \"torchvision\",\n \"tqdm\",\n \"torchio==0.18.57\",\n \"pandas\",\n \"pylint\",\n \"scikit-learn>=0.23.2\",\n \"scikit-image>=0.19.1\",\n 'pickle5>=0.0.11; python_version < \"3.8.0\"',\n \"setuptools\",\n \"seaborn\",\n \"pyyaml\",\n \"tiffslide\",\n \"matplotlib\",\n \"requests>=2.25.0\",\n \"pyvips\",\n \"pytest\",\n \"coverage\",\n \"pytest-cov\",\n \"psutil\",\n \"medcam\",\n \"opencv-python\",\n \"torchmetrics==0.5.1\", # newer versions have changed api for f1 invocation\n \"OpenPatchMiner==0.1.8\",\n \"zarr==2.10.3\",\n \"pydicom\",\n \"onnx\",\n \"torchinfo==1.7.0\",\n]\n\n# pytorch doesn't have LTS support on OSX - https://github.com/CBICA/GaNDLF/issues/389\nif sys.platform == \"darwin\":\n requirements.append(\"torch==1.9.0\")\nelse:\n requirements.append(\"torch==1.8.2\")\n\nsetup(\n name=\"GANDLF\",\n version=__version__,\n author=\"Jose Agraz, Vinayak Ahluwalia, Bhakti Baheti, Spyridon Bakas, Ujjwal Baid, Megh Bhalerao, Brandon Edwards, Karol Gotkowski, Caleb Grenko, Orhun G\u00fcley, Ibrahim Ethem Hamamci, Sarthak Pati, Micah Sheller, Juliia Skobleva, Siddhesh Thakur, Spiros Thermos\", # alphabetical order\n author_email=\"[email protected]\",\n python_requires=\">=3.7\",\n packages=find_packages(),\n cmdclass={ # this ensures git_submodule_update is called during install\n \"install\": CustomInstallCommand,\n \"develop\": CustomDevelopCommand,\n \"egg_info\": CustomEggInfoCommand,\n },\n scripts=[\n \"gandlf_run\",\n \"gandlf_constructCSV\",\n \"gandlf_collectStats\",\n \"gandlf_patchMiner\",\n \"gandlf_preprocess\",\n \"gandlf_anonymizer\",\n \"gandlf_verifyInstall\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: 
Science/Research\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Scientific/Engineering :: Medical Science Apps\",\n ],\n description=(\n \"PyTorch-based framework that handles segmentation/regression/classification using various DL architectures for medical imaging.\"\n ),\n install_requires=requirements,\n license=\"BSD-3-Clause License\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n include_package_data=True,\n keywords=\"semantic, segmentation, regression, classification, data-augmentation, medical-imaging\",\n zip_safe=False,\n)\n\n## windows vips installation\nif os.name == \"nt\": # proceed for windows\n from pathlib import Path\n\n # download and extract if main dll is absent\n if not Path(\"./vips/vips-dev-8.10/bin/libvips-42.dll\").exists():\n print(\"Downloading and extracting VIPS for Windows\")\n url = \"https://github.com/libvips/libvips/releases/download/v8.10.2/vips-dev-w64-all-8.10.2.zip\"\n zip_to_extract = \"./vips.zip\"\n import urllib.request, zipfile\n\n urllib.request.urlretrieve(url, zip_to_extract)\n z = zipfile.ZipFile(zip_to_extract)\n z.extractall(\"./vips\")\n z.close()\n os.remove(zip_to_extract)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"The setup script.\"\"\"\n\n\nimport os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.install import install\nfrom setuptools.command.develop import develop\nfrom setuptools.command.egg_info import egg_info\n\nwith open(\"README.md\") as readme_file:\n readme = readme_file.read()\n\n\ndef git_submodule_update():\n ## submodule update\n os.system(\"git submodule update --init --recursive\")\n\n\nclass CustomInstallCommand(install):\n def run(self):\n install.run(self)\n git_submodule_update()\n\n\nclass CustomDevelopCommand(develop):\n def run(self):\n develop.run(self)\n git_submodule_update()\n\n\nclass CustomEggInfoCommand(egg_info):\n def run(self):\n egg_info.run(self)\n git_submodule_update()\n\n\n# read version.py\nimport sys, re\n\ntry:\n filepath = \"GANDLF/version.py\"\n version_file = open(filepath)\n (__version__,) = re.findall('__version__ = \"(.*)\"', version_file.read())\n\nexcept Exception as error:\n __version__ = \"0.0.1\"\n sys.stderr.write(\"Warning: Could not open '%s' due %s\\n\" % (filepath, error))\n\nrequirements = [\n \"black\",\n \"numpy==1.22.0\",\n \"scipy\",\n \"SimpleITK!=2.0.*\",\n \"torchvision\",\n \"tqdm\",\n \"torchio==0.18.57\",\n \"pandas\",\n \"pylint\",\n \"scikit-learn>=0.23.2\",\n \"scikit-image>=0.19.1\",\n 'pickle5>=0.0.11; python_version < \"3.8.0\"',\n \"setuptools\",\n \"seaborn\",\n \"pyyaml\",\n \"tiffslide\",\n \"matplotlib\",\n \"requests>=2.25.0\",\n \"pyvips==2.2.1\",\n \"pytest\",\n \"coverage\",\n \"pytest-cov\",\n \"psutil\",\n \"medcam\",\n \"opencv-python\",\n \"torchmetrics==0.5.1\", # newer versions have changed api for f1 invocation\n \"OpenPatchMiner==0.1.8\",\n \"zarr==2.10.3\",\n \"pydicom\",\n \"onnx\",\n \"torchinfo==1.7.0\",\n]\n\n# pytorch doesn't have LTS support on OSX - https://github.com/CBICA/GaNDLF/issues/389\nif sys.platform == \"darwin\":\n requirements.append(\"torch==1.9.0\")\nelse:\n requirements.append(\"torch==1.8.2\")\n\nsetup(\n name=\"GANDLF\",\n version=__version__,\n author=\"Jose Agraz, Vinayak Ahluwalia, Bhakti Baheti, 
Spyridon Bakas, Ujjwal Baid, Megh Bhalerao, Brandon Edwards, Karol Gotkowski, Caleb Grenko, Orhun G\u00fcley, Ibrahim Ethem Hamamci, Sarthak Pati, Micah Sheller, Juliia Skobleva, Siddhesh Thakur, Spiros Thermos\", # alphabetical order\n author_email=\"[email protected]\",\n python_requires=\">=3.7\",\n packages=find_packages(),\n cmdclass={ # this ensures git_submodule_update is called during install\n \"install\": CustomInstallCommand,\n \"develop\": CustomDevelopCommand,\n \"egg_info\": CustomEggInfoCommand,\n },\n scripts=[\n \"gandlf_run\",\n \"gandlf_constructCSV\",\n \"gandlf_collectStats\",\n \"gandlf_patchMiner\",\n \"gandlf_preprocess\",\n \"gandlf_anonymizer\",\n \"gandlf_verifyInstall\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Scientific/Engineering :: Medical Science Apps\",\n ],\n description=(\n \"PyTorch-based framework that handles segmentation/regression/classification using various DL architectures for medical imaging.\"\n ),\n install_requires=requirements,\n license=\"BSD-3-Clause License\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n include_package_data=True,\n keywords=\"semantic, segmentation, regression, classification, data-augmentation, medical-imaging\",\n zip_safe=False,\n)\n\n## windows vips installation\nif os.name == \"nt\": # proceed for windows\n from pathlib import Path\n\n # download and extract if main dll is absent\n if not Path(\"./vips/vips-dev-8.10/bin/libvips-42.dll\").exists():\n print(\"Downloading and extracting VIPS for Windows\")\n url = \"https://github.com/libvips/libvips/releases/download/v8.10.2/vips-dev-w64-all-8.10.2.zip\"\n zip_to_extract = \"./vips.zip\"\n import urllib.request, zipfile\n\n urllib.request.urlretrieve(url, zip_to_extract)\n z = zipfile.ZipFile(zip_to_extract)\n z.extractall(\"./vips\")\n z.close()\n os.remove(zip_to_extract)\n", "path": "setup.py"}]}
| 1,869 | 87 |
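The golden diff above only pins `pyvips` to 2.2.1; the histology exception tests requested in the issue would live in the test suite, which is not part of this record. As a generic, hypothetical sanity check (not code from the GaNDLF repository), the pins taken from the requirements list shown in the record can be verified against the active environment with the standard library:

```python
# Hypothetical check: confirm the environment honours a few pins taken from
# the setup.py requirements shown in this record.
from importlib.metadata import PackageNotFoundError, version

pins = {
    "pyvips": "2.2.1",
    "zarr": "2.10.3",
    "torchinfo": "1.7.0",
}

for package, expected in pins.items():
    try:
        installed = version(package)
    except PackageNotFoundError:
        print(f"{package}: not installed")
        continue
    status = "ok" if installed == expected else f"mismatch (expected {expected})"
    print(f"{package} {installed}: {status}")
```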
| gh_patches_debug_7552 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-1132 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TypeError with pymongo `getMore` operations
**Describe your environment**
Mac OS Catalina 10.15.6 (19G73)
Darwin-19.6.0-x86_64-i386-64bit
Python 3.7.7
Installed packages:
```
opentelemetry-api 0.12b0 OpenTelemetry Python API
opentelemetry-ext-honeycomb 0.5b0 Honeycomb Exporter for OpenTelemetry
opentelemetry-instrumentation 0.12b0 Instrumentation Tools & Auto Instrumentation for OpenTelemetry Python
opentelemetry-instrumentation-botocore 0.12b0 OpenTelemetry Botocore instrumentation
opentelemetry-instrumentation-pymongo 0.12b0 OpenTelemetry pymongo instrumentation
opentelemetry-instrumentation-requests 0.12b0 OpenTelemetry requests instrumentation
pymongo 3.11.0 Python driver for MongoDB <http://www.mongodb.org>
```
**Steps to reproduce**
any find operation where the number of documents to be returned exceeds the batch size on the cursor:
```python
from pymongo import MongoClient
from opentelemetry import trace
from opentelemetry.trace import TracerProvider
from opentelemetry.instrumentation.pymongo import PymongoInstrumentor
trace.set_tracer_provider(TracerProvider())
PymongoInstrumentor().instrument()
client = MongoClient()
db = client["MongoDB_Database"]
collection = db["MongoDB_Collection"]
collection.find({'batch_size': 1})
```
**What is the expected behavior?**
Spans with names like `mongodb.getMore.1`
**What is the actual behavior?**
```
Traceback (most recent call last):
File "/Users/drubin/cargurus/analytics/snowblower/.venv/lib/python3.7/site-packages/pymongo/monitoring.py", line 1266, in publish_command_start
subscriber.started(event)
File "/Users/drubin/cargurus/analytics/snowblower/.venv/lib/python3.7/site-packages/opentelemetry/instrumentation/pymongo/__init__.py", line 69, in started
name += "." + command
TypeError: can only concatenate str (not "int") to str
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `instrumentation/opentelemetry-instrumentation-pymongo/src/opentelemetry/instrumentation/pymongo/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 The integration with MongoDB supports the `pymongo`_ library, it can be
17 enabled using the ``PymongoInstrumentor``.
18
19 .. _pymongo: https://pypi.org/project/pymongo
20
21 Usage
22 -----
23
24 .. code:: python
25
26 from pymongo import MongoClient
27 from opentelemetry import trace
28 from opentelemetry.trace import TracerProvider
29 from opentelemetry.instrumentation.pymongo import PymongoInstrumentor
30
31 trace.set_tracer_provider(TracerProvider())
32
33 PymongoInstrumentor().instrument()
34 client = MongoClient()
35 db = client["MongoDB_Database"]
36 collection = db["MongoDB_Collection"]
37 collection.find_one()
38
39 API
40 ---
41 """
42
43 from pymongo import monitoring
44
45 from opentelemetry import trace
46 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
47 from opentelemetry.instrumentation.pymongo.version import __version__
48 from opentelemetry.trace import SpanKind, get_tracer
49 from opentelemetry.trace.status import Status, StatusCanonicalCode
50
51 DATABASE_TYPE = "mongodb"
52 COMMAND_ATTRIBUTES = ["filter", "sort", "skip", "limit", "pipeline"]
53
54
55 class CommandTracer(monitoring.CommandListener):
56 def __init__(self, tracer):
57 self._tracer = tracer
58 self._span_dict = {}
59 self.is_enabled = True
60
61 def started(self, event: monitoring.CommandStartedEvent):
62 """ Method to handle a pymongo CommandStartedEvent """
63 if not self.is_enabled:
64 return
65 command = event.command.get(event.command_name, "")
66 name = DATABASE_TYPE + "." + event.command_name
67 statement = event.command_name
68 if command:
69 name += "." + command
70 statement += " " + command
71
72 try:
73 span = self._tracer.start_span(name, kind=SpanKind.CLIENT)
74 span.set_attribute("component", DATABASE_TYPE)
75 span.set_attribute("db.type", DATABASE_TYPE)
76 span.set_attribute("db.instance", event.database_name)
77 span.set_attribute("db.statement", statement)
78 if event.connection_id is not None:
79 span.set_attribute("net.peer.name", event.connection_id[0])
80 span.set_attribute("net.peer.port", event.connection_id[1])
81
82 # pymongo specific, not specified by spec
83 span.set_attribute("db.mongo.operation_id", event.operation_id)
84 span.set_attribute("db.mongo.request_id", event.request_id)
85
86 for attr in COMMAND_ATTRIBUTES:
87 _attr = event.command.get(attr)
88 if _attr is not None:
89 span.set_attribute("db.mongo." + attr, str(_attr))
90
91 # Add Span to dictionary
92 self._span_dict[_get_span_dict_key(event)] = span
93 except Exception as ex: # noqa pylint: disable=broad-except
94 if span is not None:
95 span.set_status(Status(StatusCanonicalCode.INTERNAL, str(ex)))
96 span.end()
97 self._pop_span(event)
98
99 def succeeded(self, event: monitoring.CommandSucceededEvent):
100 """ Method to handle a pymongo CommandSucceededEvent """
101 if not self.is_enabled:
102 return
103 span = self._pop_span(event)
104 if span is None:
105 return
106 span.set_attribute("db.mongo.duration_micros", event.duration_micros)
107 span.set_status(Status(StatusCanonicalCode.OK, event.reply))
108 span.end()
109
110 def failed(self, event: monitoring.CommandFailedEvent):
111 """ Method to handle a pymongo CommandFailedEvent """
112 if not self.is_enabled:
113 return
114 span = self._pop_span(event)
115 if span is None:
116 return
117 span.set_attribute("db.mongo.duration_micros", event.duration_micros)
118 span.set_status(Status(StatusCanonicalCode.UNKNOWN, event.failure))
119 span.end()
120
121 def _pop_span(self, event):
122 return self._span_dict.pop(_get_span_dict_key(event), None)
123
124
125 def _get_span_dict_key(event):
126 if event.connection_id is not None:
127 return (event.request_id, event.connection_id)
128 return event.request_id
129
130
131 class PymongoInstrumentor(BaseInstrumentor):
132 _commandtracer_instance = None # type CommandTracer
133 # The instrumentation for PyMongo is based on the event listener interface
134 # https://api.mongodb.com/python/current/api/pymongo/monitoring.html.
135 # This interface only allows to register listeners and does not provide
136 # an unregister API. In order to provide a mechanishm to disable
137 # instrumentation an enabled flag is implemented in CommandTracer,
138 # it's checked in the different listeners.
139
140 def _instrument(self, **kwargs):
141 """Integrate with pymongo to trace it using event listener.
142 https://api.mongodb.com/python/current/api/pymongo/monitoring.html
143
144 Args:
145 tracer_provider: The `TracerProvider` to use. If none is passed the
146 current configured one is used.
147 """
148
149 tracer_provider = kwargs.get("tracer_provider")
150
151 # Create and register a CommandTracer only the first time
152 if self._commandtracer_instance is None:
153 tracer = get_tracer(__name__, __version__, tracer_provider)
154
155 self._commandtracer_instance = CommandTracer(tracer)
156 monitoring.register(self._commandtracer_instance)
157
158 # If already created, just enable it
159 self._commandtracer_instance.is_enabled = True
160
161 def _uninstrument(self, **kwargs):
162 if self._commandtracer_instance is not None:
163 self._commandtracer_instance.is_enabled = False
164
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/instrumentation/opentelemetry-instrumentation-pymongo/src/opentelemetry/instrumentation/pymongo/__init__.py b/instrumentation/opentelemetry-instrumentation-pymongo/src/opentelemetry/instrumentation/pymongo/__init__.py
--- a/instrumentation/opentelemetry-instrumentation-pymongo/src/opentelemetry/instrumentation/pymongo/__init__.py
+++ b/instrumentation/opentelemetry-instrumentation-pymongo/src/opentelemetry/instrumentation/pymongo/__init__.py
@@ -66,8 +66,8 @@
name = DATABASE_TYPE + "." + event.command_name
statement = event.command_name
if command:
- name += "." + command
- statement += " " + command
+ name += "." + str(command)
+ statement += " " + str(command)
try:
span = self._tracer.start_span(name, kind=SpanKind.CLIENT)
|
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-pymongo/src/opentelemetry/instrumentation/pymongo/__init__.py b/instrumentation/opentelemetry-instrumentation-pymongo/src/opentelemetry/instrumentation/pymongo/__init__.py\n--- a/instrumentation/opentelemetry-instrumentation-pymongo/src/opentelemetry/instrumentation/pymongo/__init__.py\n+++ b/instrumentation/opentelemetry-instrumentation-pymongo/src/opentelemetry/instrumentation/pymongo/__init__.py\n@@ -66,8 +66,8 @@\n name = DATABASE_TYPE + \".\" + event.command_name\n statement = event.command_name\n if command:\n- name += \".\" + command\n- statement += \" \" + command\n+ name += \".\" + str(command)\n+ statement += \" \" + str(command)\n \n try:\n span = self._tracer.start_span(name, kind=SpanKind.CLIENT)\n", "issue": "TypeError with pymongo `getMore` operations\n**Describe your environment** \r\nDescribe your environment\r\nMac OS Catalina 10.15.6 (19G73)\r\nDarwin-19.6.0-x86_64-i386-64bit\r\nPython 3.7.7\r\nInstalled packages:\r\n```\r\nopentelemetry-api 0.12b0 OpenTelemetry Python API\r\nopentelemetry-ext-honeycomb 0.5b0 Honeycomb Exporter for OpenTelemetry\r\nopentelemetry-instrumentation 0.12b0 Instrumentation Tools & Auto Instrumentation for OpenTelemetry Python\r\nopentelemetry-instrumentation-botocore 0.12b0 OpenTelemetry Botocore instrumentation\r\nopentelemetry-instrumentation-pymongo 0.12b0 OpenTelemetry pymongo instrumentation\r\nopentelemetry-instrumentation-requests 0.12b0 OpenTelemetry requests instrumentation\r\npymongo 3.11.0 Python driver for MongoDB <http://www.mongodb.org>\r\n```\r\n\r\n**Steps to reproduce**\r\nany find operation where the number of documents to be returned exceeds the batch size on the cursor:\r\n```pyhton\r\nfrom pymongo import MongoClient\r\nfrom opentelemetry import trace\r\nfrom opentelemetry.trace import TracerProvider\r\nfrom opentelemetry.instrumentation.pymongo import PymongoInstrumentor\r\n\r\ntrace.set_tracer_provider(TracerProvider())\r\n\r\nPymongoInstrumentor().instrument()\r\nclient = MongoClient()\r\ndb = client[\"MongoDB_Database\"]\r\ncollection = db[\"MongoDB_Collection\"]\r\ncollection.find({'batch_size': 1})\r\n```\r\n**What is the expected behavior?**\r\nSpans with names like `mongodb.getMore.1`\r\n\r\n**What is the actual behavior?**\r\n```\r\nTraceback (most recent call last):\r\n File \"/Users/drubin/cargurus/analytics/snowblower/.venv/lib/python3.7/site-packages/pymongo/monitoring.py\", line 1266, in publish_command_start\r\n subscriber.started(event)\r\n File \"/Users/drubin/cargurus/analytics/snowblower/.venv/lib/python3.7/site-packages/opentelemetry/instrumentation/pymongo/__init__.py\", line 69, in started\r\n name += \".\" + command\r\nTypeError: can only concatenate str (not \"int\") to str\r\n```\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThe integration with MongoDB supports the `pymongo`_ library, it can be\nenabled using the 
``PymongoInstrumentor``.\n\n.. _pymongo: https://pypi.org/project/pymongo\n\nUsage\n-----\n\n.. code:: python\n\n from pymongo import MongoClient\n from opentelemetry import trace\n from opentelemetry.trace import TracerProvider\n from opentelemetry.instrumentation.pymongo import PymongoInstrumentor\n\n trace.set_tracer_provider(TracerProvider())\n\n PymongoInstrumentor().instrument()\n client = MongoClient()\n db = client[\"MongoDB_Database\"]\n collection = db[\"MongoDB_Collection\"]\n collection.find_one()\n\nAPI\n---\n\"\"\"\n\nfrom pymongo import monitoring\n\nfrom opentelemetry import trace\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.pymongo.version import __version__\nfrom opentelemetry.trace import SpanKind, get_tracer\nfrom opentelemetry.trace.status import Status, StatusCanonicalCode\n\nDATABASE_TYPE = \"mongodb\"\nCOMMAND_ATTRIBUTES = [\"filter\", \"sort\", \"skip\", \"limit\", \"pipeline\"]\n\n\nclass CommandTracer(monitoring.CommandListener):\n def __init__(self, tracer):\n self._tracer = tracer\n self._span_dict = {}\n self.is_enabled = True\n\n def started(self, event: monitoring.CommandStartedEvent):\n \"\"\" Method to handle a pymongo CommandStartedEvent \"\"\"\n if not self.is_enabled:\n return\n command = event.command.get(event.command_name, \"\")\n name = DATABASE_TYPE + \".\" + event.command_name\n statement = event.command_name\n if command:\n name += \".\" + command\n statement += \" \" + command\n\n try:\n span = self._tracer.start_span(name, kind=SpanKind.CLIENT)\n span.set_attribute(\"component\", DATABASE_TYPE)\n span.set_attribute(\"db.type\", DATABASE_TYPE)\n span.set_attribute(\"db.instance\", event.database_name)\n span.set_attribute(\"db.statement\", statement)\n if event.connection_id is not None:\n span.set_attribute(\"net.peer.name\", event.connection_id[0])\n span.set_attribute(\"net.peer.port\", event.connection_id[1])\n\n # pymongo specific, not specified by spec\n span.set_attribute(\"db.mongo.operation_id\", event.operation_id)\n span.set_attribute(\"db.mongo.request_id\", event.request_id)\n\n for attr in COMMAND_ATTRIBUTES:\n _attr = event.command.get(attr)\n if _attr is not None:\n span.set_attribute(\"db.mongo.\" + attr, str(_attr))\n\n # Add Span to dictionary\n self._span_dict[_get_span_dict_key(event)] = span\n except Exception as ex: # noqa pylint: disable=broad-except\n if span is not None:\n span.set_status(Status(StatusCanonicalCode.INTERNAL, str(ex)))\n span.end()\n self._pop_span(event)\n\n def succeeded(self, event: monitoring.CommandSucceededEvent):\n \"\"\" Method to handle a pymongo CommandSucceededEvent \"\"\"\n if not self.is_enabled:\n return\n span = self._pop_span(event)\n if span is None:\n return\n span.set_attribute(\"db.mongo.duration_micros\", event.duration_micros)\n span.set_status(Status(StatusCanonicalCode.OK, event.reply))\n span.end()\n\n def failed(self, event: monitoring.CommandFailedEvent):\n \"\"\" Method to handle a pymongo CommandFailedEvent \"\"\"\n if not self.is_enabled:\n return\n span = self._pop_span(event)\n if span is None:\n return\n span.set_attribute(\"db.mongo.duration_micros\", event.duration_micros)\n span.set_status(Status(StatusCanonicalCode.UNKNOWN, event.failure))\n span.end()\n\n def _pop_span(self, event):\n return self._span_dict.pop(_get_span_dict_key(event), None)\n\n\ndef _get_span_dict_key(event):\n if event.connection_id is not None:\n return (event.request_id, event.connection_id)\n return event.request_id\n\n\nclass 
PymongoInstrumentor(BaseInstrumentor):\n _commandtracer_instance = None # type CommandTracer\n # The instrumentation for PyMongo is based on the event listener interface\n # https://api.mongodb.com/python/current/api/pymongo/monitoring.html.\n # This interface only allows to register listeners and does not provide\n # an unregister API. In order to provide a mechanishm to disable\n # instrumentation an enabled flag is implemented in CommandTracer,\n # it's checked in the different listeners.\n\n def _instrument(self, **kwargs):\n \"\"\"Integrate with pymongo to trace it using event listener.\n https://api.mongodb.com/python/current/api/pymongo/monitoring.html\n\n Args:\n tracer_provider: The `TracerProvider` to use. If none is passed the\n current configured one is used.\n \"\"\"\n\n tracer_provider = kwargs.get(\"tracer_provider\")\n\n # Create and register a CommandTracer only the first time\n if self._commandtracer_instance is None:\n tracer = get_tracer(__name__, __version__, tracer_provider)\n\n self._commandtracer_instance = CommandTracer(tracer)\n monitoring.register(self._commandtracer_instance)\n\n # If already created, just enable it\n self._commandtracer_instance.is_enabled = True\n\n def _uninstrument(self, **kwargs):\n if self._commandtracer_instance is not None:\n self._commandtracer_instance.is_enabled = False\n", "path": "instrumentation/opentelemetry-instrumentation-pymongo/src/opentelemetry/instrumentation/pymongo/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThe integration with MongoDB supports the `pymongo`_ library, it can be\nenabled using the ``PymongoInstrumentor``.\n\n.. _pymongo: https://pypi.org/project/pymongo\n\nUsage\n-----\n\n.. 
code:: python\n\n from pymongo import MongoClient\n from opentelemetry import trace\n from opentelemetry.trace import TracerProvider\n from opentelemetry.instrumentation.pymongo import PymongoInstrumentor\n\n trace.set_tracer_provider(TracerProvider())\n\n PymongoInstrumentor().instrument()\n client = MongoClient()\n db = client[\"MongoDB_Database\"]\n collection = db[\"MongoDB_Collection\"]\n collection.find_one()\n\nAPI\n---\n\"\"\"\n\nfrom pymongo import monitoring\n\nfrom opentelemetry import trace\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.pymongo.version import __version__\nfrom opentelemetry.trace import SpanKind, get_tracer\nfrom opentelemetry.trace.status import Status, StatusCanonicalCode\n\nDATABASE_TYPE = \"mongodb\"\nCOMMAND_ATTRIBUTES = [\"filter\", \"sort\", \"skip\", \"limit\", \"pipeline\"]\n\n\nclass CommandTracer(monitoring.CommandListener):\n def __init__(self, tracer):\n self._tracer = tracer\n self._span_dict = {}\n self.is_enabled = True\n\n def started(self, event: monitoring.CommandStartedEvent):\n \"\"\" Method to handle a pymongo CommandStartedEvent \"\"\"\n if not self.is_enabled:\n return\n command = event.command.get(event.command_name, \"\")\n name = DATABASE_TYPE + \".\" + event.command_name\n statement = event.command_name\n if command:\n name += \".\" + str(command)\n statement += \" \" + str(command)\n\n try:\n span = self._tracer.start_span(name, kind=SpanKind.CLIENT)\n span.set_attribute(\"component\", DATABASE_TYPE)\n span.set_attribute(\"db.type\", DATABASE_TYPE)\n span.set_attribute(\"db.instance\", event.database_name)\n span.set_attribute(\"db.statement\", statement)\n if event.connection_id is not None:\n span.set_attribute(\"net.peer.name\", event.connection_id[0])\n span.set_attribute(\"net.peer.port\", event.connection_id[1])\n\n # pymongo specific, not specified by spec\n span.set_attribute(\"db.mongo.operation_id\", event.operation_id)\n span.set_attribute(\"db.mongo.request_id\", event.request_id)\n\n for attr in COMMAND_ATTRIBUTES:\n _attr = event.command.get(attr)\n if _attr is not None:\n span.set_attribute(\"db.mongo.\" + attr, str(_attr))\n\n # Add Span to dictionary\n self._span_dict[_get_span_dict_key(event)] = span\n except Exception as ex: # noqa pylint: disable=broad-except\n if span is not None:\n span.set_status(Status(StatusCanonicalCode.INTERNAL, str(ex)))\n span.end()\n self._pop_span(event)\n\n def succeeded(self, event: monitoring.CommandSucceededEvent):\n \"\"\" Method to handle a pymongo CommandSucceededEvent \"\"\"\n if not self.is_enabled:\n return\n span = self._pop_span(event)\n if span is None:\n return\n span.set_attribute(\"db.mongo.duration_micros\", event.duration_micros)\n span.set_status(Status(StatusCanonicalCode.OK, event.reply))\n span.end()\n\n def failed(self, event: monitoring.CommandFailedEvent):\n \"\"\" Method to handle a pymongo CommandFailedEvent \"\"\"\n if not self.is_enabled:\n return\n span = self._pop_span(event)\n if span is None:\n return\n span.set_attribute(\"db.mongo.duration_micros\", event.duration_micros)\n span.set_status(Status(StatusCanonicalCode.UNKNOWN, event.failure))\n span.end()\n\n def _pop_span(self, event):\n return self._span_dict.pop(_get_span_dict_key(event), None)\n\n\ndef _get_span_dict_key(event):\n if event.connection_id is not None:\n return (event.request_id, event.connection_id)\n return event.request_id\n\n\nclass PymongoInstrumentor(BaseInstrumentor):\n _commandtracer_instance = None # type 
CommandTracer\n # The instrumentation for PyMongo is based on the event listener interface\n # https://api.mongodb.com/python/current/api/pymongo/monitoring.html.\n # This interface only allows to register listeners and does not provide\n # an unregister API. In order to provide a mechanishm to disable\n # instrumentation an enabled flag is implemented in CommandTracer,\n # it's checked in the different listeners.\n\n def _instrument(self, **kwargs):\n \"\"\"Integrate with pymongo to trace it using event listener.\n https://api.mongodb.com/python/current/api/pymongo/monitoring.html\n\n Args:\n tracer_provider: The `TracerProvider` to use. If none is passed the\n current configured one is used.\n \"\"\"\n\n tracer_provider = kwargs.get(\"tracer_provider\")\n\n # Create and register a CommandTracer only the first time\n if self._commandtracer_instance is None:\n tracer = get_tracer(__name__, __version__, tracer_provider)\n\n self._commandtracer_instance = CommandTracer(tracer)\n monitoring.register(self._commandtracer_instance)\n\n # If already created, just enable it\n self._commandtracer_instance.is_enabled = True\n\n def _uninstrument(self, **kwargs):\n if self._commandtracer_instance is not None:\n self._commandtracer_instance.is_enabled = False\n", "path": "instrumentation/opentelemetry-instrumentation-pymongo/src/opentelemetry/instrumentation/pymongo/__init__.py"}]}
| 2,489 | 207 |
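The fix in this record works because `event.command.get(event.command_name)` only yields a collection name (a `str`) for commands such as `find`; for `getMore` it is the cursor id, an `int`, which is exactly what broke the string concatenation in the traceback. A small sketch of the difference, using hand-written command documents instead of a live MongoDB connection (the dict shapes follow the driver's command format, and the cursor id below is made up):

```python
DATABASE_TYPE = "mongodb"


def span_name(command_name: str, command_doc: dict) -> str:
    """Build the span name the same way the patched listener does."""
    command = command_doc.get(command_name, "")
    name = DATABASE_TYPE + "." + command_name
    if command:
        # str() is the actual fix: getMore carries an int cursor id here.
        name += "." + str(command)
    return name


# find: the command value is the collection name, a str.
print(span_name("find", {"find": "MongoDB_Collection", "filter": {"batch_size": 1}}))
# -> mongodb.find.MongoDB_Collection

# getMore: the command value is the cursor id, an int; before the patch this
# raised "TypeError: can only concatenate str (not 'int') to str".
print(span_name("getMore", {"getMore": 7811254399, "collection": "MongoDB_Collection"}))
# -> mongodb.getMore.7811254399
```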
| gh_patches_debug_38652 | rasdani/github-patches | git_diff | sagemath__sage-37422 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
make sage.parallel.ncpus.ncpus() use os.cpu_count()
<div id="comment:0"></div>
Currently, `sage.parallel.ncpus.ncpus()` uses platform-specific code to determine the number of available CPUs for some specific systems. This functionality is now available in the standard `os` module as `cpu_count()`.
Component: **misc**
Author: **Lorenz Panny**
Branch/Commit: **[public/change_ncpus_to_os_module](https://github.com/sagemath/sagetrac-mirror/tree/public/change_ncpus_to_os_module) @ [`a509210`](https://github.com/sagemath/sagetrac-mirror/commit/a509210125fc50baf72dcb7f2248e96cddf61c8f)**
_Issue created by migration from https://trac.sagemath.org/ticket/34328_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sage/parallel/ncpus.py`
Content:
```
1 """
2 CPU Detection
3 """
4 # Parallel Python Software: http://www.parallelpython.com
5 # Copyright (c) 2005-2008, Vitalii Vanovschi
6 # All rights reserved.
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are met:
9 # * Redistributions of source code must retain the above copyright notice,
10 # this list of conditions and the following disclaimer.
11 # * Redistributions in binary form must reproduce the above copyright
12 # notice, this list of conditions and the following disclaimer in the
13 # documentation and/or other materials provided with the distribution.
14 # * Neither the name of the author nor the names of its contributors
15 # may be used to endorse or promote products derived from this software
16 # without specific prior written permission.
17 #
18 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 # THE POSSIBILITY OF SUCH DAMAGE.
29
30 ######
31 # This is from ParallelPython (the pp.py file).
32
33 import os
34 import subprocess
35
36
37 def ncpus():
38 """
39 Detects the number of effective CPUs in the system.
40
41 EXAMPLES::
42
43 sage: sage.parallel.ncpus.ncpus() # random output -- depends on machine.
44 2
45 """
46 # Support Sage environment variable SAGE_NUM_THREADS
47 # NOTE: while doctesting, this is forced to be 2 by the
48 # sage-runtests script
49 try:
50 n = os.environ["SAGE_NUM_THREADS"]
51 except KeyError:
52 pass
53 else:
54 return int(n)
55
56 # for Linux, Unix and MacOS
57 if hasattr(os, "sysconf"):
58 if "SC_NPROCESSORS_ONLN" in os.sysconf_names:
59 # Linux and Unix
60 ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
61 if isinstance(ncpus, int) and ncpus > 0:
62 return ncpus
63 else:
64 # MacOS X
65 # deprecated: return int(os.popen2("sysctl -n hw.ncpu")[1].read())
66 process = subprocess.Popen("sysctl -n hw.ncpu", shell=True,
67 stdin=subprocess.PIPE,
68 stdout=subprocess.PIPE,
69 stderr=subprocess.PIPE, close_fds=True)
70 return int(process.stdout.read())
71 # for Windows
72 if "NUMBER_OF_PROCESSORS" in os.environ:
73 ncpus = int(os.environ["NUMBER_OF_PROCESSORS"])
74 if ncpus > 0:
75 return ncpus
76 # return the default value
77 return 1
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/sage/parallel/ncpus.py b/src/sage/parallel/ncpus.py
--- a/src/sage/parallel/ncpus.py
+++ b/src/sage/parallel/ncpus.py
@@ -1,46 +1,19 @@
"""
CPU Detection
"""
-# Parallel Python Software: http://www.parallelpython.com
-# Copyright (c) 2005-2008, Vitalii Vanovschi
-# All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-# * Redistributions of source code must retain the above copyright notice,
-# this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-# * Neither the name of the author nor the names of its contributors
-# may be used to endorse or promote products derived from this software
-# without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
-# THE POSSIBILITY OF SUCH DAMAGE.
-
-######
-# This is from ParallelPython (the pp.py file).
import os
-import subprocess
def ncpus():
"""
- Detects the number of effective CPUs in the system.
+ Return the number of available CPUs in the system.
+
+ ALGORITHM: :func:`os.sched_getaffinity` or :func:`os.cpu_count`
EXAMPLES::
- sage: sage.parallel.ncpus.ncpus() # random output -- depends on machine.
+ sage: sage.parallel.ncpus.ncpus() # random output -- depends on machine
2
"""
# Support Sage environment variable SAGE_NUM_THREADS
@@ -53,25 +26,9 @@
else:
return int(n)
- # for Linux, Unix and MacOS
- if hasattr(os, "sysconf"):
- if "SC_NPROCESSORS_ONLN" in os.sysconf_names:
- # Linux and Unix
- ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
- if isinstance(ncpus, int) and ncpus > 0:
- return ncpus
- else:
- # MacOS X
- # deprecated: return int(os.popen2("sysctl -n hw.ncpu")[1].read())
- process = subprocess.Popen("sysctl -n hw.ncpu", shell=True,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE, close_fds=True)
- return int(process.stdout.read())
- # for Windows
- if "NUMBER_OF_PROCESSORS" in os.environ:
- ncpus = int(os.environ["NUMBER_OF_PROCESSORS"])
- if ncpus > 0:
- return ncpus
- # return the default value
- return 1
+ n = None
+
+ if hasattr(os, 'sched_getaffinity'):
+ n = len(os.sched_getaffinity(0))
+
+ return n or os.cpu_count() or 1
|
{"golden_diff": "diff --git a/src/sage/parallel/ncpus.py b/src/sage/parallel/ncpus.py\n--- a/src/sage/parallel/ncpus.py\n+++ b/src/sage/parallel/ncpus.py\n@@ -1,46 +1,19 @@\n \"\"\"\n CPU Detection\n \"\"\"\n-# Parallel Python Software: http://www.parallelpython.com\n-# Copyright (c) 2005-2008, Vitalii Vanovschi\n-# All rights reserved.\n-# Redistribution and use in source and binary forms, with or without\n-# modification, are permitted provided that the following conditions are met:\n-# * Redistributions of source code must retain the above copyright notice,\n-# this list of conditions and the following disclaimer.\n-# * Redistributions in binary form must reproduce the above copyright\n-# notice, this list of conditions and the following disclaimer in the\n-# documentation and/or other materials provided with the distribution.\n-# * Neither the name of the author nor the names of its contributors\n-# may be used to endorse or promote products derived from this software\n-# without specific prior written permission.\n-#\n-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n-# THE POSSIBILITY OF SUCH DAMAGE.\n-\n-######\n-# This is from ParallelPython (the pp.py file).\n \n import os\n-import subprocess\n \n \n def ncpus():\n \"\"\"\n- Detects the number of effective CPUs in the system.\n+ Return the number of available CPUs in the system.\n+\n+ ALGORITHM: :func:`os.sched_getaffinity` or :func:`os.cpu_count`\n \n EXAMPLES::\n \n- sage: sage.parallel.ncpus.ncpus() # random output -- depends on machine.\n+ sage: sage.parallel.ncpus.ncpus() # random output -- depends on machine\n 2\n \"\"\"\n # Support Sage environment variable SAGE_NUM_THREADS\n@@ -53,25 +26,9 @@\n else:\n return int(n)\n \n- # for Linux, Unix and MacOS\n- if hasattr(os, \"sysconf\"):\n- if \"SC_NPROCESSORS_ONLN\" in os.sysconf_names:\n- # Linux and Unix\n- ncpus = os.sysconf(\"SC_NPROCESSORS_ONLN\")\n- if isinstance(ncpus, int) and ncpus > 0:\n- return ncpus\n- else:\n- # MacOS X\n- # deprecated: return int(os.popen2(\"sysctl -n hw.ncpu\")[1].read())\n- process = subprocess.Popen(\"sysctl -n hw.ncpu\", shell=True,\n- stdin=subprocess.PIPE,\n- stdout=subprocess.PIPE,\n- stderr=subprocess.PIPE, close_fds=True)\n- return int(process.stdout.read())\n- # for Windows\n- if \"NUMBER_OF_PROCESSORS\" in os.environ:\n- ncpus = int(os.environ[\"NUMBER_OF_PROCESSORS\"])\n- if ncpus > 0:\n- return ncpus\n- # return the default value\n- return 1\n+ n = None\n+\n+ if hasattr(os, 'sched_getaffinity'):\n+ n = len(os.sched_getaffinity(0))\n+\n+ return n or os.cpu_count() or 1\n", "issue": "make sage.parallel.ncpus.ncpus() use os.cpu_count()\n<div id=\"comment:0\"></div>\n\nCurrently, `sage.parallel.ncpus.ncpus()` uses platform-specific code to determine the number of available CPUs for some specific systems. 
This functionality is now available in the standard `os` module as `cpu_count()`.\n\nComponent: **misc**\n\nAuthor: **Lorenz Panny**\n\nBranch/Commit: **[public/change_ncpus_to_os_module](https://github.com/sagemath/sagetrac-mirror/tree/public/change_ncpus_to_os_module) @ [`a509210`](https://github.com/sagemath/sagetrac-mirror/commit/a509210125fc50baf72dcb7f2248e96cddf61c8f)**\n\n_Issue created by migration from https://trac.sagemath.org/ticket/34328_\n\n\n", "before_files": [{"content": "\"\"\"\nCPU Detection\n\"\"\"\n# Parallel Python Software: http://www.parallelpython.com\n# Copyright (c) 2005-2008, Vitalii Vanovschi\n# All rights reserved.\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the author nor the names of its contributors\n# may be used to endorse or promote products derived from this software\n# without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n# THE POSSIBILITY OF SUCH DAMAGE.\n\n######\n# This is from ParallelPython (the pp.py file).\n\nimport os\nimport subprocess\n\n\ndef ncpus():\n \"\"\"\n Detects the number of effective CPUs in the system.\n\n EXAMPLES::\n\n sage: sage.parallel.ncpus.ncpus() # random output -- depends on machine.\n 2\n \"\"\"\n # Support Sage environment variable SAGE_NUM_THREADS\n # NOTE: while doctesting, this is forced to be 2 by the\n # sage-runtests script\n try:\n n = os.environ[\"SAGE_NUM_THREADS\"]\n except KeyError:\n pass\n else:\n return int(n)\n\n # for Linux, Unix and MacOS\n if hasattr(os, \"sysconf\"):\n if \"SC_NPROCESSORS_ONLN\" in os.sysconf_names:\n # Linux and Unix\n ncpus = os.sysconf(\"SC_NPROCESSORS_ONLN\")\n if isinstance(ncpus, int) and ncpus > 0:\n return ncpus\n else:\n # MacOS X\n # deprecated: return int(os.popen2(\"sysctl -n hw.ncpu\")[1].read())\n process = subprocess.Popen(\"sysctl -n hw.ncpu\", shell=True,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, close_fds=True)\n return int(process.stdout.read())\n # for Windows\n if \"NUMBER_OF_PROCESSORS\" in os.environ:\n ncpus = int(os.environ[\"NUMBER_OF_PROCESSORS\"])\n if ncpus > 0:\n return ncpus\n # return the default value\n return 1\n", "path": "src/sage/parallel/ncpus.py"}], "after_files": [{"content": "\"\"\"\nCPU Detection\n\"\"\"\n\nimport os\n\n\ndef ncpus():\n \"\"\"\n Return the number of available CPUs in the system.\n\n 
ALGORITHM: :func:`os.sched_getaffinity` or :func:`os.cpu_count`\n\n EXAMPLES::\n\n sage: sage.parallel.ncpus.ncpus() # random output -- depends on machine\n 2\n \"\"\"\n # Support Sage environment variable SAGE_NUM_THREADS\n # NOTE: while doctesting, this is forced to be 2 by the\n # sage-runtests script\n try:\n n = os.environ[\"SAGE_NUM_THREADS\"]\n except KeyError:\n pass\n else:\n return int(n)\n\n n = None\n\n if hasattr(os, 'sched_getaffinity'):\n n = len(os.sched_getaffinity(0))\n\n return n or os.cpu_count() or 1\n", "path": "src/sage/parallel/ncpus.py"}]}
| 1,302 | 867 |
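For reference, the detection strategy in the sage patch above — prefer `os.sched_getaffinity(0)` when available, otherwise `os.cpu_count()`, defaulting to 1 — can be sketched standalone. This is only an illustrative sketch; the helper name `available_cpus` is ours, not from the patch.

```python
# Standalone sketch of the CPU-detection strategy from the patch above:
# prefer sched_getaffinity (respects CPU pinning / cgroup masks on Linux),
# fall back to cpu_count(), and default to 1 if neither gives an answer.
import os


def available_cpus() -> int:
    n = None
    if hasattr(os, "sched_getaffinity"):
        n = len(os.sched_getaffinity(0))
    return n or os.cpu_count() or 1


if __name__ == "__main__":
    print(available_cpus())
```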
gh_patches_debug_8709
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-python-817
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
last_event_id() returns None after SentryAsgiMiddleware.__call__() finished
I'm unable to access the `last_event_id` of `SentryAsgiMiddleware` in an exception handler in the `Starlette` framework.
```python
from sentry_sdk import last_event_id
from sentry_sdk.integrations.asgi import SentryAsgiMiddleware
from starlette.applications import Starlette
from starlette.responses import JSONResponse
from starlette.routing import Route
async def test_endpoint(request):
raise RuntimeError("test")
def exception_handler(*args, **kwargs):
return JSONResponse({"last_event_id": last_event_id()})
app = Starlette(
routes=[Route('/', test_endpoint)],
exception_handlers={
Exception: exception_handler,
}
)
app.add_middleware(SentryAsgiMiddleware)
```
The problem is probably the usage of the Hub's context manager in `SentryAsgiMiddleware._run_app()`: after an exception is thrown, the local `ContextVar` is cleared, so the `last_event_id` function tries to access the wrong Hub instance.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sentry_sdk/integrations/asgi.py`
Content:
```
1 """
2 An ASGI middleware.
3
4 Based on Tom Christie's `sentry-asgi <https://github.com/encode/sentry-asgi>`_.
5 """
6
7 import asyncio
8 import inspect
9 import urllib
10
11 from sentry_sdk._functools import partial
12 from sentry_sdk._types import MYPY
13 from sentry_sdk.hub import Hub, _should_send_default_pii
14 from sentry_sdk.integrations._wsgi_common import _filter_headers
15 from sentry_sdk.utils import (
16 ContextVar,
17 event_from_exception,
18 transaction_from_function,
19 HAS_REAL_CONTEXTVARS,
20 CONTEXTVARS_ERROR_MESSAGE,
21 )
22 from sentry_sdk.tracing import Transaction
23
24 if MYPY:
25 from typing import Dict
26 from typing import Any
27 from typing import Optional
28 from typing import Callable
29
30 from typing_extensions import Literal
31
32 from sentry_sdk._types import Event, Hint
33
34
35 _asgi_middleware_applied = ContextVar("sentry_asgi_middleware_applied")
36
37 _DEFAULT_TRANSACTION_NAME = "generic ASGI request"
38
39
40 def _capture_exception(hub, exc):
41 # type: (Hub, Any) -> None
42
43 # Check client here as it might have been unset while streaming response
44 if hub.client is not None:
45 event, hint = event_from_exception(
46 exc,
47 client_options=hub.client.options,
48 mechanism={"type": "asgi", "handled": False},
49 )
50 hub.capture_event(event, hint=hint)
51
52
53 def _looks_like_asgi3(app):
54 # type: (Any) -> bool
55 """
56 Try to figure out if an application object supports ASGI3.
57
58 This is how uvicorn figures out the application version as well.
59 """
60 if inspect.isclass(app):
61 return hasattr(app, "__await__")
62 elif inspect.isfunction(app):
63 return asyncio.iscoroutinefunction(app)
64 else:
65 call = getattr(app, "__call__", None) # noqa
66 return asyncio.iscoroutinefunction(call)
67
68
69 class SentryAsgiMiddleware:
70 __slots__ = ("app", "__call__")
71
72 def __init__(self, app, unsafe_context_data=False):
73 # type: (Any, bool) -> None
74 """
75 Instrument an ASGI application with Sentry. Provides HTTP/websocket
76 data to sent events and basic handling for exceptions bubbling up
77 through the middleware.
78
79 :param unsafe_context_data: Disable errors when a proper contextvars installation could not be found. We do not recommend changing this from the default.
80 """
81
82 if not unsafe_context_data and not HAS_REAL_CONTEXTVARS:
83 # We better have contextvars or we're going to leak state between
84 # requests.
85 raise RuntimeError(
86 "The ASGI middleware for Sentry requires Python 3.7+ "
87 "or the aiocontextvars package." + CONTEXTVARS_ERROR_MESSAGE
88 )
89 self.app = app
90
91 if _looks_like_asgi3(app):
92 self.__call__ = self._run_asgi3 # type: Callable[..., Any]
93 else:
94 self.__call__ = self._run_asgi2
95
96 def _run_asgi2(self, scope):
97 # type: (Any) -> Any
98 async def inner(receive, send):
99 # type: (Any, Any) -> Any
100 return await self._run_app(scope, lambda: self.app(scope)(receive, send))
101
102 return inner
103
104 async def _run_asgi3(self, scope, receive, send):
105 # type: (Any, Any, Any) -> Any
106 return await self._run_app(scope, lambda: self.app(scope, receive, send))
107
108 async def _run_app(self, scope, callback):
109 # type: (Any, Any) -> Any
110 if _asgi_middleware_applied.get(False):
111 return await callback()
112
113 _asgi_middleware_applied.set(True)
114 try:
115 hub = Hub(Hub.current)
116 with hub:
117 with hub.configure_scope() as sentry_scope:
118 sentry_scope.clear_breadcrumbs()
119 sentry_scope._name = "asgi"
120 processor = partial(self.event_processor, asgi_scope=scope)
121 sentry_scope.add_event_processor(processor)
122
123 ty = scope["type"]
124
125 if ty in ("http", "websocket"):
126 transaction = Transaction.continue_from_headers(
127 dict(scope["headers"]),
128 op="{}.server".format(ty),
129 )
130 else:
131 transaction = Transaction(op="asgi.server")
132
133 transaction.name = _DEFAULT_TRANSACTION_NAME
134 transaction.set_tag("asgi.type", ty)
135
136 with hub.start_transaction(transaction):
137 # XXX: Would be cool to have correct span status, but we
138 # would have to wrap send(). That is a bit hard to do with
139 # the current abstraction over ASGI 2/3.
140 try:
141 return await callback()
142 except Exception as exc:
143 _capture_exception(hub, exc)
144 raise exc from None
145 finally:
146 _asgi_middleware_applied.set(False)
147
148 def event_processor(self, event, hint, asgi_scope):
149 # type: (Event, Hint, Any) -> Optional[Event]
150 request_info = event.get("request", {})
151
152 ty = asgi_scope["type"]
153 if ty in ("http", "websocket"):
154 request_info["method"] = asgi_scope.get("method")
155 request_info["headers"] = headers = _filter_headers(
156 self._get_headers(asgi_scope)
157 )
158 request_info["query_string"] = self._get_query(asgi_scope)
159
160 request_info["url"] = self._get_url(
161 asgi_scope, "http" if ty == "http" else "ws", headers.get("host")
162 )
163
164 client = asgi_scope.get("client")
165 if client and _should_send_default_pii():
166 request_info["env"] = {"REMOTE_ADDR": client[0]}
167
168 if (
169 event.get("transaction", _DEFAULT_TRANSACTION_NAME)
170 == _DEFAULT_TRANSACTION_NAME
171 ):
172 endpoint = asgi_scope.get("endpoint")
173 # Webframeworks like Starlette mutate the ASGI env once routing is
174 # done, which is sometime after the request has started. If we have
175 # an endpoint, overwrite our generic transaction name.
176 if endpoint:
177 event["transaction"] = transaction_from_function(endpoint)
178
179 event["request"] = request_info
180
181 return event
182
183 # Helper functions for extracting request data.
184 #
185 # Note: Those functions are not public API. If you want to mutate request
186 # data to your liking it's recommended to use the `before_send` callback
187 # for that.
188
189 def _get_url(self, scope, default_scheme, host):
190 # type: (Dict[str, Any], Literal["ws", "http"], Optional[str]) -> str
191 """
192 Extract URL from the ASGI scope, without also including the querystring.
193 """
194 scheme = scope.get("scheme", default_scheme)
195
196 server = scope.get("server", None)
197 path = scope.get("root_path", "") + scope.get("path", "")
198
199 if host:
200 return "%s://%s%s" % (scheme, host, path)
201
202 if server is not None:
203 host, port = server
204 default_port = {"http": 80, "https": 443, "ws": 80, "wss": 443}[scheme]
205 if port != default_port:
206 return "%s://%s:%s%s" % (scheme, host, port, path)
207 return "%s://%s%s" % (scheme, host, path)
208 return path
209
210 def _get_query(self, scope):
211 # type: (Any) -> Any
212 """
213 Extract querystring from the ASGI scope, in the format that the Sentry protocol expects.
214 """
215 qs = scope.get("query_string")
216 if not qs:
217 return None
218 return urllib.parse.unquote(qs.decode("latin-1"))
219
220 def _get_headers(self, scope):
221 # type: (Any) -> Dict[str, str]
222 """
223 Extract headers from the ASGI scope, in the format that the Sentry protocol expects.
224 """
225 headers = {} # type: Dict[str, str]
226 for raw_key, raw_value in scope["headers"]:
227 key = raw_key.decode("latin-1")
228 value = raw_value.decode("latin-1")
229 if key in headers:
230 headers[key] = headers[key] + ", " + value
231 else:
232 headers[key] = value
233 return headers
234
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sentry_sdk/integrations/asgi.py b/sentry_sdk/integrations/asgi.py
--- a/sentry_sdk/integrations/asgi.py
+++ b/sentry_sdk/integrations/asgi.py
@@ -107,8 +107,14 @@
async def _run_app(self, scope, callback):
# type: (Any, Any) -> Any
- if _asgi_middleware_applied.get(False):
- return await callback()
+ is_recursive_asgi_middleware = _asgi_middleware_applied.get(False)
+
+ if is_recursive_asgi_middleware:
+ try:
+ return await callback()
+ except Exception as exc:
+ _capture_exception(Hub.current, exc)
+ raise exc from None
_asgi_middleware_applied.set(True)
try:
|
{"golden_diff": "diff --git a/sentry_sdk/integrations/asgi.py b/sentry_sdk/integrations/asgi.py\n--- a/sentry_sdk/integrations/asgi.py\n+++ b/sentry_sdk/integrations/asgi.py\n@@ -107,8 +107,14 @@\n \n async def _run_app(self, scope, callback):\n # type: (Any, Any) -> Any\n- if _asgi_middleware_applied.get(False):\n- return await callback()\n+ is_recursive_asgi_middleware = _asgi_middleware_applied.get(False)\n+\n+ if is_recursive_asgi_middleware:\n+ try:\n+ return await callback()\n+ except Exception as exc:\n+ _capture_exception(Hub.current, exc)\n+ raise exc from None\n \n _asgi_middleware_applied.set(True)\n try:\n", "issue": "last_event_id() returns None after SentryAsgiMiddleware.__call__() finished\nI'm unable to access `last_event_id` of `SentryAsgiMiddleware` on exception handler in `Starlette` framework.\r\n```python\r\nfrom sentry_sdk import last_event_id\r\nfrom sentry_sdk.integrations.asgi import SentryAsgiMiddleware\r\nfrom starlette.applications import Starlette\r\nfrom starlette.responses import JSONResponse\r\nfrom starlette.routing import Route\r\n\r\n\r\nasync def test_endpoint(request):\r\n raise RuntimeError(\"test\")\r\n\r\n\r\ndef exception_handler(*args, **kwargs):\r\n return JSONResponse({\"last_event_id\": last_event_id()})\r\n\r\n\r\napp = Starlette(\r\n routes=[Route('/', test_endpoint)],\r\n exception_handlers={\r\n Exception: exception_handler,\r\n }\r\n)\r\napp.add_middleware(SentryAsgiMiddleware)\r\n``` \r\n\r\nthe problem is probably with usage of Hub's context manager in `SentryAsgiMiddleware._run_app()` - after throwing exception you are clearing local `ContextVar` so `last_event_id` function tries to access wrong Hub instance.\n", "before_files": [{"content": "\"\"\"\nAn ASGI middleware.\n\nBased on Tom Christie's `sentry-asgi <https://github.com/encode/sentry-asgi>`_.\n\"\"\"\n\nimport asyncio\nimport inspect\nimport urllib\n\nfrom sentry_sdk._functools import partial\nfrom sentry_sdk._types import MYPY\nfrom sentry_sdk.hub import Hub, _should_send_default_pii\nfrom sentry_sdk.integrations._wsgi_common import _filter_headers\nfrom sentry_sdk.utils import (\n ContextVar,\n event_from_exception,\n transaction_from_function,\n HAS_REAL_CONTEXTVARS,\n CONTEXTVARS_ERROR_MESSAGE,\n)\nfrom sentry_sdk.tracing import Transaction\n\nif MYPY:\n from typing import Dict\n from typing import Any\n from typing import Optional\n from typing import Callable\n\n from typing_extensions import Literal\n\n from sentry_sdk._types import Event, Hint\n\n\n_asgi_middleware_applied = ContextVar(\"sentry_asgi_middleware_applied\")\n\n_DEFAULT_TRANSACTION_NAME = \"generic ASGI request\"\n\n\ndef _capture_exception(hub, exc):\n # type: (Hub, Any) -> None\n\n # Check client here as it might have been unset while streaming response\n if hub.client is not None:\n event, hint = event_from_exception(\n exc,\n client_options=hub.client.options,\n mechanism={\"type\": \"asgi\", \"handled\": False},\n )\n hub.capture_event(event, hint=hint)\n\n\ndef _looks_like_asgi3(app):\n # type: (Any) -> bool\n \"\"\"\n Try to figure out if an application object supports ASGI3.\n\n This is how uvicorn figures out the application version as well.\n \"\"\"\n if inspect.isclass(app):\n return hasattr(app, \"__await__\")\n elif inspect.isfunction(app):\n return asyncio.iscoroutinefunction(app)\n else:\n call = getattr(app, \"__call__\", None) # noqa\n return asyncio.iscoroutinefunction(call)\n\n\nclass SentryAsgiMiddleware:\n __slots__ = (\"app\", \"__call__\")\n\n def __init__(self, app, 
unsafe_context_data=False):\n # type: (Any, bool) -> None\n \"\"\"\n Instrument an ASGI application with Sentry. Provides HTTP/websocket\n data to sent events and basic handling for exceptions bubbling up\n through the middleware.\n\n :param unsafe_context_data: Disable errors when a proper contextvars installation could not be found. We do not recommend changing this from the default.\n \"\"\"\n\n if not unsafe_context_data and not HAS_REAL_CONTEXTVARS:\n # We better have contextvars or we're going to leak state between\n # requests.\n raise RuntimeError(\n \"The ASGI middleware for Sentry requires Python 3.7+ \"\n \"or the aiocontextvars package.\" + CONTEXTVARS_ERROR_MESSAGE\n )\n self.app = app\n\n if _looks_like_asgi3(app):\n self.__call__ = self._run_asgi3 # type: Callable[..., Any]\n else:\n self.__call__ = self._run_asgi2\n\n def _run_asgi2(self, scope):\n # type: (Any) -> Any\n async def inner(receive, send):\n # type: (Any, Any) -> Any\n return await self._run_app(scope, lambda: self.app(scope)(receive, send))\n\n return inner\n\n async def _run_asgi3(self, scope, receive, send):\n # type: (Any, Any, Any) -> Any\n return await self._run_app(scope, lambda: self.app(scope, receive, send))\n\n async def _run_app(self, scope, callback):\n # type: (Any, Any) -> Any\n if _asgi_middleware_applied.get(False):\n return await callback()\n\n _asgi_middleware_applied.set(True)\n try:\n hub = Hub(Hub.current)\n with hub:\n with hub.configure_scope() as sentry_scope:\n sentry_scope.clear_breadcrumbs()\n sentry_scope._name = \"asgi\"\n processor = partial(self.event_processor, asgi_scope=scope)\n sentry_scope.add_event_processor(processor)\n\n ty = scope[\"type\"]\n\n if ty in (\"http\", \"websocket\"):\n transaction = Transaction.continue_from_headers(\n dict(scope[\"headers\"]),\n op=\"{}.server\".format(ty),\n )\n else:\n transaction = Transaction(op=\"asgi.server\")\n\n transaction.name = _DEFAULT_TRANSACTION_NAME\n transaction.set_tag(\"asgi.type\", ty)\n\n with hub.start_transaction(transaction):\n # XXX: Would be cool to have correct span status, but we\n # would have to wrap send(). That is a bit hard to do with\n # the current abstraction over ASGI 2/3.\n try:\n return await callback()\n except Exception as exc:\n _capture_exception(hub, exc)\n raise exc from None\n finally:\n _asgi_middleware_applied.set(False)\n\n def event_processor(self, event, hint, asgi_scope):\n # type: (Event, Hint, Any) -> Optional[Event]\n request_info = event.get(\"request\", {})\n\n ty = asgi_scope[\"type\"]\n if ty in (\"http\", \"websocket\"):\n request_info[\"method\"] = asgi_scope.get(\"method\")\n request_info[\"headers\"] = headers = _filter_headers(\n self._get_headers(asgi_scope)\n )\n request_info[\"query_string\"] = self._get_query(asgi_scope)\n\n request_info[\"url\"] = self._get_url(\n asgi_scope, \"http\" if ty == \"http\" else \"ws\", headers.get(\"host\")\n )\n\n client = asgi_scope.get(\"client\")\n if client and _should_send_default_pii():\n request_info[\"env\"] = {\"REMOTE_ADDR\": client[0]}\n\n if (\n event.get(\"transaction\", _DEFAULT_TRANSACTION_NAME)\n == _DEFAULT_TRANSACTION_NAME\n ):\n endpoint = asgi_scope.get(\"endpoint\")\n # Webframeworks like Starlette mutate the ASGI env once routing is\n # done, which is sometime after the request has started. 
If we have\n # an endpoint, overwrite our generic transaction name.\n if endpoint:\n event[\"transaction\"] = transaction_from_function(endpoint)\n\n event[\"request\"] = request_info\n\n return event\n\n # Helper functions for extracting request data.\n #\n # Note: Those functions are not public API. If you want to mutate request\n # data to your liking it's recommended to use the `before_send` callback\n # for that.\n\n def _get_url(self, scope, default_scheme, host):\n # type: (Dict[str, Any], Literal[\"ws\", \"http\"], Optional[str]) -> str\n \"\"\"\n Extract URL from the ASGI scope, without also including the querystring.\n \"\"\"\n scheme = scope.get(\"scheme\", default_scheme)\n\n server = scope.get(\"server\", None)\n path = scope.get(\"root_path\", \"\") + scope.get(\"path\", \"\")\n\n if host:\n return \"%s://%s%s\" % (scheme, host, path)\n\n if server is not None:\n host, port = server\n default_port = {\"http\": 80, \"https\": 443, \"ws\": 80, \"wss\": 443}[scheme]\n if port != default_port:\n return \"%s://%s:%s%s\" % (scheme, host, port, path)\n return \"%s://%s%s\" % (scheme, host, path)\n return path\n\n def _get_query(self, scope):\n # type: (Any) -> Any\n \"\"\"\n Extract querystring from the ASGI scope, in the format that the Sentry protocol expects.\n \"\"\"\n qs = scope.get(\"query_string\")\n if not qs:\n return None\n return urllib.parse.unquote(qs.decode(\"latin-1\"))\n\n def _get_headers(self, scope):\n # type: (Any) -> Dict[str, str]\n \"\"\"\n Extract headers from the ASGI scope, in the format that the Sentry protocol expects.\n \"\"\"\n headers = {} # type: Dict[str, str]\n for raw_key, raw_value in scope[\"headers\"]:\n key = raw_key.decode(\"latin-1\")\n value = raw_value.decode(\"latin-1\")\n if key in headers:\n headers[key] = headers[key] + \", \" + value\n else:\n headers[key] = value\n return headers\n", "path": "sentry_sdk/integrations/asgi.py"}], "after_files": [{"content": "\"\"\"\nAn ASGI middleware.\n\nBased on Tom Christie's `sentry-asgi <https://github.com/encode/sentry-asgi>`_.\n\"\"\"\n\nimport asyncio\nimport inspect\nimport urllib\n\nfrom sentry_sdk._functools import partial\nfrom sentry_sdk._types import MYPY\nfrom sentry_sdk.hub import Hub, _should_send_default_pii\nfrom sentry_sdk.integrations._wsgi_common import _filter_headers\nfrom sentry_sdk.utils import (\n ContextVar,\n event_from_exception,\n transaction_from_function,\n HAS_REAL_CONTEXTVARS,\n CONTEXTVARS_ERROR_MESSAGE,\n)\nfrom sentry_sdk.tracing import Transaction\n\nif MYPY:\n from typing import Dict\n from typing import Any\n from typing import Optional\n from typing import Callable\n\n from typing_extensions import Literal\n\n from sentry_sdk._types import Event, Hint\n\n\n_asgi_middleware_applied = ContextVar(\"sentry_asgi_middleware_applied\")\n\n_DEFAULT_TRANSACTION_NAME = \"generic ASGI request\"\n\n\ndef _capture_exception(hub, exc):\n # type: (Hub, Any) -> None\n\n # Check client here as it might have been unset while streaming response\n if hub.client is not None:\n event, hint = event_from_exception(\n exc,\n client_options=hub.client.options,\n mechanism={\"type\": \"asgi\", \"handled\": False},\n )\n hub.capture_event(event, hint=hint)\n\n\ndef _looks_like_asgi3(app):\n # type: (Any) -> bool\n \"\"\"\n Try to figure out if an application object supports ASGI3.\n\n This is how uvicorn figures out the application version as well.\n \"\"\"\n if inspect.isclass(app):\n return hasattr(app, \"__await__\")\n elif inspect.isfunction(app):\n return 
asyncio.iscoroutinefunction(app)\n else:\n call = getattr(app, \"__call__\", None) # noqa\n return asyncio.iscoroutinefunction(call)\n\n\nclass SentryAsgiMiddleware:\n __slots__ = (\"app\", \"__call__\")\n\n def __init__(self, app, unsafe_context_data=False):\n # type: (Any, bool) -> None\n \"\"\"\n Instrument an ASGI application with Sentry. Provides HTTP/websocket\n data to sent events and basic handling for exceptions bubbling up\n through the middleware.\n\n :param unsafe_context_data: Disable errors when a proper contextvars installation could not be found. We do not recommend changing this from the default.\n \"\"\"\n\n if not unsafe_context_data and not HAS_REAL_CONTEXTVARS:\n # We better have contextvars or we're going to leak state between\n # requests.\n raise RuntimeError(\n \"The ASGI middleware for Sentry requires Python 3.7+ \"\n \"or the aiocontextvars package.\" + CONTEXTVARS_ERROR_MESSAGE\n )\n self.app = app\n\n if _looks_like_asgi3(app):\n self.__call__ = self._run_asgi3 # type: Callable[..., Any]\n else:\n self.__call__ = self._run_asgi2\n\n def _run_asgi2(self, scope):\n # type: (Any) -> Any\n async def inner(receive, send):\n # type: (Any, Any) -> Any\n return await self._run_app(scope, lambda: self.app(scope)(receive, send))\n\n return inner\n\n async def _run_asgi3(self, scope, receive, send):\n # type: (Any, Any, Any) -> Any\n return await self._run_app(scope, lambda: self.app(scope, receive, send))\n\n async def _run_app(self, scope, callback):\n # type: (Any, Any) -> Any\n is_recursive_asgi_middleware = _asgi_middleware_applied.get(False)\n\n if is_recursive_asgi_middleware:\n try:\n return await callback()\n except Exception as exc:\n _capture_exception(Hub.current, exc)\n raise exc from None\n\n _asgi_middleware_applied.set(True)\n try:\n hub = Hub(Hub.current)\n with hub:\n with hub.configure_scope() as sentry_scope:\n sentry_scope.clear_breadcrumbs()\n sentry_scope._name = \"asgi\"\n processor = partial(self.event_processor, asgi_scope=scope)\n sentry_scope.add_event_processor(processor)\n\n ty = scope[\"type\"]\n\n if ty in (\"http\", \"websocket\"):\n transaction = Transaction.continue_from_headers(\n dict(scope[\"headers\"]),\n op=\"{}.server\".format(ty),\n )\n else:\n transaction = Transaction(op=\"asgi.server\")\n\n transaction.name = _DEFAULT_TRANSACTION_NAME\n transaction.set_tag(\"asgi.type\", ty)\n\n with hub.start_transaction(transaction):\n # XXX: Would be cool to have correct span status, but we\n # would have to wrap send(). 
That is a bit hard to do with\n # the current abstraction over ASGI 2/3.\n try:\n return await callback()\n except Exception as exc:\n _capture_exception(hub, exc)\n raise exc from None\n finally:\n _asgi_middleware_applied.set(False)\n\n def event_processor(self, event, hint, asgi_scope):\n # type: (Event, Hint, Any) -> Optional[Event]\n request_info = event.get(\"request\", {})\n\n ty = asgi_scope[\"type\"]\n if ty in (\"http\", \"websocket\"):\n request_info[\"method\"] = asgi_scope.get(\"method\")\n request_info[\"headers\"] = headers = _filter_headers(\n self._get_headers(asgi_scope)\n )\n request_info[\"query_string\"] = self._get_query(asgi_scope)\n\n request_info[\"url\"] = self._get_url(\n asgi_scope, \"http\" if ty == \"http\" else \"ws\", headers.get(\"host\")\n )\n\n client = asgi_scope.get(\"client\")\n if client and _should_send_default_pii():\n request_info[\"env\"] = {\"REMOTE_ADDR\": client[0]}\n\n if (\n event.get(\"transaction\", _DEFAULT_TRANSACTION_NAME)\n == _DEFAULT_TRANSACTION_NAME\n ):\n endpoint = asgi_scope.get(\"endpoint\")\n # Webframeworks like Starlette mutate the ASGI env once routing is\n # done, which is sometime after the request has started. If we have\n # an endpoint, overwrite our generic transaction name.\n if endpoint:\n event[\"transaction\"] = transaction_from_function(endpoint)\n\n event[\"request\"] = request_info\n\n return event\n\n # Helper functions for extracting request data.\n #\n # Note: Those functions are not public API. If you want to mutate request\n # data to your liking it's recommended to use the `before_send` callback\n # for that.\n\n def _get_url(self, scope, default_scheme, host):\n # type: (Dict[str, Any], Literal[\"ws\", \"http\"], Optional[str]) -> str\n \"\"\"\n Extract URL from the ASGI scope, without also including the querystring.\n \"\"\"\n scheme = scope.get(\"scheme\", default_scheme)\n\n server = scope.get(\"server\", None)\n path = scope.get(\"root_path\", \"\") + scope.get(\"path\", \"\")\n\n if host:\n return \"%s://%s%s\" % (scheme, host, path)\n\n if server is not None:\n host, port = server\n default_port = {\"http\": 80, \"https\": 443, \"ws\": 80, \"wss\": 443}[scheme]\n if port != default_port:\n return \"%s://%s:%s%s\" % (scheme, host, port, path)\n return \"%s://%s%s\" % (scheme, host, path)\n return path\n\n def _get_query(self, scope):\n # type: (Any) -> Any\n \"\"\"\n Extract querystring from the ASGI scope, in the format that the Sentry protocol expects.\n \"\"\"\n qs = scope.get(\"query_string\")\n if not qs:\n return None\n return urllib.parse.unquote(qs.decode(\"latin-1\"))\n\n def _get_headers(self, scope):\n # type: (Any) -> Dict[str, str]\n \"\"\"\n Extract headers from the ASGI scope, in the format that the Sentry protocol expects.\n \"\"\"\n headers = {} # type: Dict[str, str]\n for raw_key, raw_value in scope[\"headers\"]:\n key = raw_key.decode(\"latin-1\")\n value = raw_value.decode(\"latin-1\")\n if key in headers:\n headers[key] = headers[key] + \", \" + value\n else:\n headers[key] = value\n return headers\n", "path": "sentry_sdk/integrations/asgi.py"}]}
| 2,939 | 186 |
gh_patches_debug_9132
|
rasdani/github-patches
|
git_diff
|
bokeh__bokeh-10307
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] cElementTree has been deprecated and will be removed in favor of ElementTree
Reference : https://bugs.python.org/issue36543
```
bokeh/sampledata/us_states.py
33:import xml.etree.cElementTree as et
bokeh/sampledata/us_counties.py
40:import xml.etree.cElementTree as et
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bokeh/sampledata/us_counties.py`
Content:
```
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
3 # All rights reserved.
4 #
5 # The full license is in the file LICENSE.txt, distributed with this software.
6 #-----------------------------------------------------------------------------
7 ''' This modules exposes geometry data for Unites States. It exposes a
8 dictionary ``data``, which is indexed by the two-tuples:
9
10 .. code-block:: python
11
12 (state_id, county_id)
13
14 that have the following dictionaries as the associated value:
15
16 .. code-block:: python
17
18 data[(1,1)]['name']
19 data[(1,1)]['state']
20 data[(1,1)]['detailed name']
21 data[(1,1)]['lats']
22 data[(1,1)]['lons']
23
24 Entries for ``'name'`` can have duplicates for certain states (e.g. Virginia).
25 The combination of ``'detailed name'`` and ``'state'`` will always be unique.
26
27 '''
28 #-----------------------------------------------------------------------------
29 # Boilerplate
30 #-----------------------------------------------------------------------------
31 import logging # isort:skip
32 log = logging.getLogger(__name__)
33
34 #-----------------------------------------------------------------------------
35 # Imports
36 #-----------------------------------------------------------------------------
37
38 # Standard library imports
39 import csv
40 import xml.etree.cElementTree as et
41
42 # Bokeh imports
43 from ..util.sampledata import external_path, open_csv
44
45 #-----------------------------------------------------------------------------
46 # Globals and constants
47 #-----------------------------------------------------------------------------
48
49 __all__ = (
50 'data',
51 )
52
53 #-----------------------------------------------------------------------------
54 # General API
55 #-----------------------------------------------------------------------------
56
57 #-----------------------------------------------------------------------------
58 # Dev API
59 #-----------------------------------------------------------------------------
60
61 #-----------------------------------------------------------------------------
62 # Private API
63 #-----------------------------------------------------------------------------
64
65 def _read_data():
66 '''
67
68 '''
69 nan = float('NaN')
70
71 data = {}
72
73 with open_csv(external_path('US_Counties.csv')) as f:
74 next(f)
75 reader = csv.reader(f, delimiter=str(','), quotechar=str('"'))
76 for row in reader:
77 name, dummy, state, dummy, geometry, dummy, dummy, dummy, det_name, state_id, county_id, dummy, dummy = row
78 xml = et.fromstring(geometry)
79 lats = []
80 lons = []
81 for i, poly in enumerate(xml.findall('.//outerBoundaryIs/LinearRing/coordinates')):
82 if i > 0:
83 lats.append(nan)
84 lons.append(nan)
85 coords = (c.split(',')[:2] for c in poly.text.split())
86 lat, lon = list(zip(*[(float(lat), float(lon)) for lon, lat in
87 coords]))
88 lats.extend(lat)
89 lons.extend(lon)
90 data[(int(state_id), int(county_id))] = {
91 'name' : name,
92 'detailed name' : det_name,
93 'state' : state,
94 'lats' : lats,
95 'lons' : lons,
96 }
97
98 return data
99
100 #-----------------------------------------------------------------------------
101 # Code
102 #-----------------------------------------------------------------------------
103
104 data = _read_data()
105
```
Path: `bokeh/sampledata/us_states.py`
Content:
```
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
3 # All rights reserved.
4 #
5 # The full license is in the file LICENSE.txt, distributed with this software.
6 #-----------------------------------------------------------------------------
7 '''
8 This modules exposes geometry data for Unites States. It exposes a dictionary 'data' which is
9 indexed by the two letter state code (e.g., 'CA', 'TX') and has the following dictionary as the
10 associated value:
11
12 data['CA']['name']
13 data['CA']['region']
14 data['CA']['lats']
15 data['CA']['lons']
16
17 '''
18
19 #-----------------------------------------------------------------------------
20 # Boilerplate
21 #-----------------------------------------------------------------------------
22 import logging # isort:skip
23 log = logging.getLogger(__name__)
24
25 #-----------------------------------------------------------------------------
26 # Imports
27 #-----------------------------------------------------------------------------
28
29 # Standard library imports
30 import codecs
31 import csv
32 import gzip
33 import xml.etree.cElementTree as et
34
35 # Bokeh imports
36 from ..util.sampledata import package_path
37
38 #-----------------------------------------------------------------------------
39 # Globals and constants
40 #-----------------------------------------------------------------------------
41
42 __all__ = (
43 'data',
44 )
45
46 #-----------------------------------------------------------------------------
47 # General API
48 #-----------------------------------------------------------------------------
49
50 #-----------------------------------------------------------------------------
51 # Dev API
52 #-----------------------------------------------------------------------------
53
54 #-----------------------------------------------------------------------------
55 # Private API
56 #-----------------------------------------------------------------------------
57
58 def _read_data():
59 '''
60
61 '''
62 nan = float('NaN')
63
64 data = {}
65
66 with gzip.open(package_path('US_Regions_State_Boundaries.csv.gz')) as f:
67 decoded = codecs.iterdecode(f, "utf-8")
68 next(decoded)
69 reader = csv.reader(decoded, delimiter=str(','), quotechar=str('"'))
70 for row in reader:
71 region, name, code, geometry, dummy = row
72 xml = et.fromstring(geometry)
73 lats = []
74 lons = []
75 for i, poly in enumerate(xml.findall('.//outerBoundaryIs/LinearRing/coordinates')):
76 if i > 0:
77 lats.append(nan)
78 lons.append(nan)
79 coords = (c.split(',')[:2] for c in poly.text.split())
80 lat, lon = list(zip(*[(float(lat), float(lon)) for lon, lat in
81 coords]))
82 lats.extend(lat)
83 lons.extend(lon)
84 data[code] = {
85 'name' : name,
86 'region' : region,
87 'lats' : lats,
88 'lons' : lons,
89 }
90
91 return data
92
93 #-----------------------------------------------------------------------------
94 # Code
95 #-----------------------------------------------------------------------------
96
97 data = _read_data()
98
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bokeh/sampledata/us_counties.py b/bokeh/sampledata/us_counties.py
--- a/bokeh/sampledata/us_counties.py
+++ b/bokeh/sampledata/us_counties.py
@@ -37,7 +37,7 @@
# Standard library imports
import csv
-import xml.etree.cElementTree as et
+import xml.etree.ElementTree as et
# Bokeh imports
from ..util.sampledata import external_path, open_csv
diff --git a/bokeh/sampledata/us_states.py b/bokeh/sampledata/us_states.py
--- a/bokeh/sampledata/us_states.py
+++ b/bokeh/sampledata/us_states.py
@@ -30,7 +30,7 @@
import codecs
import csv
import gzip
-import xml.etree.cElementTree as et
+import xml.etree.ElementTree as et
# Bokeh imports
from ..util.sampledata import package_path
|
{"golden_diff": "diff --git a/bokeh/sampledata/us_counties.py b/bokeh/sampledata/us_counties.py\n--- a/bokeh/sampledata/us_counties.py\n+++ b/bokeh/sampledata/us_counties.py\n@@ -37,7 +37,7 @@\n \n # Standard library imports\n import csv\n-import xml.etree.cElementTree as et\n+import xml.etree.ElementTree as et\n \n # Bokeh imports\n from ..util.sampledata import external_path, open_csv\ndiff --git a/bokeh/sampledata/us_states.py b/bokeh/sampledata/us_states.py\n--- a/bokeh/sampledata/us_states.py\n+++ b/bokeh/sampledata/us_states.py\n@@ -30,7 +30,7 @@\n import codecs\n import csv\n import gzip\n-import xml.etree.cElementTree as et\n+import xml.etree.ElementTree as et\n \n # Bokeh imports\n from ..util.sampledata import package_path\n", "issue": "[BUG] cElementTree has been deprecated and will be removed in favor of ElementTree\nReference : https://bugs.python.org/issue36543\r\n\r\n```\r\nbokeh/sampledata/us_states.py\r\n33:import xml.etree.cElementTree as et\r\n\r\nbokeh/sampledata/us_counties.py\r\n40:import xml.etree.cElementTree as et\r\n```\r\n\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.\n# All rights reserved.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n''' This modules exposes geometry data for Unites States. It exposes a\ndictionary ``data``, which is indexed by the two-tuples:\n\n.. code-block:: python\n\n (state_id, county_id)\n\nthat have the following dictionaries as the associated value:\n\n.. code-block:: python\n\n data[(1,1)]['name']\n data[(1,1)]['state']\n data[(1,1)]['detailed name']\n data[(1,1)]['lats']\n data[(1,1)]['lons']\n\nEntries for ``'name'`` can have duplicates for certain states (e.g. 
Virginia).\nThe combination of ``'detailed name'`` and ``'state'`` will always be unique.\n\n'''\n#-----------------------------------------------------------------------------\n# Boilerplate\n#-----------------------------------------------------------------------------\nimport logging # isort:skip\nlog = logging.getLogger(__name__)\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Standard library imports\nimport csv\nimport xml.etree.cElementTree as et\n\n# Bokeh imports\nfrom ..util.sampledata import external_path, open_csv\n\n#-----------------------------------------------------------------------------\n# Globals and constants\n#-----------------------------------------------------------------------------\n\n__all__ = (\n 'data',\n)\n\n#-----------------------------------------------------------------------------\n# General API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Dev API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\ndef _read_data():\n '''\n\n '''\n nan = float('NaN')\n\n data = {}\n\n with open_csv(external_path('US_Counties.csv')) as f:\n next(f)\n reader = csv.reader(f, delimiter=str(','), quotechar=str('\"'))\n for row in reader:\n name, dummy, state, dummy, geometry, dummy, dummy, dummy, det_name, state_id, county_id, dummy, dummy = row\n xml = et.fromstring(geometry)\n lats = []\n lons = []\n for i, poly in enumerate(xml.findall('.//outerBoundaryIs/LinearRing/coordinates')):\n if i > 0:\n lats.append(nan)\n lons.append(nan)\n coords = (c.split(',')[:2] for c in poly.text.split())\n lat, lon = list(zip(*[(float(lat), float(lon)) for lon, lat in\n coords]))\n lats.extend(lat)\n lons.extend(lon)\n data[(int(state_id), int(county_id))] = {\n 'name' : name,\n 'detailed name' : det_name,\n 'state' : state,\n 'lats' : lats,\n 'lons' : lons,\n }\n\n return data\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n\ndata = _read_data()\n", "path": "bokeh/sampledata/us_counties.py"}, {"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.\n# All rights reserved.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n'''\nThis modules exposes geometry data for Unites States. 
It exposes a dictionary 'data' which is\nindexed by the two letter state code (e.g., 'CA', 'TX') and has the following dictionary as the\nassociated value:\n\n data['CA']['name']\n data['CA']['region']\n data['CA']['lats']\n data['CA']['lons']\n\n'''\n\n#-----------------------------------------------------------------------------\n# Boilerplate\n#-----------------------------------------------------------------------------\nimport logging # isort:skip\nlog = logging.getLogger(__name__)\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Standard library imports\nimport codecs\nimport csv\nimport gzip\nimport xml.etree.cElementTree as et\n\n# Bokeh imports\nfrom ..util.sampledata import package_path\n\n#-----------------------------------------------------------------------------\n# Globals and constants\n#-----------------------------------------------------------------------------\n\n__all__ = (\n 'data',\n)\n\n#-----------------------------------------------------------------------------\n# General API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Dev API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\ndef _read_data():\n '''\n\n '''\n nan = float('NaN')\n\n data = {}\n\n with gzip.open(package_path('US_Regions_State_Boundaries.csv.gz')) as f:\n decoded = codecs.iterdecode(f, \"utf-8\")\n next(decoded)\n reader = csv.reader(decoded, delimiter=str(','), quotechar=str('\"'))\n for row in reader:\n region, name, code, geometry, dummy = row\n xml = et.fromstring(geometry)\n lats = []\n lons = []\n for i, poly in enumerate(xml.findall('.//outerBoundaryIs/LinearRing/coordinates')):\n if i > 0:\n lats.append(nan)\n lons.append(nan)\n coords = (c.split(',')[:2] for c in poly.text.split())\n lat, lon = list(zip(*[(float(lat), float(lon)) for lon, lat in\n coords]))\n lats.extend(lat)\n lons.extend(lon)\n data[code] = {\n 'name' : name,\n 'region' : region,\n 'lats' : lats,\n 'lons' : lons,\n }\n\n return data\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n\ndata = _read_data()\n", "path": "bokeh/sampledata/us_states.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.\n# All rights reserved.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n''' This modules exposes geometry data for Unites States. It exposes a\ndictionary ``data``, which is indexed by the two-tuples:\n\n.. code-block:: python\n\n (state_id, county_id)\n\nthat have the following dictionaries as the associated value:\n\n.. code-block:: python\n\n data[(1,1)]['name']\n data[(1,1)]['state']\n data[(1,1)]['detailed name']\n data[(1,1)]['lats']\n data[(1,1)]['lons']\n\nEntries for ``'name'`` can have duplicates for certain states (e.g. 
Virginia).\nThe combination of ``'detailed name'`` and ``'state'`` will always be unique.\n\n'''\n#-----------------------------------------------------------------------------\n# Boilerplate\n#-----------------------------------------------------------------------------\nimport logging # isort:skip\nlog = logging.getLogger(__name__)\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Standard library imports\nimport csv\nimport xml.etree.ElementTree as et\n\n# Bokeh imports\nfrom ..util.sampledata import external_path, open_csv\n\n#-----------------------------------------------------------------------------\n# Globals and constants\n#-----------------------------------------------------------------------------\n\n__all__ = (\n 'data',\n)\n\n#-----------------------------------------------------------------------------\n# General API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Dev API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\ndef _read_data():\n '''\n\n '''\n nan = float('NaN')\n\n data = {}\n\n with open_csv(external_path('US_Counties.csv')) as f:\n next(f)\n reader = csv.reader(f, delimiter=str(','), quotechar=str('\"'))\n for row in reader:\n name, dummy, state, dummy, geometry, dummy, dummy, dummy, det_name, state_id, county_id, dummy, dummy = row\n xml = et.fromstring(geometry)\n lats = []\n lons = []\n for i, poly in enumerate(xml.findall('.//outerBoundaryIs/LinearRing/coordinates')):\n if i > 0:\n lats.append(nan)\n lons.append(nan)\n coords = (c.split(',')[:2] for c in poly.text.split())\n lat, lon = list(zip(*[(float(lat), float(lon)) for lon, lat in\n coords]))\n lats.extend(lat)\n lons.extend(lon)\n data[(int(state_id), int(county_id))] = {\n 'name' : name,\n 'detailed name' : det_name,\n 'state' : state,\n 'lats' : lats,\n 'lons' : lons,\n }\n\n return data\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n\ndata = _read_data()\n", "path": "bokeh/sampledata/us_counties.py"}, {"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.\n# All rights reserved.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n'''\nThis modules exposes geometry data for Unites States. 
It exposes a dictionary 'data' which is\nindexed by the two letter state code (e.g., 'CA', 'TX') and has the following dictionary as the\nassociated value:\n\n data['CA']['name']\n data['CA']['region']\n data['CA']['lats']\n data['CA']['lons']\n\n'''\n\n#-----------------------------------------------------------------------------\n# Boilerplate\n#-----------------------------------------------------------------------------\nimport logging # isort:skip\nlog = logging.getLogger(__name__)\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Standard library imports\nimport codecs\nimport csv\nimport gzip\nimport xml.etree.ElementTree as et\n\n# Bokeh imports\nfrom ..util.sampledata import package_path\n\n#-----------------------------------------------------------------------------\n# Globals and constants\n#-----------------------------------------------------------------------------\n\n__all__ = (\n 'data',\n)\n\n#-----------------------------------------------------------------------------\n# General API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Dev API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\ndef _read_data():\n '''\n\n '''\n nan = float('NaN')\n\n data = {}\n\n with gzip.open(package_path('US_Regions_State_Boundaries.csv.gz')) as f:\n decoded = codecs.iterdecode(f, \"utf-8\")\n next(decoded)\n reader = csv.reader(decoded, delimiter=str(','), quotechar=str('\"'))\n for row in reader:\n region, name, code, geometry, dummy = row\n xml = et.fromstring(geometry)\n lats = []\n lons = []\n for i, poly in enumerate(xml.findall('.//outerBoundaryIs/LinearRing/coordinates')):\n if i > 0:\n lats.append(nan)\n lons.append(nan)\n coords = (c.split(',')[:2] for c in poly.text.split())\n lat, lon = list(zip(*[(float(lat), float(lon)) for lon, lat in\n coords]))\n lats.extend(lat)\n lons.extend(lon)\n data[code] = {\n 'name' : name,\n 'region' : region,\n 'lats' : lats,\n 'lons' : lons,\n }\n\n return data\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n\ndata = _read_data()\n", "path": "bokeh/sampledata/us_states.py"}]}
| 1,931 | 205 |
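Both bokeh sampledata loaders in the record above rely on the same ElementTree idiom for pulling latitude/longitude arrays out of KML geometry strings. The following is only a standalone sketch of that idiom with a made-up polygon string, not code from the repository:

```python
# Toy illustration of the KML coordinate parsing used by the loaders above.
# The geometry string is fabricated; the real CSV rows carry one such string per county/state.
import xml.etree.ElementTree as et

geometry = (
    "<Polygon><outerBoundaryIs><LinearRing><coordinates>"
    "-93.0,45.0,0 -93.1,45.0,0 -93.1,45.1,0 -93.0,45.0,0"
    "</coordinates></LinearRing></outerBoundaryIs></Polygon>"
)

nan = float("NaN")
lats, lons = [], []
xml = et.fromstring(geometry)
for i, poly in enumerate(xml.findall(".//outerBoundaryIs/LinearRing/coordinates")):
    if i > 0:                 # NaN separators let plotting libraries break between rings
        lats.append(nan)
        lons.append(nan)
    coords = (c.split(",")[:2] for c in poly.text.split())  # "lon,lat,alt" -> [lon, lat]
    lat, lon = zip(*[(float(lat), float(lon)) for lon, lat in coords])
    lats.extend(lat)
    lons.extend(lon)

print(lats)
print(lons)
```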
gh_patches_debug_8327 | rasdani/github-patches | git_diff | qutebrowser__qutebrowser-3652
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Empty completion.timestamp_format crashes
After `:set completion.timestamp_format ''`:
```
17:26:29 ERROR misc crashsignal:exception_hook:216 Uncaught exception
Traceback (most recent call last):
File "/home/florian/proj/qutebrowser/git/qutebrowser/completion/completer.py", line 260, in _update_completion
completion.set_pattern(pattern)
File "/home/florian/proj/qutebrowser/git/qutebrowser/completion/completionwidget.py", line 320, in set_pattern
self.model().set_pattern(pattern)
File "/home/florian/proj/qutebrowser/git/qutebrowser/completion/models/completionmodel.py", line 185, in set_pattern
cat.set_pattern(pattern)
File "/home/florian/proj/qutebrowser/git/qutebrowser/completion/models/histcategory.py", line 85, in set_pattern
.format(timestamp_format.replace("'", "`")))
AttributeError: 'NoneType' object has no attribute 'replace'
```
cc @rcorre and @erikdsjostrom who reported this
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qutebrowser/completion/models/histcategory.py`
Content:
```
1 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
2
3 # Copyright 2017-2018 Ryan Roden-Corrent (rcorre) <[email protected]>
4 #
5 # This file is part of qutebrowser.
6 #
7 # qutebrowser is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # qutebrowser is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
19
20 """A completion category that queries the SQL History store."""
21
22 from PyQt5.QtSql import QSqlQueryModel
23
24 from qutebrowser.misc import sql
25 from qutebrowser.utils import debug
26 from qutebrowser.config import config
27
28
29 class HistoryCategory(QSqlQueryModel):
30
31 """A completion category that queries the SQL History store."""
32
33 def __init__(self, *, delete_func=None, parent=None):
34 """Create a new History completion category."""
35 super().__init__(parent=parent)
36 self.name = "History"
37 self._query = None
38
39 # advertise that this model filters by URL and title
40 self.columns_to_filter = [0, 1]
41 self.delete_func = delete_func
42
43 def _atime_expr(self):
44 """If max_items is set, return an expression to limit the query."""
45 max_items = config.val.completion.web_history_max_items
46 # HistoryCategory should not be added to the completion in that case.
47 assert max_items != 0
48
49 if max_items < 0:
50 return ''
51
52 min_atime = sql.Query(' '.join([
53 'SELECT min(last_atime) FROM',
54 '(SELECT last_atime FROM CompletionHistory',
55 'ORDER BY last_atime DESC LIMIT :limit)',
56 ])).run(limit=max_items).value()
57
58 if not min_atime:
59 # if there are no history items, min_atime may be '' (issue #2849)
60 return ''
61
62 return "AND last_atime >= {}".format(min_atime)
63
64 def set_pattern(self, pattern):
65 """Set the pattern used to filter results.
66
67 Args:
68 pattern: string pattern to filter by.
69 """
70 # escape to treat a user input % or _ as a literal, not a wildcard
71 pattern = pattern.replace('%', '\\%')
72 pattern = pattern.replace('_', '\\_')
73 words = ['%{}%'.format(w) for w in pattern.split(' ')]
74
75 # build a where clause to match all of the words in any order
76 # given the search term "a b", the WHERE clause would be:
77 # ((url || title) LIKE '%a%') AND ((url || title) LIKE '%b%')
78 where_clause = ' AND '.join(
79 "(url || title) LIKE :{} escape '\\'".format(i)
80 for i in range(len(words)))
81
82 # replace ' in timestamp-format to avoid breaking the query
83 timestamp_format = config.val.completion.timestamp_format
84 timefmt = ("strftime('{}', last_atime, 'unixepoch', 'localtime')"
85 .format(timestamp_format.replace("'", "`")))
86
87 if not self._query or len(words) != len(self._query.boundValues()):
88 # if the number of words changed, we need to generate a new query
89 # otherwise, we can reuse the prepared query for performance
90 self._query = sql.Query(' '.join([
91 "SELECT url, title, {}".format(timefmt),
92 "FROM CompletionHistory",
93 # the incoming pattern will have literal % and _ escaped
94 # we need to tell sql to treat '\' as an escape character
95 'WHERE ({})'.format(where_clause),
96 self._atime_expr(),
97 "ORDER BY last_atime DESC",
98 ]), forward_only=False)
99
100 with debug.log_time('sql', 'Running completion query'):
101 self._query.run(**{
102 str(i): w for i, w in enumerate(words)})
103 self.setQuery(self._query)
104
105 def removeRows(self, row, _count, _parent=None):
106 """Override QAbstractItemModel::removeRows to re-run sql query."""
107 # re-run query to reload updated table
108 with debug.log_time('sql', 'Re-running completion query post-delete'):
109 self._query.run()
110 self.setQuery(self._query)
111 while self.rowCount() < row:
112 self.fetchMore()
113 return True
114
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/qutebrowser/completion/models/histcategory.py b/qutebrowser/completion/models/histcategory.py
--- a/qutebrowser/completion/models/histcategory.py
+++ b/qutebrowser/completion/models/histcategory.py
@@ -80,7 +80,7 @@
for i in range(len(words)))
# replace ' in timestamp-format to avoid breaking the query
- timestamp_format = config.val.completion.timestamp_format
+ timestamp_format = config.val.completion.timestamp_format or ''
timefmt = ("strftime('{}', last_atime, 'unixepoch', 'localtime')"
.format(timestamp_format.replace("'", "`")))
|
{"golden_diff": "diff --git a/qutebrowser/completion/models/histcategory.py b/qutebrowser/completion/models/histcategory.py\n--- a/qutebrowser/completion/models/histcategory.py\n+++ b/qutebrowser/completion/models/histcategory.py\n@@ -80,7 +80,7 @@\n for i in range(len(words)))\n \n # replace ' in timestamp-format to avoid breaking the query\n- timestamp_format = config.val.completion.timestamp_format\n+ timestamp_format = config.val.completion.timestamp_format or ''\n timefmt = (\"strftime('{}', last_atime, 'unixepoch', 'localtime')\"\n .format(timestamp_format.replace(\"'\", \"`\")))\n", "issue": "Empty completion.timestamp_format crashes\nAfter `:set completion.timestamp_format ''`:\r\n\r\n```\r\n17:26:29 ERROR misc crashsignal:exception_hook:216 Uncaught exception\r\nTraceback (most recent call last):\r\n File \"/home/florian/proj/qutebrowser/git/qutebrowser/completion/completer.py\", line 260, in _update_completion\r\n completion.set_pattern(pattern)\r\n File \"/home/florian/proj/qutebrowser/git/qutebrowser/completion/completionwidget.py\", line 320, in set_pattern\r\n self.model().set_pattern(pattern)\r\n File \"/home/florian/proj/qutebrowser/git/qutebrowser/completion/models/completionmodel.py\", line 185, in set_pattern\r\n cat.set_pattern(pattern)\r\n File \"/home/florian/proj/qutebrowser/git/qutebrowser/completion/models/histcategory.py\", line 85, in set_pattern\r\n .format(timestamp_format.replace(\"'\", \"`\")))\r\nAttributeError: 'NoneType' object has no attribute 'replace'\r\n```\r\n\r\ncc @rcorre and @erikdsjostrom who reported this\n", "before_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2017-2018 Ryan Roden-Corrent (rcorre) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"A completion category that queries the SQL History store.\"\"\"\n\nfrom PyQt5.QtSql import QSqlQueryModel\n\nfrom qutebrowser.misc import sql\nfrom qutebrowser.utils import debug\nfrom qutebrowser.config import config\n\n\nclass HistoryCategory(QSqlQueryModel):\n\n \"\"\"A completion category that queries the SQL History store.\"\"\"\n\n def __init__(self, *, delete_func=None, parent=None):\n \"\"\"Create a new History completion category.\"\"\"\n super().__init__(parent=parent)\n self.name = \"History\"\n self._query = None\n\n # advertise that this model filters by URL and title\n self.columns_to_filter = [0, 1]\n self.delete_func = delete_func\n\n def _atime_expr(self):\n \"\"\"If max_items is set, return an expression to limit the query.\"\"\"\n max_items = config.val.completion.web_history_max_items\n # HistoryCategory should not be added to the completion in that case.\n assert max_items != 0\n\n if max_items < 0:\n return ''\n\n min_atime = sql.Query(' '.join([\n 'SELECT min(last_atime) FROM',\n '(SELECT last_atime FROM CompletionHistory',\n 'ORDER BY last_atime DESC LIMIT :limit)',\n ])).run(limit=max_items).value()\n\n if not min_atime:\n # if there are no history items, min_atime may be '' (issue #2849)\n return ''\n\n return \"AND last_atime >= {}\".format(min_atime)\n\n def set_pattern(self, pattern):\n \"\"\"Set the pattern used to filter results.\n\n Args:\n pattern: string pattern to filter by.\n \"\"\"\n # escape to treat a user input % or _ as a literal, not a wildcard\n pattern = pattern.replace('%', '\\\\%')\n pattern = pattern.replace('_', '\\\\_')\n words = ['%{}%'.format(w) for w in pattern.split(' ')]\n\n # build a where clause to match all of the words in any order\n # given the search term \"a b\", the WHERE clause would be:\n # ((url || title) LIKE '%a%') AND ((url || title) LIKE '%b%')\n where_clause = ' AND '.join(\n \"(url || title) LIKE :{} escape '\\\\'\".format(i)\n for i in range(len(words)))\n\n # replace ' in timestamp-format to avoid breaking the query\n timestamp_format = config.val.completion.timestamp_format\n timefmt = (\"strftime('{}', last_atime, 'unixepoch', 'localtime')\"\n .format(timestamp_format.replace(\"'\", \"`\")))\n\n if not self._query or len(words) != len(self._query.boundValues()):\n # if the number of words changed, we need to generate a new query\n # otherwise, we can reuse the prepared query for performance\n self._query = sql.Query(' '.join([\n \"SELECT url, title, {}\".format(timefmt),\n \"FROM CompletionHistory\",\n # the incoming pattern will have literal % and _ escaped\n # we need to tell sql to treat '\\' as an escape character\n 'WHERE ({})'.format(where_clause),\n self._atime_expr(),\n \"ORDER BY last_atime DESC\",\n ]), forward_only=False)\n\n with debug.log_time('sql', 'Running completion query'):\n self._query.run(**{\n str(i): w for i, w in enumerate(words)})\n self.setQuery(self._query)\n\n def removeRows(self, row, _count, _parent=None):\n \"\"\"Override QAbstractItemModel::removeRows to re-run sql query.\"\"\"\n # re-run query to reload updated table\n with debug.log_time('sql', 'Re-running completion query post-delete'):\n self._query.run()\n self.setQuery(self._query)\n while self.rowCount() < row:\n self.fetchMore()\n return True\n", "path": "qutebrowser/completion/models/histcategory.py"}], "after_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2017-2018 Ryan Roden-Corrent (rcorre) <[email protected]>\n#\n# This file is 
part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"A completion category that queries the SQL History store.\"\"\"\n\nfrom PyQt5.QtSql import QSqlQueryModel\n\nfrom qutebrowser.misc import sql\nfrom qutebrowser.utils import debug\nfrom qutebrowser.config import config\n\n\nclass HistoryCategory(QSqlQueryModel):\n\n \"\"\"A completion category that queries the SQL History store.\"\"\"\n\n def __init__(self, *, delete_func=None, parent=None):\n \"\"\"Create a new History completion category.\"\"\"\n super().__init__(parent=parent)\n self.name = \"History\"\n self._query = None\n\n # advertise that this model filters by URL and title\n self.columns_to_filter = [0, 1]\n self.delete_func = delete_func\n\n def _atime_expr(self):\n \"\"\"If max_items is set, return an expression to limit the query.\"\"\"\n max_items = config.val.completion.web_history_max_items\n # HistoryCategory should not be added to the completion in that case.\n assert max_items != 0\n\n if max_items < 0:\n return ''\n\n min_atime = sql.Query(' '.join([\n 'SELECT min(last_atime) FROM',\n '(SELECT last_atime FROM CompletionHistory',\n 'ORDER BY last_atime DESC LIMIT :limit)',\n ])).run(limit=max_items).value()\n\n if not min_atime:\n # if there are no history items, min_atime may be '' (issue #2849)\n return ''\n\n return \"AND last_atime >= {}\".format(min_atime)\n\n def set_pattern(self, pattern):\n \"\"\"Set the pattern used to filter results.\n\n Args:\n pattern: string pattern to filter by.\n \"\"\"\n # escape to treat a user input % or _ as a literal, not a wildcard\n pattern = pattern.replace('%', '\\\\%')\n pattern = pattern.replace('_', '\\\\_')\n words = ['%{}%'.format(w) for w in pattern.split(' ')]\n\n # build a where clause to match all of the words in any order\n # given the search term \"a b\", the WHERE clause would be:\n # ((url || title) LIKE '%a%') AND ((url || title) LIKE '%b%')\n where_clause = ' AND '.join(\n \"(url || title) LIKE :{} escape '\\\\'\".format(i)\n for i in range(len(words)))\n\n # replace ' in timestamp-format to avoid breaking the query\n timestamp_format = config.val.completion.timestamp_format or ''\n timefmt = (\"strftime('{}', last_atime, 'unixepoch', 'localtime')\"\n .format(timestamp_format.replace(\"'\", \"`\")))\n\n if not self._query or len(words) != len(self._query.boundValues()):\n # if the number of words changed, we need to generate a new query\n # otherwise, we can reuse the prepared query for performance\n self._query = sql.Query(' '.join([\n \"SELECT url, title, {}\".format(timefmt),\n \"FROM CompletionHistory\",\n # the incoming pattern will have literal % and _ escaped\n # we need to tell sql to treat '\\' as an escape character\n 'WHERE ({})'.format(where_clause),\n self._atime_expr(),\n \"ORDER BY last_atime DESC\",\n ]), forward_only=False)\n\n with debug.log_time('sql', 'Running completion query'):\n self._query.run(**{\n str(i): w for i, w in 
enumerate(words)})\n self.setQuery(self._query)\n\n def removeRows(self, row, _count, _parent=None):\n \"\"\"Override QAbstractItemModel::removeRows to re-run sql query.\"\"\"\n # re-run query to reload updated table\n with debug.log_time('sql', 'Re-running completion query post-delete'):\n self._query.run()\n self.setQuery(self._query)\n while self.rowCount() < row:\n self.fetchMore()\n return True\n", "path": "qutebrowser/completion/models/histcategory.py"}]}
| 1,777 | 140 |
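The one-line change in the qutebrowser record above guards against an unset (None) setting before string methods are called on it. A minimal sketch of that `or ''` pattern outside qutebrowser's config machinery follows; the function name is made up for illustration:

```python
# Sketch of the guard from the golden diff above: a None timestamp format is treated as
# an empty string, so building the SQL strftime() expression cannot raise AttributeError.
def build_timefmt(timestamp_format):
    timestamp_format = timestamp_format or ""
    # escape single quotes so the user-supplied format can sit inside the SQL literal
    return ("strftime('{}', last_atime, 'unixepoch', 'localtime')"
            .format(timestamp_format.replace("'", "`")))

print(build_timefmt("%Y-%m-%d"))  # normal configured value
print(build_timefmt(None))        # previously crashed with AttributeError, now fine
```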
gh_patches_debug_38751 | rasdani/github-patches | git_diff | kserve__kserve-116
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Python Model download for GCS and S3
Downloading from GCS and S3 needs to be completed.
https://github.com/kubeflow/kfserving/blob/2f8d33d1a9773c5694a22ba749192163251fe287/python/kfserving/kfserving/storage.py#L27-L33
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/kfserving/kfserving/storage.py`
Content:
```
1 # Copyright 2019 kubeflow.org.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import logging
16 import tempfile
17 import os
18
19 _GCS_PREFIX = "gs://"
20 _S3_PREFIX = "s3://"
21 _LOCAL_PREFIX = "file://"
22
23
24 class Storage(object):
25 @staticmethod
26 def download(uri: str) -> str:
27 logging.info("Copying contents of %s to local" % uri)
28 if uri.startswith(_LOCAL_PREFIX) or os.path.exists(uri):
29 return Storage._download_local(uri)
30
31 temp_dir = tempfile.mkdtemp()
32 if uri.startswith(_GCS_PREFIX):
33 Storage._download_gcs(uri, temp_dir)
34 elif uri.startswith(_S3_PREFIX):
35 Storage._download_s3(uri, temp_dir)
36 else:
37 raise Exception("Cannot recognize storage type for " + uri +
38 "\n'%s', '%s', and '%s' are the current available storage type." %
39 (_GCS_PREFIX, _S3_PREFIX, _LOCAL_PREFIX))
40
41 logging.info("Successfully copied %s to %s" % (uri, temp_dir))
42 return temp_dir
43
44 @staticmethod
45 def _download_s3(uri, temp_dir: str):
46 raise NotImplementedError
47
48 @staticmethod
49 def _download_gcs(uri, temp_dir: str):
50 raise NotImplementedError
51
52 @staticmethod
53 def _download_local(uri):
54 local_path = uri.replace(_LOCAL_PREFIX, "", 1)
55 if not os.path.exists(local_path):
56 raise Exception("Local path %s does not exist." % (uri))
57 return local_path
58
```
Path: `python/kfserving/setup.py`
Content:
```
1 # Copyright 2019 kubeflow.org.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from setuptools import setup, find_packages
16
17 tests_require = [
18 'pytest',
19 'pytest-tornasync',
20 'mypy'
21 ]
22
23 setup(
24 name='kfserver',
25 version='0.1.0',
26 author_email='[email protected]',
27 license='../../LICENSE.txt',
28 url='https://github.com/kubeflow/kfserving/python/kfserving/kfserving',
29 description='Model Server for arbitrary python ML frameworks.',
30 long_description=open('README.md').read(),
31 python_requires='>3.4',
32 packages=find_packages("kfserving"),
33 install_requires=[
34 "tornado >= 1.4.1",
35 "argparse >= 1.4.0",
36 "numpy"
37 ],
38 tests_require=tests_require,
39 extras_require={'test': tests_require}
40 )
41
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/python/kfserving/kfserving/storage.py b/python/kfserving/kfserving/storage.py
--- a/python/kfserving/kfserving/storage.py
+++ b/python/kfserving/kfserving/storage.py
@@ -15,6 +15,10 @@
import logging
import tempfile
import os
+import re
+from minio import Minio
+from google.cloud import storage
+from google.auth import exceptions
_GCS_PREFIX = "gs://"
_S3_PREFIX = "s3://"
@@ -43,11 +47,36 @@
@staticmethod
def _download_s3(uri, temp_dir: str):
- raise NotImplementedError
+ client = Storage._create_minio_client()
+ bucket_args = uri.replace(_S3_PREFIX, "", 1).split("/", 1)
+ bucket_name = bucket_args[0]
+ bucket_path = bucket_args[1] if len(bucket_args) > 1 else ""
+ objects = client.list_objects(bucket_name, prefix=bucket_path, recursive=True)
+ for obj in objects:
+ # Replace any prefix from the object key with temp_dir
+ subdir_object_key = obj.object_name.replace(bucket_path, "", 1).strip("/")
+ client.fget_object(bucket_name, obj.object_name, os.path.join(temp_dir, subdir_object_key))
@staticmethod
def _download_gcs(uri, temp_dir: str):
- raise NotImplementedError
+ try:
+ storage_client = storage.Client()
+ except exceptions.DefaultCredentialsError as e:
+ storage_client = storage.Client.create_anonymous_client()
+ bucket_args = uri.replace(_GCS_PREFIX, "", 1).split("/", 1)
+ bucket_name = bucket_args[0]
+ bucket_path = bucket_args[1] if len(bucket_args) > 1 else ""
+ bucket = storage_client.bucket(bucket_name)
+ blobs = bucket.list_blobs(prefix=bucket_path)
+ for blob in blobs:
+ # Replace any prefix from the object key with temp_dir
+ subdir_object_key = blob.name.replace(bucket_path, "", 1).strip("/")
+ # Create necessary subdirectory to store the object locally
+ if "/" in subdir_object_key:
+ local_object_dir = os.path.join(temp_dir, subdir_object_key.rsplit("/", 1)[0])
+ if not os.path.isdir(local_object_dir):
+ os.makedirs(local_object_dir, exist_ok=True)
+ blob.download_to_filename(os.path.join(temp_dir, subdir_object_key))
@staticmethod
def _download_local(uri):
@@ -55,3 +84,13 @@
if not os.path.exists(local_path):
raise Exception("Local path %s does not exist." % (uri))
return local_path
+
+ @staticmethod
+ def _create_minio_client():
+ # Remove possible http scheme for Minio
+ url = re.compile(r"https?://")
+ minioClient = Minio(url.sub("", os.getenv("S3_ENDPOINT", "")),
+ access_key=os.getenv("AWS_ACCESS_KEY_ID", ""),
+ secret_key=os.getenv("AWS_SECRET_ACCESS_KEY", ""),
+ secure=True)
+ return minioClient
diff --git a/python/kfserving/setup.py b/python/kfserving/setup.py
--- a/python/kfserving/setup.py
+++ b/python/kfserving/setup.py
@@ -33,6 +33,8 @@
install_requires=[
"tornado >= 1.4.1",
"argparse >= 1.4.0",
+ "minio >= 4.0.9",
+ "google-cloud-storage >= 1.16.0",
"numpy"
],
tests_require=tests_require,
|
{"golden_diff": "diff --git a/python/kfserving/kfserving/storage.py b/python/kfserving/kfserving/storage.py\n--- a/python/kfserving/kfserving/storage.py\n+++ b/python/kfserving/kfserving/storage.py\n@@ -15,6 +15,10 @@\n import logging\n import tempfile\n import os\n+import re\n+from minio import Minio\n+from google.cloud import storage\n+from google.auth import exceptions\n \n _GCS_PREFIX = \"gs://\"\n _S3_PREFIX = \"s3://\"\n@@ -43,11 +47,36 @@\n \n @staticmethod\n def _download_s3(uri, temp_dir: str):\n- raise NotImplementedError\n+ client = Storage._create_minio_client()\n+ bucket_args = uri.replace(_S3_PREFIX, \"\", 1).split(\"/\", 1)\n+ bucket_name = bucket_args[0]\n+ bucket_path = bucket_args[1] if len(bucket_args) > 1 else \"\"\n+ objects = client.list_objects(bucket_name, prefix=bucket_path, recursive=True)\n+ for obj in objects:\n+ # Replace any prefix from the object key with temp_dir\n+ subdir_object_key = obj.object_name.replace(bucket_path, \"\", 1).strip(\"/\")\n+ client.fget_object(bucket_name, obj.object_name, os.path.join(temp_dir, subdir_object_key))\n \n @staticmethod\n def _download_gcs(uri, temp_dir: str):\n- raise NotImplementedError\n+ try:\n+ storage_client = storage.Client()\n+ except exceptions.DefaultCredentialsError as e:\n+ storage_client = storage.Client.create_anonymous_client()\n+ bucket_args = uri.replace(_GCS_PREFIX, \"\", 1).split(\"/\", 1)\n+ bucket_name = bucket_args[0]\n+ bucket_path = bucket_args[1] if len(bucket_args) > 1 else \"\"\n+ bucket = storage_client.bucket(bucket_name)\n+ blobs = bucket.list_blobs(prefix=bucket_path)\n+ for blob in blobs:\n+ # Replace any prefix from the object key with temp_dir\n+ subdir_object_key = blob.name.replace(bucket_path, \"\", 1).strip(\"/\")\n+ # Create necessary subdirectory to store the object locally\n+ if \"/\" in subdir_object_key:\n+ local_object_dir = os.path.join(temp_dir, subdir_object_key.rsplit(\"/\", 1)[0])\n+ if not os.path.isdir(local_object_dir):\n+ os.makedirs(local_object_dir, exist_ok=True)\n+ blob.download_to_filename(os.path.join(temp_dir, subdir_object_key))\n \n @staticmethod\n def _download_local(uri):\n@@ -55,3 +84,13 @@\n if not os.path.exists(local_path):\n raise Exception(\"Local path %s does not exist.\" % (uri))\n return local_path\n+\n+ @staticmethod\n+ def _create_minio_client():\n+ # Remove possible http scheme for Minio\n+ url = re.compile(r\"https?://\")\n+ minioClient = Minio(url.sub(\"\", os.getenv(\"S3_ENDPOINT\", \"\")),\n+ access_key=os.getenv(\"AWS_ACCESS_KEY_ID\", \"\"),\n+ secret_key=os.getenv(\"AWS_SECRET_ACCESS_KEY\", \"\"),\n+ secure=True)\n+ return minioClient\ndiff --git a/python/kfserving/setup.py b/python/kfserving/setup.py\n--- a/python/kfserving/setup.py\n+++ b/python/kfserving/setup.py\n@@ -33,6 +33,8 @@\n install_requires=[\n \"tornado >= 1.4.1\",\n \"argparse >= 1.4.0\",\n+ \"minio >= 4.0.9\",\n+ \"google-cloud-storage >= 1.16.0\",\n \"numpy\"\n ],\n tests_require=tests_require,\n", "issue": "Python Model download for GCS and S3\nDownloading from GCS and S3 needs to be completed.\r\n\r\nhttps://github.com/kubeflow/kfserving/blob/2f8d33d1a9773c5694a22ba749192163251fe287/python/kfserving/kfserving/storage.py#L27-L33\n", "before_files": [{"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in 
writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport tempfile\nimport os\n\n_GCS_PREFIX = \"gs://\"\n_S3_PREFIX = \"s3://\"\n_LOCAL_PREFIX = \"file://\"\n\n\nclass Storage(object):\n @staticmethod\n def download(uri: str) -> str:\n logging.info(\"Copying contents of %s to local\" % uri)\n if uri.startswith(_LOCAL_PREFIX) or os.path.exists(uri):\n return Storage._download_local(uri)\n\n temp_dir = tempfile.mkdtemp()\n if uri.startswith(_GCS_PREFIX):\n Storage._download_gcs(uri, temp_dir)\n elif uri.startswith(_S3_PREFIX):\n Storage._download_s3(uri, temp_dir)\n else:\n raise Exception(\"Cannot recognize storage type for \" + uri +\n \"\\n'%s', '%s', and '%s' are the current available storage type.\" %\n (_GCS_PREFIX, _S3_PREFIX, _LOCAL_PREFIX))\n\n logging.info(\"Successfully copied %s to %s\" % (uri, temp_dir))\n return temp_dir\n\n @staticmethod\n def _download_s3(uri, temp_dir: str):\n raise NotImplementedError\n\n @staticmethod\n def _download_gcs(uri, temp_dir: str):\n raise NotImplementedError\n\n @staticmethod\n def _download_local(uri):\n local_path = uri.replace(_LOCAL_PREFIX, \"\", 1)\n if not os.path.exists(local_path):\n raise Exception(\"Local path %s does not exist.\" % (uri))\n return local_path\n", "path": "python/kfserving/kfserving/storage.py"}, {"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'pytest',\n 'pytest-tornasync',\n 'mypy'\n]\n\nsetup(\n name='kfserver',\n version='0.1.0',\n author_email='[email protected]',\n license='../../LICENSE.txt',\n url='https://github.com/kubeflow/kfserving/python/kfserving/kfserving',\n description='Model Server for arbitrary python ML frameworks.',\n long_description=open('README.md').read(),\n python_requires='>3.4',\n packages=find_packages(\"kfserving\"),\n install_requires=[\n \"tornado >= 1.4.1\",\n \"argparse >= 1.4.0\",\n \"numpy\"\n ],\n tests_require=tests_require,\n extras_require={'test': tests_require}\n)\n", "path": "python/kfserving/setup.py"}], "after_files": [{"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport tempfile\nimport os\nimport re\nfrom minio import Minio\nfrom google.cloud import storage\nfrom 
google.auth import exceptions\n\n_GCS_PREFIX = \"gs://\"\n_S3_PREFIX = \"s3://\"\n_LOCAL_PREFIX = \"file://\"\n\n\nclass Storage(object):\n @staticmethod\n def download(uri: str) -> str:\n logging.info(\"Copying contents of %s to local\" % uri)\n if uri.startswith(_LOCAL_PREFIX) or os.path.exists(uri):\n return Storage._download_local(uri)\n\n temp_dir = tempfile.mkdtemp()\n if uri.startswith(_GCS_PREFIX):\n Storage._download_gcs(uri, temp_dir)\n elif uri.startswith(_S3_PREFIX):\n Storage._download_s3(uri, temp_dir)\n else:\n raise Exception(\"Cannot recognize storage type for \" + uri +\n \"\\n'%s', '%s', and '%s' are the current available storage type.\" %\n (_GCS_PREFIX, _S3_PREFIX, _LOCAL_PREFIX))\n\n logging.info(\"Successfully copied %s to %s\" % (uri, temp_dir))\n return temp_dir\n\n @staticmethod\n def _download_s3(uri, temp_dir: str):\n client = Storage._create_minio_client()\n bucket_args = uri.replace(_S3_PREFIX, \"\", 1).split(\"/\", 1)\n bucket_name = bucket_args[0]\n bucket_path = bucket_args[1] if len(bucket_args) > 1 else \"\"\n objects = client.list_objects(bucket_name, prefix=bucket_path, recursive=True)\n for obj in objects:\n # Replace any prefix from the object key with temp_dir\n subdir_object_key = obj.object_name.replace(bucket_path, \"\", 1).strip(\"/\")\n client.fget_object(bucket_name, obj.object_name, os.path.join(temp_dir, subdir_object_key))\n\n @staticmethod\n def _download_gcs(uri, temp_dir: str):\n try:\n storage_client = storage.Client()\n except exceptions.DefaultCredentialsError as e:\n storage_client = storage.Client.create_anonymous_client()\n bucket_args = uri.replace(_GCS_PREFIX, \"\", 1).split(\"/\", 1)\n bucket_name = bucket_args[0]\n bucket_path = bucket_args[1] if len(bucket_args) > 1 else \"\"\n bucket = storage_client.bucket(bucket_name)\n blobs = bucket.list_blobs(prefix=bucket_path)\n for blob in blobs:\n # Replace any prefix from the object key with temp_dir\n subdir_object_key = blob.name.replace(bucket_path, \"\", 1).strip(\"/\")\n # Create necessary subdirectory to store the object locally\n if \"/\" in subdir_object_key:\n local_object_dir = os.path.join(temp_dir, subdir_object_key.rsplit(\"/\", 1)[0])\n if not os.path.isdir(local_object_dir):\n os.makedirs(local_object_dir, exist_ok=True)\n blob.download_to_filename(os.path.join(temp_dir, subdir_object_key))\n\n @staticmethod\n def _download_local(uri):\n local_path = uri.replace(_LOCAL_PREFIX, \"\", 1)\n if not os.path.exists(local_path):\n raise Exception(\"Local path %s does not exist.\" % (uri))\n return local_path\n\n @staticmethod\n def _create_minio_client():\n # Remove possible http scheme for Minio\n url = re.compile(r\"https?://\")\n minioClient = Minio(url.sub(\"\", os.getenv(\"S3_ENDPOINT\", \"\")),\n access_key=os.getenv(\"AWS_ACCESS_KEY_ID\", \"\"),\n secret_key=os.getenv(\"AWS_SECRET_ACCESS_KEY\", \"\"),\n secure=True)\n return minioClient\n", "path": "python/kfserving/kfserving/storage.py"}, {"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under 
the License.\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'pytest',\n 'pytest-tornasync',\n 'mypy'\n]\n\nsetup(\n name='kfserver',\n version='0.1.0',\n author_email='[email protected]',\n license='../../LICENSE.txt',\n url='https://github.com/kubeflow/kfserving/python/kfserving/kfserving',\n description='Model Server for arbitrary python ML frameworks.',\n long_description=open('README.md').read(),\n python_requires='>3.4',\n packages=find_packages(\"kfserving\"),\n install_requires=[\n \"tornado >= 1.4.1\",\n \"argparse >= 1.4.0\",\n \"minio >= 4.0.9\",\n \"google-cloud-storage >= 1.16.0\",\n \"numpy\"\n ],\n tests_require=tests_require,\n extras_require={'test': tests_require}\n)\n", "path": "python/kfserving/setup.py"}]}
| 1,318 | 816 |
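A rough usage sketch for the patched storage module in the record above; the import path follows the file layout shown in the diff, while the endpoint, credentials, and bucket URIs are placeholders rather than real values:

```python
# Hypothetical exercise of Storage.download() after the golden diff above is applied.
# Endpoint, keys, and bucket names are illustrative placeholders only.
import os
from kfserving.storage import Storage  # assumes the kfserving package is installed

os.environ["S3_ENDPOINT"] = "minio.example.com:9000"   # placeholder Minio endpoint
os.environ["AWS_ACCESS_KEY_ID"] = "example-access-key"
os.environ["AWS_SECRET_ACCESS_KEY"] = "example-secret-key"

s3_dir = Storage.download("s3://example-bucket/models/flowers")   # served by the Minio client
gcs_dir = Storage.download("gs://example-bucket/models/iris")     # served by google-cloud-storage
local_dir = Storage.download("file:///tmp/models/local")          # returned as-is, no copy
print(s3_dir, gcs_dir, local_dir)
```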
gh_patches_debug_21026 | rasdani/github-patches | git_diff | arviz-devs__arviz-1007
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
plot_ppc - KeyError: "['y'] var names are not present in dataset"
Hi, I've had troubles using `plot_ppc` in a project I'm working on, so I tried to reproduce it using the quickstart pystan example. This is the code I'm using:
```python
schools_code = """
data {
int<lower=0> J;
real y[J];
real<lower=0> sigma[J];
}
parameters {
real mu;
real<lower=0> tau;
real theta[J];
}
model {
mu ~ normal(0, 5);
tau ~ cauchy(0, 5);
theta ~ normal(mu, tau);
y ~ normal(theta, sigma);
}
generated quantities {
vector[J] log_lik;
vector[J] y_hat;
for (j in 1:J) {
log_lik[j] = normal_lpdf(y[j] | theta[j], sigma[j]);
y_hat[j] = normal_rng(theta[j], sigma[j]);
}
}
"""
schools_dat = {'J': 8,
'y': [28, 8, -3, 7, -1, 1, 18, 12],
'sigma': [15, 10, 16, 11, 9, 11, 10, 18]}
sm = pystan.StanModel(model_code=schools_code, verbose=False)
fit = sm.sampling(data=schools_dat, iter=1000, chains=4)
data = az.from_pystan(posterior=fit,
posterior_predictive='y_hat',
observed_data=['y'],
log_likelihood='log_lik',
coords={'school': schools},
dims={'theta': ['school'], 'y': ['school'], 'log_lik': ['school'], 'y_hat': ['school'], 'theta_tilde': ['school']})
az.plot_ppc(data)
```
This raises:
```python
KeyError: "['y'] var names are not present in dataset"
```
which is exactly the kind of error I got in my own project... I guess I'm doing something wrong, here, but I can't figure out what it is.
Any help would be appreciated... thanks!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `arviz/plots/ppcplot.py`
Content:
```
1 """Posterior/Prior predictive plot."""
2 from numbers import Integral
3 import platform
4 import logging
5 import numpy as np
6
7 from .plot_utils import (
8 xarray_var_iter,
9 _scale_fig_size,
10 default_grid,
11 filter_plotters_list,
12 get_plotting_function,
13 )
14 from ..utils import _var_names
15
16 _log = logging.getLogger(__name__)
17
18
19 def plot_ppc(
20 data,
21 kind="kde",
22 alpha=None,
23 mean=True,
24 figsize=None,
25 textsize=None,
26 data_pairs=None,
27 var_names=None,
28 coords=None,
29 flatten=None,
30 flatten_pp=None,
31 num_pp_samples=None,
32 random_seed=None,
33 jitter=None,
34 animated=False,
35 animation_kwargs=None,
36 legend=True,
37 ax=None,
38 backend=None,
39 backend_kwargs=None,
40 group="posterior",
41 show=None,
42 ):
43 """
44 Plot for posterior/prior predictive checks.
45
46 Parameters
47 ----------
48 data : az.InferenceData object
49 InferenceData object containing the observed and posterior/prior predictive data.
50 kind : str
51 Type of plot to display (kde, cumulative, or scatter). Defaults to kde.
52 alpha : float
53 Opacity of posterior/prior predictive density curves.
54 Defaults to 0.2 for kind = kde and cumulative, for scatter defaults to 0.7
55 mean : bool
56 Whether or not to plot the mean posterior/prior predictive distribution. Defaults to True
57 figsize : tuple
58 Figure size. If None it will be defined automatically.
59 textsize: float
60 Text size scaling factor for labels, titles and lines. If None it will be
61 autoscaled based on figsize.
62 data_pairs : dict
63 Dictionary containing relations between observed data and posterior/prior predictive data.
64 Dictionary structure:
65 Key = data var_name
66 Value = posterior/prior predictive var_name
67 For example, `data_pairs = {'y' : 'y_hat'}`
68 If None, it will assume that the observed data and the posterior/prior
69 predictive data have the same variable name.
70 var_names : list
71 List of variables to be plotted. Defaults to all observed variables in the
72 model if None.
73 coords : dict
74 Dictionary mapping dimensions to selected coordinates to be plotted.
75 Dimensions without a mapping specified will include all coordinates for
76 that dimension. Defaults to including all coordinates for all
77 dimensions if None.
78 flatten : list
79 List of dimensions to flatten in observed_data. Only flattens across the coordinates
80 specified in the coords argument. Defaults to flattening all of the dimensions.
81 flatten_pp : list
82 List of dimensions to flatten in posterior_predictive/prior_predictive. Only flattens
83 across the coordinates specified in the coords argument. Defaults to flattening all
84 of the dimensions. Dimensions should match flatten excluding dimensions for data_pairs
85 parameters. If flatten is defined and flatten_pp is None, then `flatten_pp=flatten`.
86 num_pp_samples : int
87 The number of posterior/prior predictive samples to plot. For `kind` = 'scatter' and
88 `animation = False` if defaults to a maximum of 5 samples and will set jitter to 0.7
89 unless defined otherwise. Otherwise it defaults to all provided samples.
90 random_seed : int
91 Random number generator seed passed to numpy.random.seed to allow
92 reproducibility of the plot. By default, no seed will be provided
93 and the plot will change each call if a random sample is specified
94 by `num_pp_samples`.
95 jitter : float
96 If kind is "scatter", jitter will add random uniform noise to the height
97 of the ppc samples and observed data. By default 0.
98 animated : bool
99 Create an animation of one posterior/prior predictive sample per frame. Defaults to False.
100 animation_kwargs : dict
101 Keywords passed to `animation.FuncAnimation`.
102 legend : bool
103 Add legend to figure. By default True.
104 ax: axes, optional
105 Matplotlib axes or bokeh figures.
106 backend: str, optional
107 Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
108 backend_kwargs: bool, optional
109 These are kwargs specific to the backend being used. For additional documentation
110 check the plotting method of the backend.
111 group : {"prior", "posterior"}, optional
112 Specifies which InferenceData group should be plotted. Defaults to 'posterior'.
113 Other value can be 'prior'.
114 show : bool, optional
115 Call backend show function.
116
117 Returns
118 -------
119 axes : matplotlib axes or bokeh figures
120
121 Examples
122 --------
123 Plot the observed data KDE overlaid on posterior predictive KDEs.
124
125 .. plot::
126 :context: close-figs
127
128 >>> import arviz as az
129 >>> data = az.load_arviz_data('radon')
130 >>> az.plot_ppc(data,data_pairs={"obs":"obs"})
131
132 Plot the overlay with empirical CDFs.
133
134 .. plot::
135 :context: close-figs
136
137 >>> az.plot_ppc(data, kind='cumulative')
138
139 Use the coords and flatten parameters to plot selected variable dimensions
140 across multiple plots.
141
142 .. plot::
143 :context: close-figs
144
145 >>> az.plot_ppc(data, coords={'observed_county': ['ANOKA', 'BELTRAMI']}, flatten=[])
146
147 Plot the overlay using a stacked scatter plot that is particularly useful
148 when the sample sizes are small.
149
150 .. plot::
151 :context: close-figs
152
153 >>> az.plot_ppc(data, kind='scatter', flatten=[],
154 >>> coords={'observed_county': ['AITKIN', 'BELTRAMI']})
155
156 Plot random posterior predictive sub-samples.
157
158 .. plot::
159 :context: close-figs
160
161 >>> az.plot_ppc(data, num_pp_samples=30, random_seed=7)
162 """
163 if group not in ("posterior", "prior"):
164 raise TypeError("`group` argument must be either `posterior` or `prior`")
165
166 for groups in ("{}_predictive".format(group), "observed_data"):
167 if not hasattr(data, groups):
168 raise TypeError(
169 '`data` argument must have the group "{group}" for ppcplot'.format(group=groups)
170 )
171
172 if kind.lower() not in ("kde", "cumulative", "scatter"):
173 raise TypeError("`kind` argument must be either `kde`, `cumulative`, or `scatter`")
174
175 if data_pairs is None:
176 data_pairs = {}
177
178 if animation_kwargs is None:
179 animation_kwargs = {}
180 if platform.system() == "Linux":
181 animation_kwargs.setdefault("blit", True)
182 else:
183 animation_kwargs.setdefault("blit", False)
184
185 if animated and backend == "bokeh":
186 raise TypeError("Animation option is only supported with matplotlib backend.")
187
188 if animated and animation_kwargs["blit"] and platform.system() != "Linux":
189 _log.warning(
190 "If you experience problems rendering the animation try setting"
191 "`animation_kwargs({'blit':False}) or changing the plotting backend (e.g. to TkAgg)"
192 )
193
194 if alpha is None:
195 if animated:
196 alpha = 1
197 else:
198 if kind.lower() == "scatter":
199 alpha = 0.7
200 else:
201 alpha = 0.2
202
203 if jitter is None:
204 jitter = 0.0
205 assert jitter >= 0.0
206
207 observed = data.observed_data
208
209 if group == "posterior":
210 predictive_dataset = data.posterior_predictive
211 elif group == "prior":
212 predictive_dataset = data.prior_predictive
213
214 if var_names is None:
215 var_names = list(observed.data_vars)
216 var_names = _var_names(var_names, observed)
217 pp_var_names = [data_pairs.get(var, var) for var in var_names]
218 pp_var_names = _var_names(pp_var_names, predictive_dataset)
219
220 if flatten_pp is None and flatten is None:
221 flatten_pp = list(predictive_dataset.dims.keys())
222 elif flatten_pp is None:
223 flatten_pp = flatten
224 if flatten is None:
225 flatten = list(observed.dims.keys())
226
227 if coords is None:
228 coords = {}
229
230 if random_seed is not None:
231 np.random.seed(random_seed)
232
233 total_pp_samples = predictive_dataset.sizes["chain"] * predictive_dataset.sizes["draw"]
234 if num_pp_samples is None:
235 if kind == "scatter" and not animated:
236 num_pp_samples = min(5, total_pp_samples)
237 else:
238 num_pp_samples = total_pp_samples
239
240 if (
241 not isinstance(num_pp_samples, Integral)
242 or num_pp_samples < 1
243 or num_pp_samples > total_pp_samples
244 ):
245 raise TypeError(
246 "`num_pp_samples` must be an integer between 1 and "
247 + "{limit}.".format(limit=total_pp_samples)
248 )
249
250 pp_sample_ix = np.random.choice(total_pp_samples, size=num_pp_samples, replace=False)
251
252 for key in coords.keys():
253 coords[key] = np.where(np.in1d(observed[key], coords[key]))[0]
254
255 obs_plotters = filter_plotters_list(
256 list(
257 xarray_var_iter(
258 observed.isel(coords), skip_dims=set(flatten), var_names=var_names, combined=True
259 )
260 ),
261 "plot_ppc",
262 )
263 length_plotters = len(obs_plotters)
264 pp_plotters = [
265 tup
266 for _, tup in zip(
267 range(length_plotters),
268 xarray_var_iter(
269 predictive_dataset.isel(coords),
270 var_names=pp_var_names,
271 skip_dims=set(flatten_pp),
272 combined=True,
273 ),
274 )
275 ]
276 rows, cols = default_grid(length_plotters)
277
278 (figsize, ax_labelsize, _, xt_labelsize, linewidth, markersize) = _scale_fig_size(
279 figsize, textsize, rows, cols
280 )
281
282 ppcplot_kwargs = dict(
283 ax=ax,
284 length_plotters=length_plotters,
285 rows=rows,
286 cols=cols,
287 figsize=figsize,
288 animated=animated,
289 obs_plotters=obs_plotters,
290 pp_plotters=pp_plotters,
291 predictive_dataset=predictive_dataset,
292 pp_sample_ix=pp_sample_ix,
293 kind=kind,
294 alpha=alpha,
295 linewidth=linewidth,
296 mean=mean,
297 xt_labelsize=xt_labelsize,
298 ax_labelsize=ax_labelsize,
299 jitter=jitter,
300 total_pp_samples=total_pp_samples,
301 legend=legend,
302 markersize=markersize,
303 animation_kwargs=animation_kwargs,
304 num_pp_samples=num_pp_samples,
305 backend_kwargs=backend_kwargs,
306 show=show,
307 )
308
309 if backend == "bokeh":
310
311 ppcplot_kwargs.pop("animated")
312 ppcplot_kwargs.pop("animation_kwargs")
313 ppcplot_kwargs.pop("legend")
314 ppcplot_kwargs.pop("xt_labelsize")
315 ppcplot_kwargs.pop("ax_labelsize")
316
317 # TODO: Add backend kwargs
318 plot = get_plotting_function("plot_ppc", "ppcplot", backend)
319 axes = plot(**ppcplot_kwargs)
320 return axes
321
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/arviz/plots/ppcplot.py b/arviz/plots/ppcplot.py
--- a/arviz/plots/ppcplot.py
+++ b/arviz/plots/ppcplot.py
@@ -62,8 +62,8 @@
data_pairs : dict
Dictionary containing relations between observed data and posterior/prior predictive data.
Dictionary structure:
- Key = data var_name
- Value = posterior/prior predictive var_name
+ key = data var_name
+ value = posterior/prior predictive var_name
For example, `data_pairs = {'y' : 'y_hat'}`
If None, it will assume that the observed data and the posterior/prior
predictive data have the same variable name.
@@ -128,6 +128,7 @@
>>> import arviz as az
>>> data = az.load_arviz_data('radon')
>>> az.plot_ppc(data,data_pairs={"obs":"obs"})
+ >>> #az.plot_ppc(data,data_pairs={"obs":"obs_hat"})
Plot the overlay with empirical CDFs.
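The golden diff above only touches the docstring (lowercasing the key/value description and adding a commented data_pairs example); the runtime answer to the reported KeyError is to pair the observed variable with its predictive counterpart explicitly. A self-contained toy sketch of that call, assuming a recent ArviZ release, with numbers standing in for the eight-schools fit from the issue:

```python
# Toy stand-in for the eight-schools InferenceData from the issue: the observed variable
# is 'y' while the posterior predictive variable is 'y_hat', which is exactly the name
# mismatch behind the KeyError.
import numpy as np
import arviz as az

rng = np.random.default_rng(0)
data = az.from_dict(
    observed_data={"y": np.array([28.0, 8.0, -3.0, 7.0, -1.0, 1.0, 18.0, 12.0])},
    posterior_predictive={"y_hat": rng.normal(8.0, 10.0, size=(4, 500, 8))},  # (chain, draw, obs)
)

# Without data_pairs, plot_ppc looks for 'y' in the posterior predictive group and fails;
# mapping 'y' to 'y_hat' runs the intended posterior predictive check.
az.plot_ppc(data, data_pairs={"y": "y_hat"})
```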
|
{"golden_diff": "diff --git a/arviz/plots/ppcplot.py b/arviz/plots/ppcplot.py\n--- a/arviz/plots/ppcplot.py\n+++ b/arviz/plots/ppcplot.py\n@@ -62,8 +62,8 @@\n data_pairs : dict\n Dictionary containing relations between observed data and posterior/prior predictive data.\n Dictionary structure:\n- Key = data var_name\n- Value = posterior/prior predictive var_name\n+ key = data var_name\n+ value = posterior/prior predictive var_name\n For example, `data_pairs = {'y' : 'y_hat'}`\n If None, it will assume that the observed data and the posterior/prior\n predictive data have the same variable name.\n@@ -128,6 +128,7 @@\n >>> import arviz as az\n >>> data = az.load_arviz_data('radon')\n >>> az.plot_ppc(data,data_pairs={\"obs\":\"obs\"})\n+ >>> #az.plot_ppc(data,data_pairs={\"obs\":\"obs_hat\"})\n \n Plot the overlay with empirical CDFs.\n", "issue": "plot_ppc - KeyError: \"['y'] var names are not present in dataset\"\nHi, I've had troubles using `plot_ppc` in a project I'm working on, so I tried to reproduce it using the quickstart pystan example. This is the code I'm using:\r\n\r\n```python\r\nschools_code = \"\"\"\r\ndata {\r\n int<lower=0> J;\r\n real y[J];\r\n real<lower=0> sigma[J];\r\n}\r\n\r\nparameters {\r\n real mu;\r\n real<lower=0> tau;\r\n real theta[J];\r\n}\r\n\r\nmodel {\r\n mu ~ normal(0, 5);\r\n tau ~ cauchy(0, 5);\r\n theta ~ normal(mu, tau);\r\n y ~ normal(theta, sigma);\r\n}\r\ngenerated quantities {\r\n vector[J] log_lik;\r\n vector[J] y_hat;\r\n for (j in 1:J) {\r\n log_lik[j] = normal_lpdf(y[j] | theta[j], sigma[j]);\r\n y_hat[j] = normal_rng(theta[j], sigma[j]);\r\n }\r\n}\r\n\"\"\"\r\n\r\nschools_dat = {'J': 8,\r\n 'y': [28, 8, -3, 7, -1, 1, 18, 12],\r\n 'sigma': [15, 10, 16, 11, 9, 11, 10, 18]}\r\n\r\nsm = pystan.StanModel(model_code=schools_code, verbose=False)\r\nfit = sm.sampling(data=schools_dat, iter=1000, chains=4)\r\n\r\ndata = az.from_pystan(posterior=fit,\r\n posterior_predictive='y_hat',\r\n observed_data=['y'],\r\n log_likelihood='log_lik',\r\n coords={'school': schools},\r\n dims={'theta': ['school'], 'y': ['school'], 'log_lik': ['school'], 'y_hat': ['school'], 'theta_tilde': ['school']})\r\n\r\naz.plot_ppc(data)\r\n```\r\n\r\nThis raises:\r\n\r\n```python\r\nKeyError: \"['y'] var names are not present in dataset\"\r\n```\r\n\r\nwhich is exactly the kind of error I got in my own project... I guess I'm doing something wrong, here, but I can't figure out what it is.\r\n\r\nAny help would be appreciated... thanks!\n", "before_files": [{"content": "\"\"\"Posterior/Prior predictive plot.\"\"\"\nfrom numbers import Integral\nimport platform\nimport logging\nimport numpy as np\n\nfrom .plot_utils import (\n xarray_var_iter,\n _scale_fig_size,\n default_grid,\n filter_plotters_list,\n get_plotting_function,\n)\nfrom ..utils import _var_names\n\n_log = logging.getLogger(__name__)\n\n\ndef plot_ppc(\n data,\n kind=\"kde\",\n alpha=None,\n mean=True,\n figsize=None,\n textsize=None,\n data_pairs=None,\n var_names=None,\n coords=None,\n flatten=None,\n flatten_pp=None,\n num_pp_samples=None,\n random_seed=None,\n jitter=None,\n animated=False,\n animation_kwargs=None,\n legend=True,\n ax=None,\n backend=None,\n backend_kwargs=None,\n group=\"posterior\",\n show=None,\n):\n \"\"\"\n Plot for posterior/prior predictive checks.\n\n Parameters\n ----------\n data : az.InferenceData object\n InferenceData object containing the observed and posterior/prior predictive data.\n kind : str\n Type of plot to display (kde, cumulative, or scatter). 
Defaults to kde.\n alpha : float\n Opacity of posterior/prior predictive density curves.\n Defaults to 0.2 for kind = kde and cumulative, for scatter defaults to 0.7\n mean : bool\n Whether or not to plot the mean posterior/prior predictive distribution. Defaults to True\n figsize : tuple\n Figure size. If None it will be defined automatically.\n textsize: float\n Text size scaling factor for labels, titles and lines. If None it will be\n autoscaled based on figsize.\n data_pairs : dict\n Dictionary containing relations between observed data and posterior/prior predictive data.\n Dictionary structure:\n Key = data var_name\n Value = posterior/prior predictive var_name\n For example, `data_pairs = {'y' : 'y_hat'}`\n If None, it will assume that the observed data and the posterior/prior\n predictive data have the same variable name.\n var_names : list\n List of variables to be plotted. Defaults to all observed variables in the\n model if None.\n coords : dict\n Dictionary mapping dimensions to selected coordinates to be plotted.\n Dimensions without a mapping specified will include all coordinates for\n that dimension. Defaults to including all coordinates for all\n dimensions if None.\n flatten : list\n List of dimensions to flatten in observed_data. Only flattens across the coordinates\n specified in the coords argument. Defaults to flattening all of the dimensions.\n flatten_pp : list\n List of dimensions to flatten in posterior_predictive/prior_predictive. Only flattens\n across the coordinates specified in the coords argument. Defaults to flattening all\n of the dimensions. Dimensions should match flatten excluding dimensions for data_pairs\n parameters. If flatten is defined and flatten_pp is None, then `flatten_pp=flatten`.\n num_pp_samples : int\n The number of posterior/prior predictive samples to plot. For `kind` = 'scatter' and\n `animation = False` if defaults to a maximum of 5 samples and will set jitter to 0.7\n unless defined otherwise. Otherwise it defaults to all provided samples.\n random_seed : int\n Random number generator seed passed to numpy.random.seed to allow\n reproducibility of the plot. By default, no seed will be provided\n and the plot will change each call if a random sample is specified\n by `num_pp_samples`.\n jitter : float\n If kind is \"scatter\", jitter will add random uniform noise to the height\n of the ppc samples and observed data. By default 0.\n animated : bool\n Create an animation of one posterior/prior predictive sample per frame. Defaults to False.\n animation_kwargs : dict\n Keywords passed to `animation.FuncAnimation`.\n legend : bool\n Add legend to figure. By default True.\n ax: axes, optional\n Matplotlib axes or bokeh figures.\n backend: str, optional\n Select plotting backend {\"matplotlib\",\"bokeh\"}. Default \"matplotlib\".\n backend_kwargs: bool, optional\n These are kwargs specific to the backend being used. For additional documentation\n check the plotting method of the backend.\n group : {\"prior\", \"posterior\"}, optional\n Specifies which InferenceData group should be plotted. Defaults to 'posterior'.\n Other value can be 'prior'.\n show : bool, optional\n Call backend show function.\n\n Returns\n -------\n axes : matplotlib axes or bokeh figures\n\n Examples\n --------\n Plot the observed data KDE overlaid on posterior predictive KDEs.\n\n .. 
plot::\n :context: close-figs\n\n >>> import arviz as az\n >>> data = az.load_arviz_data('radon')\n >>> az.plot_ppc(data,data_pairs={\"obs\":\"obs\"})\n\n Plot the overlay with empirical CDFs.\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_ppc(data, kind='cumulative')\n\n Use the coords and flatten parameters to plot selected variable dimensions\n across multiple plots.\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_ppc(data, coords={'observed_county': ['ANOKA', 'BELTRAMI']}, flatten=[])\n\n Plot the overlay using a stacked scatter plot that is particularly useful\n when the sample sizes are small.\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_ppc(data, kind='scatter', flatten=[],\n >>> coords={'observed_county': ['AITKIN', 'BELTRAMI']})\n\n Plot random posterior predictive sub-samples.\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_ppc(data, num_pp_samples=30, random_seed=7)\n \"\"\"\n if group not in (\"posterior\", \"prior\"):\n raise TypeError(\"`group` argument must be either `posterior` or `prior`\")\n\n for groups in (\"{}_predictive\".format(group), \"observed_data\"):\n if not hasattr(data, groups):\n raise TypeError(\n '`data` argument must have the group \"{group}\" for ppcplot'.format(group=groups)\n )\n\n if kind.lower() not in (\"kde\", \"cumulative\", \"scatter\"):\n raise TypeError(\"`kind` argument must be either `kde`, `cumulative`, or `scatter`\")\n\n if data_pairs is None:\n data_pairs = {}\n\n if animation_kwargs is None:\n animation_kwargs = {}\n if platform.system() == \"Linux\":\n animation_kwargs.setdefault(\"blit\", True)\n else:\n animation_kwargs.setdefault(\"blit\", False)\n\n if animated and backend == \"bokeh\":\n raise TypeError(\"Animation option is only supported with matplotlib backend.\")\n\n if animated and animation_kwargs[\"blit\"] and platform.system() != \"Linux\":\n _log.warning(\n \"If you experience problems rendering the animation try setting\"\n \"`animation_kwargs({'blit':False}) or changing the plotting backend (e.g. 
to TkAgg)\"\n )\n\n if alpha is None:\n if animated:\n alpha = 1\n else:\n if kind.lower() == \"scatter\":\n alpha = 0.7\n else:\n alpha = 0.2\n\n if jitter is None:\n jitter = 0.0\n assert jitter >= 0.0\n\n observed = data.observed_data\n\n if group == \"posterior\":\n predictive_dataset = data.posterior_predictive\n elif group == \"prior\":\n predictive_dataset = data.prior_predictive\n\n if var_names is None:\n var_names = list(observed.data_vars)\n var_names = _var_names(var_names, observed)\n pp_var_names = [data_pairs.get(var, var) for var in var_names]\n pp_var_names = _var_names(pp_var_names, predictive_dataset)\n\n if flatten_pp is None and flatten is None:\n flatten_pp = list(predictive_dataset.dims.keys())\n elif flatten_pp is None:\n flatten_pp = flatten\n if flatten is None:\n flatten = list(observed.dims.keys())\n\n if coords is None:\n coords = {}\n\n if random_seed is not None:\n np.random.seed(random_seed)\n\n total_pp_samples = predictive_dataset.sizes[\"chain\"] * predictive_dataset.sizes[\"draw\"]\n if num_pp_samples is None:\n if kind == \"scatter\" and not animated:\n num_pp_samples = min(5, total_pp_samples)\n else:\n num_pp_samples = total_pp_samples\n\n if (\n not isinstance(num_pp_samples, Integral)\n or num_pp_samples < 1\n or num_pp_samples > total_pp_samples\n ):\n raise TypeError(\n \"`num_pp_samples` must be an integer between 1 and \"\n + \"{limit}.\".format(limit=total_pp_samples)\n )\n\n pp_sample_ix = np.random.choice(total_pp_samples, size=num_pp_samples, replace=False)\n\n for key in coords.keys():\n coords[key] = np.where(np.in1d(observed[key], coords[key]))[0]\n\n obs_plotters = filter_plotters_list(\n list(\n xarray_var_iter(\n observed.isel(coords), skip_dims=set(flatten), var_names=var_names, combined=True\n )\n ),\n \"plot_ppc\",\n )\n length_plotters = len(obs_plotters)\n pp_plotters = [\n tup\n for _, tup in zip(\n range(length_plotters),\n xarray_var_iter(\n predictive_dataset.isel(coords),\n var_names=pp_var_names,\n skip_dims=set(flatten_pp),\n combined=True,\n ),\n )\n ]\n rows, cols = default_grid(length_plotters)\n\n (figsize, ax_labelsize, _, xt_labelsize, linewidth, markersize) = _scale_fig_size(\n figsize, textsize, rows, cols\n )\n\n ppcplot_kwargs = dict(\n ax=ax,\n length_plotters=length_plotters,\n rows=rows,\n cols=cols,\n figsize=figsize,\n animated=animated,\n obs_plotters=obs_plotters,\n pp_plotters=pp_plotters,\n predictive_dataset=predictive_dataset,\n pp_sample_ix=pp_sample_ix,\n kind=kind,\n alpha=alpha,\n linewidth=linewidth,\n mean=mean,\n xt_labelsize=xt_labelsize,\n ax_labelsize=ax_labelsize,\n jitter=jitter,\n total_pp_samples=total_pp_samples,\n legend=legend,\n markersize=markersize,\n animation_kwargs=animation_kwargs,\n num_pp_samples=num_pp_samples,\n backend_kwargs=backend_kwargs,\n show=show,\n )\n\n if backend == \"bokeh\":\n\n ppcplot_kwargs.pop(\"animated\")\n ppcplot_kwargs.pop(\"animation_kwargs\")\n ppcplot_kwargs.pop(\"legend\")\n ppcplot_kwargs.pop(\"xt_labelsize\")\n ppcplot_kwargs.pop(\"ax_labelsize\")\n\n # TODO: Add backend kwargs\n plot = get_plotting_function(\"plot_ppc\", \"ppcplot\", backend)\n axes = plot(**ppcplot_kwargs)\n return axes\n", "path": "arviz/plots/ppcplot.py"}], "after_files": [{"content": "\"\"\"Posterior/Prior predictive plot.\"\"\"\nfrom numbers import Integral\nimport platform\nimport logging\nimport numpy as np\n\nfrom .plot_utils import (\n xarray_var_iter,\n _scale_fig_size,\n default_grid,\n filter_plotters_list,\n get_plotting_function,\n)\nfrom ..utils import 
_var_names\n\n_log = logging.getLogger(__name__)\n\n\ndef plot_ppc(\n data,\n kind=\"kde\",\n alpha=None,\n mean=True,\n figsize=None,\n textsize=None,\n data_pairs=None,\n var_names=None,\n coords=None,\n flatten=None,\n flatten_pp=None,\n num_pp_samples=None,\n random_seed=None,\n jitter=None,\n animated=False,\n animation_kwargs=None,\n legend=True,\n ax=None,\n backend=None,\n backend_kwargs=None,\n group=\"posterior\",\n show=None,\n):\n \"\"\"\n Plot for posterior/prior predictive checks.\n\n Parameters\n ----------\n data : az.InferenceData object\n InferenceData object containing the observed and posterior/prior predictive data.\n kind : str\n Type of plot to display (kde, cumulative, or scatter). Defaults to kde.\n alpha : float\n Opacity of posterior/prior predictive density curves.\n Defaults to 0.2 for kind = kde and cumulative, for scatter defaults to 0.7\n mean : bool\n Whether or not to plot the mean posterior/prior predictive distribution. Defaults to True\n figsize : tuple\n Figure size. If None it will be defined automatically.\n textsize: float\n Text size scaling factor for labels, titles and lines. If None it will be\n autoscaled based on figsize.\n data_pairs : dict\n Dictionary containing relations between observed data and posterior/prior predictive data.\n Dictionary structure:\n key = data var_name\n value = posterior/prior predictive var_name\n For example, `data_pairs = {'y' : 'y_hat'}`\n If None, it will assume that the observed data and the posterior/prior\n predictive data have the same variable name.\n var_names : list\n List of variables to be plotted. Defaults to all observed variables in the\n model if None.\n coords : dict\n Dictionary mapping dimensions to selected coordinates to be plotted.\n Dimensions without a mapping specified will include all coordinates for\n that dimension. Defaults to including all coordinates for all\n dimensions if None.\n flatten : list\n List of dimensions to flatten in observed_data. Only flattens across the coordinates\n specified in the coords argument. Defaults to flattening all of the dimensions.\n flatten_pp : list\n List of dimensions to flatten in posterior_predictive/prior_predictive. Only flattens\n across the coordinates specified in the coords argument. Defaults to flattening all\n of the dimensions. Dimensions should match flatten excluding dimensions for data_pairs\n parameters. If flatten is defined and flatten_pp is None, then `flatten_pp=flatten`.\n num_pp_samples : int\n The number of posterior/prior predictive samples to plot. For `kind` = 'scatter' and\n `animation = False` if defaults to a maximum of 5 samples and will set jitter to 0.7\n unless defined otherwise. Otherwise it defaults to all provided samples.\n random_seed : int\n Random number generator seed passed to numpy.random.seed to allow\n reproducibility of the plot. By default, no seed will be provided\n and the plot will change each call if a random sample is specified\n by `num_pp_samples`.\n jitter : float\n If kind is \"scatter\", jitter will add random uniform noise to the height\n of the ppc samples and observed data. By default 0.\n animated : bool\n Create an animation of one posterior/prior predictive sample per frame. Defaults to False.\n animation_kwargs : dict\n Keywords passed to `animation.FuncAnimation`.\n legend : bool\n Add legend to figure. By default True.\n ax: axes, optional\n Matplotlib axes or bokeh figures.\n backend: str, optional\n Select plotting backend {\"matplotlib\",\"bokeh\"}. 
Default \"matplotlib\".\n backend_kwargs: bool, optional\n These are kwargs specific to the backend being used. For additional documentation\n check the plotting method of the backend.\n group : {\"prior\", \"posterior\"}, optional\n Specifies which InferenceData group should be plotted. Defaults to 'posterior'.\n Other value can be 'prior'.\n show : bool, optional\n Call backend show function.\n\n Returns\n -------\n axes : matplotlib axes or bokeh figures\n\n Examples\n --------\n Plot the observed data KDE overlaid on posterior predictive KDEs.\n\n .. plot::\n :context: close-figs\n\n >>> import arviz as az\n >>> data = az.load_arviz_data('radon')\n >>> az.plot_ppc(data,data_pairs={\"obs\":\"obs\"})\n >>> #az.plot_ppc(data,data_pairs={\"obs\":\"obs_hat\"})\n\n Plot the overlay with empirical CDFs.\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_ppc(data, kind='cumulative')\n\n Use the coords and flatten parameters to plot selected variable dimensions\n across multiple plots.\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_ppc(data, coords={'observed_county': ['ANOKA', 'BELTRAMI']}, flatten=[])\n\n Plot the overlay using a stacked scatter plot that is particularly useful\n when the sample sizes are small.\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_ppc(data, kind='scatter', flatten=[],\n >>> coords={'observed_county': ['AITKIN', 'BELTRAMI']})\n\n Plot random posterior predictive sub-samples.\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_ppc(data, num_pp_samples=30, random_seed=7)\n \"\"\"\n if group not in (\"posterior\", \"prior\"):\n raise TypeError(\"`group` argument must be either `posterior` or `prior`\")\n\n for groups in (\"{}_predictive\".format(group), \"observed_data\"):\n if not hasattr(data, groups):\n raise TypeError(\n '`data` argument must have the group \"{group}\" for ppcplot'.format(group=groups)\n )\n\n if kind.lower() not in (\"kde\", \"cumulative\", \"scatter\"):\n raise TypeError(\"`kind` argument must be either `kde`, `cumulative`, or `scatter`\")\n\n if data_pairs is None:\n data_pairs = {}\n\n if animation_kwargs is None:\n animation_kwargs = {}\n if platform.system() == \"Linux\":\n animation_kwargs.setdefault(\"blit\", True)\n else:\n animation_kwargs.setdefault(\"blit\", False)\n\n if animated and backend == \"bokeh\":\n raise TypeError(\"Animation option is only supported with matplotlib backend.\")\n\n if animated and animation_kwargs[\"blit\"] and platform.system() != \"Linux\":\n _log.warning(\n \"If you experience problems rendering the animation try setting\"\n \"`animation_kwargs({'blit':False}) or changing the plotting backend (e.g. 
to TkAgg)\"\n )\n\n if alpha is None:\n if animated:\n alpha = 1\n else:\n if kind.lower() == \"scatter\":\n alpha = 0.7\n else:\n alpha = 0.2\n\n if jitter is None:\n jitter = 0.0\n assert jitter >= 0.0\n\n observed = data.observed_data\n\n if group == \"posterior\":\n predictive_dataset = data.posterior_predictive\n elif group == \"prior\":\n predictive_dataset = data.prior_predictive\n\n if var_names is None:\n var_names = list(observed.data_vars)\n var_names = _var_names(var_names, observed)\n pp_var_names = [data_pairs.get(var, var) for var in var_names]\n pp_var_names = _var_names(pp_var_names, predictive_dataset)\n\n if flatten_pp is None and flatten is None:\n flatten_pp = list(predictive_dataset.dims.keys())\n elif flatten_pp is None:\n flatten_pp = flatten\n if flatten is None:\n flatten = list(observed.dims.keys())\n\n if coords is None:\n coords = {}\n\n if random_seed is not None:\n np.random.seed(random_seed)\n\n total_pp_samples = predictive_dataset.sizes[\"chain\"] * predictive_dataset.sizes[\"draw\"]\n if num_pp_samples is None:\n if kind == \"scatter\" and not animated:\n num_pp_samples = min(5, total_pp_samples)\n else:\n num_pp_samples = total_pp_samples\n\n if (\n not isinstance(num_pp_samples, Integral)\n or num_pp_samples < 1\n or num_pp_samples > total_pp_samples\n ):\n raise TypeError(\n \"`num_pp_samples` must be an integer between 1 and \"\n + \"{limit}.\".format(limit=total_pp_samples)\n )\n\n pp_sample_ix = np.random.choice(total_pp_samples, size=num_pp_samples, replace=False)\n\n for key in coords.keys():\n coords[key] = np.where(np.in1d(observed[key], coords[key]))[0]\n\n obs_plotters = filter_plotters_list(\n list(\n xarray_var_iter(\n observed.isel(coords), skip_dims=set(flatten), var_names=var_names, combined=True\n )\n ),\n \"plot_ppc\",\n )\n length_plotters = len(obs_plotters)\n pp_plotters = [\n tup\n for _, tup in zip(\n range(length_plotters),\n xarray_var_iter(\n predictive_dataset.isel(coords),\n var_names=pp_var_names,\n skip_dims=set(flatten_pp),\n combined=True,\n ),\n )\n ]\n rows, cols = default_grid(length_plotters)\n\n (figsize, ax_labelsize, _, xt_labelsize, linewidth, markersize) = _scale_fig_size(\n figsize, textsize, rows, cols\n )\n\n ppcplot_kwargs = dict(\n ax=ax,\n length_plotters=length_plotters,\n rows=rows,\n cols=cols,\n figsize=figsize,\n animated=animated,\n obs_plotters=obs_plotters,\n pp_plotters=pp_plotters,\n predictive_dataset=predictive_dataset,\n pp_sample_ix=pp_sample_ix,\n kind=kind,\n alpha=alpha,\n linewidth=linewidth,\n mean=mean,\n xt_labelsize=xt_labelsize,\n ax_labelsize=ax_labelsize,\n jitter=jitter,\n total_pp_samples=total_pp_samples,\n legend=legend,\n markersize=markersize,\n animation_kwargs=animation_kwargs,\n num_pp_samples=num_pp_samples,\n backend_kwargs=backend_kwargs,\n show=show,\n )\n\n if backend == \"bokeh\":\n\n ppcplot_kwargs.pop(\"animated\")\n ppcplot_kwargs.pop(\"animation_kwargs\")\n ppcplot_kwargs.pop(\"legend\")\n ppcplot_kwargs.pop(\"xt_labelsize\")\n ppcplot_kwargs.pop(\"ax_labelsize\")\n\n # TODO: Add backend kwargs\n plot = get_plotting_function(\"plot_ppc\", \"ppcplot\", backend)\n axes = plot(**ppcplot_kwargs)\n return axes\n", "path": "arviz/plots/ppcplot.py"}]}
| 4,063 | 238 |
gh_patches_debug_17010
|
rasdani/github-patches
|
git_diff
|
mlflow__mlflow-4880
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Alembic migration for metrics table uses incorrect server default
## MLflow Roadmap Item
This is an MLflow Roadmap item that has been prioritized by the MLflow maintainers. We're seeking help with the implementation of roadmap items tagged with the `help wanted` label.
For requirements clarifications and implementation questions, or to request a PR review, please tag @harupy in your communications related to this issue.
### System information
- **Have I written custom code (as opposed to using a stock example script provided in MLflow)**: No
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: miniconda container - debian buster
- **MLflow installed from (source or binary)**: mlflow from PyPI
- **MLflow version (run ``mlflow --version``)**: mlflow 1.14.1 trying to upgrade to 1.16 or 1.17
- **Python version**: Python 3.9.2
- **npm version, if running the dev UI**: NA
- **Exact command to reproduce**: mlflow db upgrade <MSSQL connection string >
- Tracking server DB: Azure Microsoft SQL DB
### Describe the problem
When I upgrade the database from 1.14.1 to a higher version I get an error. We currently use an Azure Microsoft SQL DB and would like to upgrade to 1.16 or 1.17.
### Code to reproduce issue
mlflow db upgrade "mssql+pyodbc://_rest_of_conn_string"
### Other info / logs
sqlalchemy.exc.ProgrammingError: (pyodbc.ProgrammingError) ('42000', '[42000] [Microsoft][ODBC Driver 17 for SQL Server][SQL Server]Column already has a DEFAULT bound to it. (1781) (SQLExecDirectW)')
[SQL: ALTER TABLE metrics ADD DEFAULT '0' FOR is_nan]
(Background on this error at: http://sqlalche.me/e/14/f405)
### What component(s), interfaces, languages, and integrations does this bug affect?
Components
- [ ] `area/artifacts`: Artifact stores and artifact logging
- [ ] `area/build`: Build and test infrastructure for MLflow
- [ ] `area/docs`: MLflow documentation pages
- [ ] `area/examples`: Example code
- [x] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry
- [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors
- [ ] `area/projects`: MLproject format, project running backends
- [ ] `area/scoring`: Local serving, model deployment tools, spark UDFs
- [ ] `area/server-infra`: MLflow server, JavaScript dev server
- [ ] `area/tracking`: Tracking Service, tracking client APIs, autologging
Interface
- [ ] `area/uiux`: Front-end, user experience, JavaScript, plotting
- [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models
- [x] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry
- [ ] `area/windows`: Windows support
Language
- [ ] `language/r`: R APIs and clients
- [ ] `language/java`: Java APIs and clients
- [ ] `language/new`: Proposals for new client languages
Integrations
- [ ] `integrations/azure`: Azure and Azure ML integrations
- [ ] `integrations/sagemaker`: SageMaker integrations
- [ ] `integrations/databricks`: Databricks integrations
--- END ISSUE ---
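The failing statement in the log, `ALTER TABLE metrics ADD DEFAULT '0' FOR is_nan`, is only needed on MySQL, where an earlier `alter_column` call had silently dropped the column default; on MSSQL a default is still bound to the column, hence the error. A minimal sketch of a dialect guard for such a migration (illustrative only; it assumes the same `metrics.is_nan` column as the file below and checks the dialect through the active connection):

```python
# Sketch: only re-apply the server default when the connected backend is MySQL.
from alembic import op
import sqlalchemy as sa


def upgrade():
    bind = op.get_bind()  # active SQLAlchemy connection for this migration
    if bind.engine.name == "mysql":
        with op.batch_alter_table("metrics") as batch_op:
            batch_op.alter_column(
                "is_nan",
                type_=sa.types.Boolean(create_constraint=True),
                nullable=False,
                server_default="0",
            )
```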
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlflow/store/db_migrations/versions/c48cb773bb87_reset_default_value_for_is_nan_in_metrics_table_for_mysql.py`
Content:
```
1 """reset_default_value_for_is_nan_in_metrics_table_for_mysql
2
3 Revision ID: c48cb773bb87
4 Revises: 39d1c3be5f05
5 Create Date: 2021-04-02 15:43:28.466043
6
7 """
8 from alembic import op
9 import sqlalchemy as sa
10
11
12 # revision identifiers, used by Alembic.
13 revision = "c48cb773bb87"
14 down_revision = "39d1c3be5f05"
15 branch_labels = None
16 depends_on = None
17
18
19 def upgrade():
20 # This part of the migration is only relevant for MySQL.
21 # In 39d1c3be5f05_add_is_nan_constraint_for_metrics_tables_if_necessary.py
22 # (added in MLflow 1.15.0), `alter_column` is called on the `is_nan` column in the `metrics`
23 # table without specifying `existing_server_default`. This alters the column default value to
24 # NULL in MySQL (see the doc below).
25 #
26 # https://alembic.sqlalchemy.org/en/latest/ops.html#alembic.operations.Operations.alter_column
27 #
28 # To revert this change, set the default column value to "0" by specifying `server_default`
29 with op.batch_alter_table("metrics") as batch_op:
30 batch_op.alter_column(
31 "is_nan",
32 type_=sa.types.Boolean(create_constraint=True),
33 nullable=False,
34 server_default="0",
35 )
36
37
38 def downgrade():
39 pass
40
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mlflow/store/db_migrations/versions/c48cb773bb87_reset_default_value_for_is_nan_in_metrics_table_for_mysql.py b/mlflow/store/db_migrations/versions/c48cb773bb87_reset_default_value_for_is_nan_in_metrics_table_for_mysql.py
--- a/mlflow/store/db_migrations/versions/c48cb773bb87_reset_default_value_for_is_nan_in_metrics_table_for_mysql.py
+++ b/mlflow/store/db_migrations/versions/c48cb773bb87_reset_default_value_for_is_nan_in_metrics_table_for_mysql.py
@@ -26,13 +26,15 @@
# https://alembic.sqlalchemy.org/en/latest/ops.html#alembic.operations.Operations.alter_column
#
# To revert this change, set the default column value to "0" by specifying `server_default`
- with op.batch_alter_table("metrics") as batch_op:
- batch_op.alter_column(
- "is_nan",
- type_=sa.types.Boolean(create_constraint=True),
- nullable=False,
- server_default="0",
- )
+ bind = op.get_bind()
+ if bind.engine.name == "mysql":
+ with op.batch_alter_table("metrics") as batch_op:
+ batch_op.alter_column(
+ "is_nan",
+ type_=sa.types.Boolean(create_constraint=True),
+ nullable=False,
+ server_default="0",
+ )
def downgrade():
|
{"golden_diff": "diff --git a/mlflow/store/db_migrations/versions/c48cb773bb87_reset_default_value_for_is_nan_in_metrics_table_for_mysql.py b/mlflow/store/db_migrations/versions/c48cb773bb87_reset_default_value_for_is_nan_in_metrics_table_for_mysql.py\n--- a/mlflow/store/db_migrations/versions/c48cb773bb87_reset_default_value_for_is_nan_in_metrics_table_for_mysql.py\n+++ b/mlflow/store/db_migrations/versions/c48cb773bb87_reset_default_value_for_is_nan_in_metrics_table_for_mysql.py\n@@ -26,13 +26,15 @@\n # https://alembic.sqlalchemy.org/en/latest/ops.html#alembic.operations.Operations.alter_column\n #\n # To revert this change, set the default column value to \"0\" by specifying `server_default`\n- with op.batch_alter_table(\"metrics\") as batch_op:\n- batch_op.alter_column(\n- \"is_nan\",\n- type_=sa.types.Boolean(create_constraint=True),\n- nullable=False,\n- server_default=\"0\",\n- )\n+ bind = op.get_bind()\n+ if bind.engine.name == \"mysql\":\n+ with op.batch_alter_table(\"metrics\") as batch_op:\n+ batch_op.alter_column(\n+ \"is_nan\",\n+ type_=sa.types.Boolean(create_constraint=True),\n+ nullable=False,\n+ server_default=\"0\",\n+ )\n \n \n def downgrade():\n", "issue": "[BUG] Alembic migration for metrics table uses incorrect server default\n## MLflow Roadmap Item\r\n\r\nThis is an MLflow Roadmap item that has been prioritized by the MLflow maintainers. We're seeking help with the implementation of roadmap items tagged with the `help wanted` label.\r\n\r\nFor requirements clarifications and implementation questions, or to request a PR review, please tag @harupy in your communications related to this issue.\r\n\r\n### System information\r\n- **Have I written custom code (as opposed to using a stock example script provided in MLflow)**: No\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: miniconda container - debian buster\r\n- **MLflow installed from (source or binary)**: mflow from pypi\r\n- **MLflow version (run ``mlflow --version``)**: mlflow 1.14.1 trying to upgrade to 1.16 or 1.17\r\n- **Python version**: Python 3.9.2\r\n- **npm version, if running the dev UI**: NA\r\n- **Exact command to reproduce**: mlflow db upgrade <MSSQL connection string >\r\n- Tracking server DB: Azure Microsoft SQL DB\r\n\r\n### Describe the problem\r\nWhen I upgrade the database from 1.14.1 to a higher version I get an error. Currently use an Azure MSFT DB. Would like to upgrade to 1.16 or 1.17\r\n\r\n### Code to reproduce issue\r\nmlflow db upgrade \"mssql+pyodbc://_rest_of_conn_string\"\r\n\r\n### Other info / logs\r\nsqlalchemy.exc.ProgrammingError: (pyodbc.ProgrammingError) ('42000', '[42000] [Microsoft][ODBC Driver 17 for SQL Server][SQL Server]Column already has a DEFAULT bound to it. 
(1781) (SQLExecDirectW)')\r\n[SQL: ALTER TABLE metrics ADD DEFAULT '0' FOR is_nan]\r\n(Background on this error at: http://sqlalche.me/e/14/f405)\r\n\r\n\r\n### What component(s), interfaces, languages, and integrations does this bug affect?\r\nComponents \r\n- [ ] `area/artifacts`: Artifact stores and artifact logging\r\n- [ ] `area/build`: Build and test infrastructure for MLflow\r\n- [ ] `area/docs`: MLflow documentation pages\r\n- [ ] `area/examples`: Example code\r\n- [x] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry\r\n- [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors\r\n- [ ] `area/projects`: MLproject format, project running backends\r\n- [ ] `area/scoring`: Local serving, model deployment tools, spark UDFs\r\n- [ ] `area/server-infra`: MLflow server, JavaScript dev server\r\n- [ ] `area/tracking`: Tracking Service, tracking client APIs, autologging\r\n\r\nInterface \r\n- [ ] `area/uiux`: Front-end, user experience, JavaScript, plotting\r\n- [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models\r\n- [x] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry\r\n- [ ] `area/windows`: Windows support\r\n\r\nLanguage \r\n- [ ] `language/r`: R APIs and clients\r\n- [ ] `language/java`: Java APIs and clients\r\n- [ ] `language/new`: Proposals for new client languages\r\n\r\nIntegrations\r\n- [ ] `integrations/azure`: Azure and Azure ML integrations\r\n- [ ] `integrations/sagemaker`: SageMaker integrations\r\n- [ ] `integrations/databricks`: Databricks integrations\r\n\n", "before_files": [{"content": "\"\"\"reset_default_value_for_is_nan_in_metrics_table_for_mysql\n\nRevision ID: c48cb773bb87\nRevises: 39d1c3be5f05\nCreate Date: 2021-04-02 15:43:28.466043\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = \"c48cb773bb87\"\ndown_revision = \"39d1c3be5f05\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # This part of the migration is only relevant for MySQL.\n # In 39d1c3be5f05_add_is_nan_constraint_for_metrics_tables_if_necessary.py\n # (added in MLflow 1.15.0), `alter_column` is called on the `is_nan` column in the `metrics`\n # table without specifying `existing_server_default`. 
This alters the column default value to\n # NULL in MySQL (see the doc below).\n #\n # https://alembic.sqlalchemy.org/en/latest/ops.html#alembic.operations.Operations.alter_column\n #\n # To revert this change, set the default column value to \"0\" by specifying `server_default`\n with op.batch_alter_table(\"metrics\") as batch_op:\n batch_op.alter_column(\n \"is_nan\",\n type_=sa.types.Boolean(create_constraint=True),\n nullable=False,\n server_default=\"0\",\n )\n\n\ndef downgrade():\n pass\n", "path": "mlflow/store/db_migrations/versions/c48cb773bb87_reset_default_value_for_is_nan_in_metrics_table_for_mysql.py"}], "after_files": [{"content": "\"\"\"reset_default_value_for_is_nan_in_metrics_table_for_mysql\n\nRevision ID: c48cb773bb87\nRevises: 39d1c3be5f05\nCreate Date: 2021-04-02 15:43:28.466043\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = \"c48cb773bb87\"\ndown_revision = \"39d1c3be5f05\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # This part of the migration is only relevant for MySQL.\n # In 39d1c3be5f05_add_is_nan_constraint_for_metrics_tables_if_necessary.py\n # (added in MLflow 1.15.0), `alter_column` is called on the `is_nan` column in the `metrics`\n # table without specifying `existing_server_default`. This alters the column default value to\n # NULL in MySQL (see the doc below).\n #\n # https://alembic.sqlalchemy.org/en/latest/ops.html#alembic.operations.Operations.alter_column\n #\n # To revert this change, set the default column value to \"0\" by specifying `server_default`\n bind = op.get_bind()\n if bind.engine.name == \"mysql\":\n with op.batch_alter_table(\"metrics\") as batch_op:\n batch_op.alter_column(\n \"is_nan\",\n type_=sa.types.Boolean(create_constraint=True),\n nullable=False,\n server_default=\"0\",\n )\n\n\ndef downgrade():\n pass\n", "path": "mlflow/store/db_migrations/versions/c48cb773bb87_reset_default_value_for_is_nan_in_metrics_table_for_mysql.py"}]}
| 1,513 | 328 |
gh_patches_debug_21991
|
rasdani/github-patches
|
git_diff
|
iterative__dvc-2767
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
use non-blocking multithreaded progress I/O
continuing discussion from #2697
- use the old non-blocking approach
+ https://github.com/iterative/dvc/blob/0.35.6/dvc/progress.py#L51
- use blocking upon completion
+ https://github.com/iterative/dvc/blob/0.35.6/dvc/progress.py#L70
- depends on https://github.com/tqdm/tqdm/issues/838 -> https://github.com/tqdm/tqdm/pull/839
--- END ISSUE ---
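A minimal sketch of the intended behaviour, assuming a tqdm version that supports `lock_args` (4.38+, per the linked tqdm PR): register one shared `RLock` for all bars, let intermediate refreshes use a non-blocking acquire so worker threads never stall on terminal I/O, and leave `close()` on the default blocking acquire so the final state is always written. This is illustrative, not dvc's actual wrapper:

```python
# Sketch: non-blocking intermediate refreshes, blocking final write (tqdm >= 4.38).
from threading import RLock

from tqdm import tqdm

tqdm.set_lock(RLock())  # one lock shared by every bar in the process

with tqdm(total=100, lock_args=(False,)) as pbar:
    # lock_args=(False,) is forwarded to refresh(): acquire(blocking=False),
    # so an update that cannot grab the lock skips the redraw instead of waiting.
    for _ in range(100):
        pbar.update(1)
# close() still uses a blocking acquire, so the completed bar is always flushed.
```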
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import os
2 import sys
3
4 from setuptools import find_packages
5 from setuptools import setup
6 from setuptools.command.build_py import build_py as _build_py
7
8 import fastentrypoints # noqa: F401
9
10 # Prevents pkg_resources import in entry point script,
11 # see https://github.com/ninjaaron/fast-entry_points.
12 # This saves about 200 ms on startup time for non-wheel installs.
13
14
15 # https://packaging.python.org/guides/single-sourcing-package-version/
16 pkg_dir = os.path.dirname(os.path.abspath(__file__))
17 version_path = os.path.join(pkg_dir, "dvc", "version.py")
18 if sys.version_info[0] == 2:
19 import imp
20
21 dvc_version = imp.load_source("dvc.version", version_path)
22 else:
23 import importlib.util
24
25 spec = importlib.util.spec_from_file_location("dvc.version", version_path)
26 dvc_version = importlib.util.module_from_spec(spec)
27 spec.loader.exec_module(dvc_version)
28
29 version = dvc_version.__version__ # noqa: F821
30
31
32 # To achieve consistency between the build version and the one provided
33 # by your package during runtime, you need to **pin** the build version.
34 #
35 # This custom class will replace the version.py module with a **static**
36 # `__version__` that your package can read at runtime, assuring consistancy.
37 #
38 # References:
39 # - https://docs.python.org/3.7/distutils/extending.html
40 # - https://github.com/python/mypy
41 class build_py(_build_py):
42 def pin_version(self):
43 path = os.path.join(self.build_lib, "dvc")
44 self.mkpath(path)
45 with open(os.path.join(path, "version.py"), "w") as fobj:
46 fobj.write("# AUTOGENERATED at build time by setup.py\n")
47 fobj.write('__version__ = "{}"\n'.format(version))
48
49 def run(self):
50 self.execute(self.pin_version, ())
51 _build_py.run(self)
52
53
54 install_requires = [
55 "ply>=3.9", # See https://github.com/pyinstaller/pyinstaller/issues/1945
56 "configparser>=3.5.0",
57 "future>=0.16.0",
58 "colorama>=0.3.9",
59 "configobj>=5.0.6",
60 "gitpython>=2.1.8",
61 "gitdb2>=2.0.6", # See https://github.com/iterative/dvc/issues/1880
62 "setuptools>=34.0.0",
63 "nanotime>=0.5.2",
64 "pyasn1>=0.4.1",
65 "schema>=0.6.7",
66 "jsonpath-ng>=1.4.3",
67 "requests>=2.22.0",
68 "grandalf==0.6",
69 "asciimatics>=1.10.0",
70 "distro>=1.3.0",
71 "appdirs>=1.4.3",
72 "treelib>=1.5.5",
73 "inflect>=2.1.0",
74 "humanize>=0.5.1",
75 "ruamel.yaml>=0.16.1",
76 "funcy>=1.12",
77 "pathspec>=0.6.0",
78 "shortuuid>=0.5.0",
79 "tqdm>=4.36.1,<5",
80 "packaging>=19.0",
81 "win-unicode-console>=0.5; sys_platform == 'win32'",
82 "pywin32>=225; sys_platform == 'win32'",
83 ]
84
85 if sys.version_info[0] == 2:
86 install_requires.append("networkx>=2.1,<2.3")
87 else:
88 install_requires.append("networkx>=2.1,<2.4")
89
90 # Extra dependencies for remote integrations
91
92 gs = ["google-cloud-storage==1.19.0"]
93 s3 = ["boto3==1.9.115"]
94 azure = ["azure-storage-blob==2.1.0"]
95 oss = ["oss2==2.6.1"]
96 ssh = ["paramiko>=2.5.0"]
97 # gssapi should not be included in all_remotes, because it doesn't have wheels
98 # for linux and mac, so it will fail to compile if user doesn't have all the
99 # requirements, including kerberos itself. Once all the wheels are available,
100 # we can start shipping it by default.
101 ssh_gssapi = ["paramiko[gssapi]>=2.5.0"]
102 hdfs = ["pyarrow==0.14.0"]
103 all_remotes = gs + s3 + azure + ssh + oss
104
105 if os.name != "nt" or sys.version_info[0] != 2:
106 # NOTE: there are no pyarrow wheels for python2 on windows
107 all_remotes += hdfs
108
109 # Extra dependecies to run tests
110 tests_requirements = [
111 "PyInstaller==3.5",
112 "wheel>=0.31.1",
113 "pydot>=1.2.4",
114 # Test requirements:
115 "pytest>=4.6.0",
116 "pytest-timeout>=1.3.3",
117 "pytest-cov>=2.6.1",
118 "pytest-xdist>=1.26.1",
119 "pytest-mock>=1.10.4",
120 "flaky>=3.5.3",
121 "mock>=3.0.0",
122 "xmltodict>=0.11.0",
123 "awscli==1.16.266",
124 "google-compute-engine==2.8.13",
125 "Pygments", # required by collective.checkdocs,
126 "collective.checkdocs",
127 "flake8",
128 "psutil",
129 "flake8-docstrings",
130 "pydocstyle<4.0",
131 "jaraco.windows==3.9.2",
132 "mock-ssh-server>=0.6.0",
133 "moto==1.3.14.dev464",
134 "rangehttpserver==1.2.0",
135 ]
136
137 if (sys.version_info) >= (3, 6):
138 tests_requirements.append("black==19.3b0")
139
140 setup(
141 name="dvc",
142 version=version,
143 description="Git for data scientists - manage your code and data together",
144 long_description=open("README.rst", "r").read(),
145 author="Dmitry Petrov",
146 author_email="[email protected]",
147 download_url="https://github.com/iterative/dvc",
148 license="Apache License 2.0",
149 install_requires=install_requires,
150 extras_require={
151 "all": all_remotes,
152 "gs": gs,
153 "s3": s3,
154 "azure": azure,
155 "oss": oss,
156 "ssh": ssh,
157 "ssh_gssapi": ssh_gssapi,
158 "hdfs": hdfs,
159 # NOTE: https://github.com/inveniosoftware/troubleshooting/issues/1
160 ":python_version=='2.7'": [
161 "futures",
162 "pathlib2",
163 "zc.lockfile>=1.2.1",
164 ],
165 ":python_version>='3.0'": ["flufl.lock>=3.2"],
166 "tests": tests_requirements,
167 },
168 keywords="data science, data version control, machine learning",
169 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*",
170 classifiers=[
171 "Development Status :: 4 - Beta",
172 "Programming Language :: Python :: 2",
173 "Programming Language :: Python :: 2.7",
174 "Programming Language :: Python :: 3",
175 "Programming Language :: Python :: 3.5",
176 "Programming Language :: Python :: 3.6",
177 "Programming Language :: Python :: 3.7",
178 ],
179 packages=find_packages(exclude=["tests"]),
180 include_package_data=True,
181 url="http://dvc.org",
182 entry_points={"console_scripts": ["dvc = dvc.main:main"]},
183 cmdclass={"build_py": build_py},
184 zip_safe=False,
185 )
186
```
Path: `dvc/progress.py`
Content:
```
1 """Manages progress bars for dvc repo."""
2 from __future__ import print_function
3
4 import logging
5 import sys
6
7 from funcy import merge
8 from tqdm import tqdm
9
10 from dvc.utils import env2bool
11
12 logger = logging.getLogger(__name__)
13
14
15 class Tqdm(tqdm):
16 """
17 maximum-compatibility tqdm-based progressbars
18 """
19
20 BAR_FMT_DEFAULT = (
21 "{percentage:3.0f}%|{bar:10}|"
22 "{desc:{ncols_desc}.{ncols_desc}}{n}/{total}"
23 " [{elapsed}<{remaining}, {rate_fmt:>11}{postfix}]"
24 )
25 BAR_FMT_NOTOTAL = (
26 "{desc:{ncols_desc}.{ncols_desc}}{n}"
27 " [{elapsed}<??:??, {rate_fmt:>11}{postfix}]"
28 )
29
30 def __init__(
31 self,
32 iterable=None,
33 disable=None,
34 level=logging.ERROR,
35 desc=None,
36 leave=False,
37 bar_format=None,
38 bytes=False, # pylint: disable=W0622
39 file=None,
40 **kwargs
41 ):
42 """
43 bytes : shortcut for
44 `unit='B', unit_scale=True, unit_divisor=1024, miniters=1`
45 desc : persists after `close()`
46 level : effective logging level for determining `disable`;
47 used only if `disable` is unspecified
48 disable : If (default: None), will be determined by logging level.
49 May be overridden to `True` due to non-TTY status.
50 Skip override by specifying env var `DVC_IGNORE_ISATTY`.
51 kwargs : anything accepted by `tqdm.tqdm()`
52 """
53 kwargs = kwargs.copy()
54 kwargs.setdefault("unit_scale", True)
55 if bytes:
56 bytes_defaults = dict(
57 unit="B", unit_scale=True, unit_divisor=1024, miniters=1
58 )
59 kwargs = merge(bytes_defaults, kwargs)
60 if file is None:
61 file = sys.stderr
62 self.desc_persist = desc
63 # auto-disable based on `logger.level`
64 if disable is None:
65 disable = logger.getEffectiveLevel() > level
66 # auto-disable based on TTY
67 if (
68 not disable
69 and not env2bool("DVC_IGNORE_ISATTY")
70 and hasattr(file, "isatty")
71 ):
72 disable = not file.isatty()
73 super(Tqdm, self).__init__(
74 iterable=iterable,
75 disable=disable,
76 leave=leave,
77 desc=desc,
78 bar_format="!",
79 **kwargs
80 )
81 if bar_format is None:
82 if self.__len__():
83 self.bar_format = self.BAR_FMT_DEFAULT
84 else:
85 self.bar_format = self.BAR_FMT_NOTOTAL
86 else:
87 self.bar_format = bar_format
88 self.refresh()
89
90 def update_desc(self, desc, n=1):
91 """
92 Calls `set_description_str(desc)` and `update(n)`
93 """
94 self.set_description_str(desc, refresh=False)
95 self.update(n)
96
97 def update_to(self, current, total=None):
98 if total:
99 self.total = total # pylint: disable=W0613,W0201
100 self.update(current - self.n)
101
102 def close(self):
103 if self.desc_persist is not None:
104 self.set_description_str(self.desc_persist, refresh=False)
105 super(Tqdm, self).close()
106
107 @property
108 def format_dict(self):
109 """inject `ncols_desc` to fill the display width (`ncols`)"""
110 d = super(Tqdm, self).format_dict
111 ncols = d["ncols"] or 80
112 ncols_desc = ncols - len(self.format_meter(ncols_desc=1, **d)) + 1
113 ncols_desc = max(ncols_desc, 0)
114 if ncols_desc:
115 d["ncols_desc"] = ncols_desc
116 else:
117 # work-around for zero-width description
118 d["ncols_desc"] = 1
119 d["prefix"] = ""
120 return d
121
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/dvc/progress.py b/dvc/progress.py
--- a/dvc/progress.py
+++ b/dvc/progress.py
@@ -3,6 +3,7 @@
import logging
import sys
+from threading import RLock
from funcy import merge
from tqdm import tqdm
@@ -10,6 +11,7 @@
from dvc.utils import env2bool
logger = logging.getLogger(__name__)
+tqdm.set_lock(RLock())
class Tqdm(tqdm):
@@ -76,6 +78,7 @@
leave=leave,
desc=desc,
bar_format="!",
+ lock_args=(False,),
**kwargs
)
if bar_format is None:
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -76,7 +76,7 @@
"funcy>=1.12",
"pathspec>=0.6.0",
"shortuuid>=0.5.0",
- "tqdm>=4.36.1,<5",
+ "tqdm>=4.38.0,<5",
"packaging>=19.0",
"win-unicode-console>=0.5; sys_platform == 'win32'",
"pywin32>=225; sys_platform == 'win32'",
|
{"golden_diff": "diff --git a/dvc/progress.py b/dvc/progress.py\n--- a/dvc/progress.py\n+++ b/dvc/progress.py\n@@ -3,6 +3,7 @@\n \n import logging\n import sys\n+from threading import RLock\n \n from funcy import merge\n from tqdm import tqdm\n@@ -10,6 +11,7 @@\n from dvc.utils import env2bool\n \n logger = logging.getLogger(__name__)\n+tqdm.set_lock(RLock())\n \n \n class Tqdm(tqdm):\n@@ -76,6 +78,7 @@\n leave=leave,\n desc=desc,\n bar_format=\"!\",\n+ lock_args=(False,),\n **kwargs\n )\n if bar_format is None:\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -76,7 +76,7 @@\n \"funcy>=1.12\",\n \"pathspec>=0.6.0\",\n \"shortuuid>=0.5.0\",\n- \"tqdm>=4.36.1,<5\",\n+ \"tqdm>=4.38.0,<5\",\n \"packaging>=19.0\",\n \"win-unicode-console>=0.5; sys_platform == 'win32'\",\n \"pywin32>=225; sys_platform == 'win32'\",\n", "issue": "use non-blocking multithreaded progress I/O\ncontinuing discussion from #2697\r\n\r\n- use the old non-blocking approach\r\n + https://github.com/iterative/dvc/blob/0.35.6/dvc/progress.py#L51\r\n- use blocking upon completion\r\n + https://github.com/iterative/dvc/blob/0.35.6/dvc/progress.py#L70\r\n- depends on https://github.com/tqdm/tqdm/issues/838 -> https://github.com/tqdm/tqdm/pull/839\n", "before_files": [{"content": "import os\nimport sys\n\nfrom setuptools import find_packages\nfrom setuptools import setup\nfrom setuptools.command.build_py import build_py as _build_py\n\nimport fastentrypoints # noqa: F401\n\n# Prevents pkg_resources import in entry point script,\n# see https://github.com/ninjaaron/fast-entry_points.\n# This saves about 200 ms on startup time for non-wheel installs.\n\n\n# https://packaging.python.org/guides/single-sourcing-package-version/\npkg_dir = os.path.dirname(os.path.abspath(__file__))\nversion_path = os.path.join(pkg_dir, \"dvc\", \"version.py\")\nif sys.version_info[0] == 2:\n import imp\n\n dvc_version = imp.load_source(\"dvc.version\", version_path)\nelse:\n import importlib.util\n\n spec = importlib.util.spec_from_file_location(\"dvc.version\", version_path)\n dvc_version = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(dvc_version)\n\nversion = dvc_version.__version__ # noqa: F821\n\n\n# To achieve consistency between the build version and the one provided\n# by your package during runtime, you need to **pin** the build version.\n#\n# This custom class will replace the version.py module with a **static**\n# `__version__` that your package can read at runtime, assuring consistancy.\n#\n# References:\n# - https://docs.python.org/3.7/distutils/extending.html\n# - https://github.com/python/mypy\nclass build_py(_build_py):\n def pin_version(self):\n path = os.path.join(self.build_lib, \"dvc\")\n self.mkpath(path)\n with open(os.path.join(path, \"version.py\"), \"w\") as fobj:\n fobj.write(\"# AUTOGENERATED at build time by setup.py\\n\")\n fobj.write('__version__ = \"{}\"\\n'.format(version))\n\n def run(self):\n self.execute(self.pin_version, ())\n _build_py.run(self)\n\n\ninstall_requires = [\n \"ply>=3.9\", # See https://github.com/pyinstaller/pyinstaller/issues/1945\n \"configparser>=3.5.0\",\n \"future>=0.16.0\",\n \"colorama>=0.3.9\",\n \"configobj>=5.0.6\",\n \"gitpython>=2.1.8\",\n \"gitdb2>=2.0.6\", # See https://github.com/iterative/dvc/issues/1880\n \"setuptools>=34.0.0\",\n \"nanotime>=0.5.2\",\n \"pyasn1>=0.4.1\",\n \"schema>=0.6.7\",\n \"jsonpath-ng>=1.4.3\",\n \"requests>=2.22.0\",\n \"grandalf==0.6\",\n \"asciimatics>=1.10.0\",\n \"distro>=1.3.0\",\n \"appdirs>=1.4.3\",\n 
\"treelib>=1.5.5\",\n \"inflect>=2.1.0\",\n \"humanize>=0.5.1\",\n \"ruamel.yaml>=0.16.1\",\n \"funcy>=1.12\",\n \"pathspec>=0.6.0\",\n \"shortuuid>=0.5.0\",\n \"tqdm>=4.36.1,<5\",\n \"packaging>=19.0\",\n \"win-unicode-console>=0.5; sys_platform == 'win32'\",\n \"pywin32>=225; sys_platform == 'win32'\",\n]\n\nif sys.version_info[0] == 2:\n install_requires.append(\"networkx>=2.1,<2.3\")\nelse:\n install_requires.append(\"networkx>=2.1,<2.4\")\n\n# Extra dependencies for remote integrations\n\ngs = [\"google-cloud-storage==1.19.0\"]\ns3 = [\"boto3==1.9.115\"]\nazure = [\"azure-storage-blob==2.1.0\"]\noss = [\"oss2==2.6.1\"]\nssh = [\"paramiko>=2.5.0\"]\n# gssapi should not be included in all_remotes, because it doesn't have wheels\n# for linux and mac, so it will fail to compile if user doesn't have all the\n# requirements, including kerberos itself. Once all the wheels are available,\n# we can start shipping it by default.\nssh_gssapi = [\"paramiko[gssapi]>=2.5.0\"]\nhdfs = [\"pyarrow==0.14.0\"]\nall_remotes = gs + s3 + azure + ssh + oss\n\nif os.name != \"nt\" or sys.version_info[0] != 2:\n # NOTE: there are no pyarrow wheels for python2 on windows\n all_remotes += hdfs\n\n# Extra dependecies to run tests\ntests_requirements = [\n \"PyInstaller==3.5\",\n \"wheel>=0.31.1\",\n \"pydot>=1.2.4\",\n # Test requirements:\n \"pytest>=4.6.0\",\n \"pytest-timeout>=1.3.3\",\n \"pytest-cov>=2.6.1\",\n \"pytest-xdist>=1.26.1\",\n \"pytest-mock>=1.10.4\",\n \"flaky>=3.5.3\",\n \"mock>=3.0.0\",\n \"xmltodict>=0.11.0\",\n \"awscli==1.16.266\",\n \"google-compute-engine==2.8.13\",\n \"Pygments\", # required by collective.checkdocs,\n \"collective.checkdocs\",\n \"flake8\",\n \"psutil\",\n \"flake8-docstrings\",\n \"pydocstyle<4.0\",\n \"jaraco.windows==3.9.2\",\n \"mock-ssh-server>=0.6.0\",\n \"moto==1.3.14.dev464\",\n \"rangehttpserver==1.2.0\",\n]\n\nif (sys.version_info) >= (3, 6):\n tests_requirements.append(\"black==19.3b0\")\n\nsetup(\n name=\"dvc\",\n version=version,\n description=\"Git for data scientists - manage your code and data together\",\n long_description=open(\"README.rst\", \"r\").read(),\n author=\"Dmitry Petrov\",\n author_email=\"[email protected]\",\n download_url=\"https://github.com/iterative/dvc\",\n license=\"Apache License 2.0\",\n install_requires=install_requires,\n extras_require={\n \"all\": all_remotes,\n \"gs\": gs,\n \"s3\": s3,\n \"azure\": azure,\n \"oss\": oss,\n \"ssh\": ssh,\n \"ssh_gssapi\": ssh_gssapi,\n \"hdfs\": hdfs,\n # NOTE: https://github.com/inveniosoftware/troubleshooting/issues/1\n \":python_version=='2.7'\": [\n \"futures\",\n \"pathlib2\",\n \"zc.lockfile>=1.2.1\",\n ],\n \":python_version>='3.0'\": [\"flufl.lock>=3.2\"],\n \"tests\": tests_requirements,\n },\n keywords=\"data science, data version control, machine learning\",\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*\",\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n packages=find_packages(exclude=[\"tests\"]),\n include_package_data=True,\n url=\"http://dvc.org\",\n entry_points={\"console_scripts\": [\"dvc = dvc.main:main\"]},\n cmdclass={\"build_py\": build_py},\n zip_safe=False,\n)\n", "path": "setup.py"}, {"content": "\"\"\"Manages progress bars for dvc repo.\"\"\"\nfrom __future__ import 
print_function\n\nimport logging\nimport sys\n\nfrom funcy import merge\nfrom tqdm import tqdm\n\nfrom dvc.utils import env2bool\n\nlogger = logging.getLogger(__name__)\n\n\nclass Tqdm(tqdm):\n \"\"\"\n maximum-compatibility tqdm-based progressbars\n \"\"\"\n\n BAR_FMT_DEFAULT = (\n \"{percentage:3.0f}%|{bar:10}|\"\n \"{desc:{ncols_desc}.{ncols_desc}}{n}/{total}\"\n \" [{elapsed}<{remaining}, {rate_fmt:>11}{postfix}]\"\n )\n BAR_FMT_NOTOTAL = (\n \"{desc:{ncols_desc}.{ncols_desc}}{n}\"\n \" [{elapsed}<??:??, {rate_fmt:>11}{postfix}]\"\n )\n\n def __init__(\n self,\n iterable=None,\n disable=None,\n level=logging.ERROR,\n desc=None,\n leave=False,\n bar_format=None,\n bytes=False, # pylint: disable=W0622\n file=None,\n **kwargs\n ):\n \"\"\"\n bytes : shortcut for\n `unit='B', unit_scale=True, unit_divisor=1024, miniters=1`\n desc : persists after `close()`\n level : effective logging level for determining `disable`;\n used only if `disable` is unspecified\n disable : If (default: None), will be determined by logging level.\n May be overridden to `True` due to non-TTY status.\n Skip override by specifying env var `DVC_IGNORE_ISATTY`.\n kwargs : anything accepted by `tqdm.tqdm()`\n \"\"\"\n kwargs = kwargs.copy()\n kwargs.setdefault(\"unit_scale\", True)\n if bytes:\n bytes_defaults = dict(\n unit=\"B\", unit_scale=True, unit_divisor=1024, miniters=1\n )\n kwargs = merge(bytes_defaults, kwargs)\n if file is None:\n file = sys.stderr\n self.desc_persist = desc\n # auto-disable based on `logger.level`\n if disable is None:\n disable = logger.getEffectiveLevel() > level\n # auto-disable based on TTY\n if (\n not disable\n and not env2bool(\"DVC_IGNORE_ISATTY\")\n and hasattr(file, \"isatty\")\n ):\n disable = not file.isatty()\n super(Tqdm, self).__init__(\n iterable=iterable,\n disable=disable,\n leave=leave,\n desc=desc,\n bar_format=\"!\",\n **kwargs\n )\n if bar_format is None:\n if self.__len__():\n self.bar_format = self.BAR_FMT_DEFAULT\n else:\n self.bar_format = self.BAR_FMT_NOTOTAL\n else:\n self.bar_format = bar_format\n self.refresh()\n\n def update_desc(self, desc, n=1):\n \"\"\"\n Calls `set_description_str(desc)` and `update(n)`\n \"\"\"\n self.set_description_str(desc, refresh=False)\n self.update(n)\n\n def update_to(self, current, total=None):\n if total:\n self.total = total # pylint: disable=W0613,W0201\n self.update(current - self.n)\n\n def close(self):\n if self.desc_persist is not None:\n self.set_description_str(self.desc_persist, refresh=False)\n super(Tqdm, self).close()\n\n @property\n def format_dict(self):\n \"\"\"inject `ncols_desc` to fill the display width (`ncols`)\"\"\"\n d = super(Tqdm, self).format_dict\n ncols = d[\"ncols\"] or 80\n ncols_desc = ncols - len(self.format_meter(ncols_desc=1, **d)) + 1\n ncols_desc = max(ncols_desc, 0)\n if ncols_desc:\n d[\"ncols_desc\"] = ncols_desc\n else:\n # work-around for zero-width description\n d[\"ncols_desc\"] = 1\n d[\"prefix\"] = \"\"\n return d\n", "path": "dvc/progress.py"}], "after_files": [{"content": "import os\nimport sys\n\nfrom setuptools import find_packages\nfrom setuptools import setup\nfrom setuptools.command.build_py import build_py as _build_py\n\nimport fastentrypoints # noqa: F401\n\n# Prevents pkg_resources import in entry point script,\n# see https://github.com/ninjaaron/fast-entry_points.\n# This saves about 200 ms on startup time for non-wheel installs.\n\n\n# https://packaging.python.org/guides/single-sourcing-package-version/\npkg_dir = os.path.dirname(os.path.abspath(__file__))\nversion_path = 
os.path.join(pkg_dir, \"dvc\", \"version.py\")\nif sys.version_info[0] == 2:\n import imp\n\n dvc_version = imp.load_source(\"dvc.version\", version_path)\nelse:\n import importlib.util\n\n spec = importlib.util.spec_from_file_location(\"dvc.version\", version_path)\n dvc_version = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(dvc_version)\n\nversion = dvc_version.__version__ # noqa: F821\n\n\n# To achieve consistency between the build version and the one provided\n# by your package during runtime, you need to **pin** the build version.\n#\n# This custom class will replace the version.py module with a **static**\n# `__version__` that your package can read at runtime, assuring consistancy.\n#\n# References:\n# - https://docs.python.org/3.7/distutils/extending.html\n# - https://github.com/python/mypy\nclass build_py(_build_py):\n def pin_version(self):\n path = os.path.join(self.build_lib, \"dvc\")\n self.mkpath(path)\n with open(os.path.join(path, \"version.py\"), \"w\") as fobj:\n fobj.write(\"# AUTOGENERATED at build time by setup.py\\n\")\n fobj.write('__version__ = \"{}\"\\n'.format(version))\n\n def run(self):\n self.execute(self.pin_version, ())\n _build_py.run(self)\n\n\ninstall_requires = [\n \"ply>=3.9\", # See https://github.com/pyinstaller/pyinstaller/issues/1945\n \"configparser>=3.5.0\",\n \"future>=0.16.0\",\n \"colorama>=0.3.9\",\n \"configobj>=5.0.6\",\n \"gitpython>=2.1.8\",\n \"gitdb2>=2.0.6\", # See https://github.com/iterative/dvc/issues/1880\n \"setuptools>=34.0.0\",\n \"nanotime>=0.5.2\",\n \"pyasn1>=0.4.1\",\n \"schema>=0.6.7\",\n \"jsonpath-ng>=1.4.3\",\n \"requests>=2.22.0\",\n \"grandalf==0.6\",\n \"asciimatics>=1.10.0\",\n \"distro>=1.3.0\",\n \"appdirs>=1.4.3\",\n \"treelib>=1.5.5\",\n \"inflect>=2.1.0\",\n \"humanize>=0.5.1\",\n \"ruamel.yaml>=0.16.1\",\n \"funcy>=1.12\",\n \"pathspec>=0.6.0\",\n \"shortuuid>=0.5.0\",\n \"tqdm>=4.38.0,<5\",\n \"packaging>=19.0\",\n \"win-unicode-console>=0.5; sys_platform == 'win32'\",\n \"pywin32>=225; sys_platform == 'win32'\",\n]\n\nif sys.version_info[0] == 2:\n install_requires.append(\"networkx>=2.1,<2.3\")\nelse:\n install_requires.append(\"networkx>=2.1,<2.4\")\n\n# Extra dependencies for remote integrations\n\ngs = [\"google-cloud-storage==1.19.0\"]\ns3 = [\"boto3==1.9.115\"]\nazure = [\"azure-storage-blob==2.1.0\"]\noss = [\"oss2==2.6.1\"]\nssh = [\"paramiko>=2.5.0\"]\n# gssapi should not be included in all_remotes, because it doesn't have wheels\n# for linux and mac, so it will fail to compile if user doesn't have all the\n# requirements, including kerberos itself. 
Once all the wheels are available,\n# we can start shipping it by default.\nssh_gssapi = [\"paramiko[gssapi]>=2.5.0\"]\nhdfs = [\"pyarrow==0.14.0\"]\nall_remotes = gs + s3 + azure + ssh + oss\n\nif os.name != \"nt\" or sys.version_info[0] != 2:\n # NOTE: there are no pyarrow wheels for python2 on windows\n all_remotes += hdfs\n\n# Extra dependecies to run tests\ntests_requirements = [\n \"PyInstaller==3.5\",\n \"wheel>=0.31.1\",\n \"pydot>=1.2.4\",\n # Test requirements:\n \"pytest>=4.6.0\",\n \"pytest-timeout>=1.3.3\",\n \"pytest-cov>=2.6.1\",\n \"pytest-xdist>=1.26.1\",\n \"pytest-mock>=1.10.4\",\n \"flaky>=3.5.3\",\n \"mock>=3.0.0\",\n \"xmltodict>=0.11.0\",\n \"awscli==1.16.266\",\n \"google-compute-engine==2.8.13\",\n \"Pygments\", # required by collective.checkdocs,\n \"collective.checkdocs\",\n \"flake8\",\n \"psutil\",\n \"flake8-docstrings\",\n \"pydocstyle<4.0\",\n \"jaraco.windows==3.9.2\",\n \"mock-ssh-server>=0.6.0\",\n \"moto==1.3.14.dev464\",\n \"rangehttpserver==1.2.0\",\n]\n\nif (sys.version_info) >= (3, 6):\n tests_requirements.append(\"black==19.3b0\")\n\nsetup(\n name=\"dvc\",\n version=version,\n description=\"Git for data scientists - manage your code and data together\",\n long_description=open(\"README.rst\", \"r\").read(),\n author=\"Dmitry Petrov\",\n author_email=\"[email protected]\",\n download_url=\"https://github.com/iterative/dvc\",\n license=\"Apache License 2.0\",\n install_requires=install_requires,\n extras_require={\n \"all\": all_remotes,\n \"gs\": gs,\n \"s3\": s3,\n \"azure\": azure,\n \"oss\": oss,\n \"ssh\": ssh,\n \"ssh_gssapi\": ssh_gssapi,\n \"hdfs\": hdfs,\n # NOTE: https://github.com/inveniosoftware/troubleshooting/issues/1\n \":python_version=='2.7'\": [\n \"futures\",\n \"pathlib2\",\n \"zc.lockfile>=1.2.1\",\n ],\n \":python_version>='3.0'\": [\"flufl.lock>=3.2\"],\n \"tests\": tests_requirements,\n },\n keywords=\"data science, data version control, machine learning\",\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*\",\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n packages=find_packages(exclude=[\"tests\"]),\n include_package_data=True,\n url=\"http://dvc.org\",\n entry_points={\"console_scripts\": [\"dvc = dvc.main:main\"]},\n cmdclass={\"build_py\": build_py},\n zip_safe=False,\n)\n", "path": "setup.py"}, {"content": "\"\"\"Manages progress bars for dvc repo.\"\"\"\nfrom __future__ import print_function\n\nimport logging\nimport sys\nfrom threading import RLock\n\nfrom funcy import merge\nfrom tqdm import tqdm\n\nfrom dvc.utils import env2bool\n\nlogger = logging.getLogger(__name__)\ntqdm.set_lock(RLock())\n\n\nclass Tqdm(tqdm):\n \"\"\"\n maximum-compatibility tqdm-based progressbars\n \"\"\"\n\n BAR_FMT_DEFAULT = (\n \"{percentage:3.0f}%|{bar:10}|\"\n \"{desc:{ncols_desc}.{ncols_desc}}{n}/{total}\"\n \" [{elapsed}<{remaining}, {rate_fmt:>11}{postfix}]\"\n )\n BAR_FMT_NOTOTAL = (\n \"{desc:{ncols_desc}.{ncols_desc}}{n}\"\n \" [{elapsed}<??:??, {rate_fmt:>11}{postfix}]\"\n )\n\n def __init__(\n self,\n iterable=None,\n disable=None,\n level=logging.ERROR,\n desc=None,\n leave=False,\n bar_format=None,\n bytes=False, # pylint: disable=W0622\n file=None,\n **kwargs\n ):\n \"\"\"\n bytes : shortcut for\n `unit='B', unit_scale=True, 
unit_divisor=1024, miniters=1`\n desc : persists after `close()`\n level : effective logging level for determining `disable`;\n used only if `disable` is unspecified\n disable : If (default: None), will be determined by logging level.\n May be overridden to `True` due to non-TTY status.\n Skip override by specifying env var `DVC_IGNORE_ISATTY`.\n kwargs : anything accepted by `tqdm.tqdm()`\n \"\"\"\n kwargs = kwargs.copy()\n kwargs.setdefault(\"unit_scale\", True)\n if bytes:\n bytes_defaults = dict(\n unit=\"B\", unit_scale=True, unit_divisor=1024, miniters=1\n )\n kwargs = merge(bytes_defaults, kwargs)\n if file is None:\n file = sys.stderr\n self.desc_persist = desc\n # auto-disable based on `logger.level`\n if disable is None:\n disable = logger.getEffectiveLevel() > level\n # auto-disable based on TTY\n if (\n not disable\n and not env2bool(\"DVC_IGNORE_ISATTY\")\n and hasattr(file, \"isatty\")\n ):\n disable = not file.isatty()\n super(Tqdm, self).__init__(\n iterable=iterable,\n disable=disable,\n leave=leave,\n desc=desc,\n bar_format=\"!\",\n lock_args=(False,),\n **kwargs\n )\n if bar_format is None:\n if self.__len__():\n self.bar_format = self.BAR_FMT_DEFAULT\n else:\n self.bar_format = self.BAR_FMT_NOTOTAL\n else:\n self.bar_format = bar_format\n self.refresh()\n\n def update_desc(self, desc, n=1):\n \"\"\"\n Calls `set_description_str(desc)` and `update(n)`\n \"\"\"\n self.set_description_str(desc, refresh=False)\n self.update(n)\n\n def update_to(self, current, total=None):\n if total:\n self.total = total # pylint: disable=W0613,W0201\n self.update(current - self.n)\n\n def close(self):\n if self.desc_persist is not None:\n self.set_description_str(self.desc_persist, refresh=False)\n super(Tqdm, self).close()\n\n @property\n def format_dict(self):\n \"\"\"inject `ncols_desc` to fill the display width (`ncols`)\"\"\"\n d = super(Tqdm, self).format_dict\n ncols = d[\"ncols\"] or 80\n ncols_desc = ncols - len(self.format_meter(ncols_desc=1, **d)) + 1\n ncols_desc = max(ncols_desc, 0)\n if ncols_desc:\n d[\"ncols_desc\"] = ncols_desc\n else:\n # work-around for zero-width description\n d[\"ncols_desc\"] = 1\n d[\"prefix\"] = \"\"\n return d\n", "path": "dvc/progress.py"}]}
| 3,826 | 303 |
gh_patches_debug_2329
|
rasdani/github-patches
|
git_diff
|
goauthentik__authentik-5539
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
scim_sync_all task fails
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Logs**
<details>
<summary>Stacktrace from authentik</summary>
```
Traceback (most recent call last):
File "/usr/local/lib/python3.11/site-packages/celery/app/trace.py", line 451, in trace_task
R = retval = fun(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/sentry_sdk/integrations/celery.py", line 231, in _inner
reraise(*exc_info)
File "/usr/local/lib/python3.11/site-packages/sentry_sdk/_compat.py", line 60, in reraise
raise value
File "/usr/local/lib/python3.11/site-packages/sentry_sdk/integrations/celery.py", line 226, in _inner
return f(*args, **kwargs)
^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/celery/app/trace.py", line 734, in __protected_call__
return self.run(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^
File "/authentik/providers/scim/tasks.py", line 38, in scim_sync_all
for provider in SCIMProvider.objects.all(backchannel_application__isnull=False):
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
builtins.TypeError: BaseManager.all() got an unexpected keyword argument 'backchannel_application__isnull'
```
</details>
**Version and Deployment (please complete the following information):**
- authentik version: 2023.4.1
- Deployment: [e.g. docker-compose, helm]
**Additional context**
Add any other context about the problem here.
--- END ISSUE ---
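The traceback comes down to a detail of Django's ORM API: `Manager.all()` accepts no arguments, while field lookups such as `backchannel_application__isnull` have to go through `QuerySet.filter()`. A minimal sketch of the distinction, runnable in a configured authentik/Django shell (the import mirrors the one already used in `tasks.py` below):

```python
# Sketch only: contrasts the failing call with the working one.
from authentik.providers.scim.models import SCIMProvider

# all() returns every provider and takes no lookup keyword arguments.
every_provider = SCIMProvider.objects.all()

# Field lookups belong in filter().
backchannel_providers = SCIMProvider.objects.filter(
    backchannel_application__isnull=False
)

# Passing the lookup to all() is what raises the TypeError in the traceback:
# SCIMProvider.objects.all(backchannel_application__isnull=False)
```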
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `authentik/providers/scim/tasks.py`
Content:
```
1 """SCIM Provider tasks"""
2 from typing import Any, Optional
3
4 from celery.result import allow_join_result
5 from django.core.paginator import Paginator
6 from django.db.models import Model, QuerySet
7 from django.utils.text import slugify
8 from django.utils.translation import gettext_lazy as _
9 from pydanticscim.responses import PatchOp
10 from structlog.stdlib import get_logger
11
12 from authentik.core.models import Group, User
13 from authentik.events.monitored_tasks import MonitoredTask, TaskResult, TaskResultStatus
14 from authentik.lib.utils.reflection import path_to_class
15 from authentik.providers.scim.clients import PAGE_SIZE
16 from authentik.providers.scim.clients.base import SCIMClient
17 from authentik.providers.scim.clients.exceptions import SCIMRequestException, StopSync
18 from authentik.providers.scim.clients.group import SCIMGroupClient
19 from authentik.providers.scim.clients.user import SCIMUserClient
20 from authentik.providers.scim.models import SCIMProvider
21 from authentik.root.celery import CELERY_APP
22
23 LOGGER = get_logger(__name__)
24
25
26 def client_for_model(provider: SCIMProvider, model: Model) -> SCIMClient:
27 """Get SCIM client for model"""
28 if isinstance(model, User):
29 return SCIMUserClient(provider)
30 if isinstance(model, Group):
31 return SCIMGroupClient(provider)
32 raise ValueError(f"Invalid model {model}")
33
34
35 @CELERY_APP.task()
36 def scim_sync_all():
37 """Run sync for all providers"""
38 for provider in SCIMProvider.objects.all(backchannel_application__isnull=False):
39 scim_sync.delay(provider.pk)
40
41
42 @CELERY_APP.task(bind=True, base=MonitoredTask)
43 def scim_sync(self: MonitoredTask, provider_pk: int) -> None:
44 """Run SCIM full sync for provider"""
45 provider: SCIMProvider = SCIMProvider.objects.filter(pk=provider_pk).first()
46 if not provider:
47 return
48 self.set_uid(slugify(provider.name))
49 result = TaskResult(TaskResultStatus.SUCCESSFUL, [])
50 result.messages.append(_("Starting full SCIM sync"))
51 LOGGER.debug("Starting SCIM sync")
52 users_paginator = Paginator(provider.get_user_qs(), PAGE_SIZE)
53 groups_paginator = Paginator(provider.get_group_qs(), PAGE_SIZE)
54 with allow_join_result():
55 try:
56 for page in users_paginator.page_range:
57 result.messages.append(_("Syncing page %(page)d of users" % {"page": page}))
58 for msg in scim_sync_users.delay(page, provider_pk).get():
59 result.messages.append(msg)
60 for page in groups_paginator.page_range:
61 result.messages.append(_("Syncing page %(page)d of groups" % {"page": page}))
62 for msg in scim_sync_group.delay(page, provider_pk).get():
63 result.messages.append(msg)
64 except StopSync as exc:
65 self.set_status(TaskResult(TaskResultStatus.ERROR).with_error(exc))
66 return
67 self.set_status(result)
68
69
70 @CELERY_APP.task()
71 def scim_sync_users(page: int, provider_pk: int):
72 """Sync single or multiple users to SCIM"""
73 messages = []
74 provider: SCIMProvider = SCIMProvider.objects.filter(pk=provider_pk).first()
75 if not provider:
76 return messages
77 try:
78 client = SCIMUserClient(provider)
79 except SCIMRequestException:
80 return messages
81 paginator = Paginator(provider.get_user_qs(), PAGE_SIZE)
82 LOGGER.debug("starting user sync for page", page=page)
83 for user in paginator.page(page).object_list:
84 try:
85 client.write(user)
86 except SCIMRequestException as exc:
87 LOGGER.warning("failed to sync user", exc=exc, user=user)
88 messages.append(
89 _(
90 "Failed to sync user due to remote error %(name)s: %(error)s"
91 % {
92 "name": user.username,
93 "error": str(exc),
94 }
95 )
96 )
97 except StopSync as exc:
98 LOGGER.warning("Stopping sync", exc=exc)
99 messages.append(
100 _(
101 "Stopping sync due to error: %(error)s"
102 % {
103 "error": str(exc),
104 }
105 )
106 )
107 break
108 return messages
109
110
111 @CELERY_APP.task()
112 def scim_sync_group(page: int, provider_pk: int):
113 """Sync single or multiple groups to SCIM"""
114 messages = []
115 provider: SCIMProvider = SCIMProvider.objects.filter(pk=provider_pk).first()
116 if not provider:
117 return messages
118 try:
119 client = SCIMGroupClient(provider)
120 except SCIMRequestException:
121 return messages
122 paginator = Paginator(provider.get_group_qs(), PAGE_SIZE)
123 LOGGER.debug("starting group sync for page", page=page)
124 for group in paginator.page(page).object_list:
125 try:
126 client.write(group)
127 except SCIMRequestException as exc:
128 LOGGER.warning("failed to sync group", exc=exc, group=group)
129 messages.append(
130 _(
131 "Failed to sync group due to remote error %(name)s: %(error)s"
132 % {
133 "name": group.name,
134 "error": str(exc),
135 }
136 )
137 )
138 except StopSync as exc:
139 LOGGER.warning("Stopping sync", exc=exc)
140 messages.append(
141 _(
142 "Stopping sync due to error: %(error)s"
143 % {
144 "error": str(exc),
145 }
146 )
147 )
148 break
149 return messages
150
151
152 @CELERY_APP.task()
153 def scim_signal_direct(model: str, pk: Any, raw_op: str):
154 """Handler for post_save and pre_delete signal"""
155 model_class: type[Model] = path_to_class(model)
156 instance = model_class.objects.filter(pk=pk).first()
157 if not instance:
158 return
159 operation = PatchOp(raw_op)
160 for provider in SCIMProvider.objects.filter(backchannel_application__isnull=False):
161 client = client_for_model(provider, instance)
162 # Check if the object is allowed within the provider's restrictions
163 queryset: Optional[QuerySet] = None
164 if isinstance(instance, User):
165 queryset = provider.get_user_qs()
166 if isinstance(instance, Group):
167 queryset = provider.get_group_qs()
168 if not queryset:
169 continue
170
171 # The queryset we get from the provider must include the instance we've got given
172 # otherwise ignore this provider
173 if not queryset.filter(pk=instance.pk).exists():
174 continue
175
176 try:
177 if operation == PatchOp.add:
178 client.write(instance)
179 if operation == PatchOp.remove:
180 client.delete(instance)
181 except (StopSync, SCIMRequestException) as exc:
182 LOGGER.warning(exc)
183
184
185 @CELERY_APP.task()
186 def scim_signal_m2m(group_pk: str, action: str, pk_set: list[int]):
187 """Update m2m (group membership)"""
188 group = Group.objects.filter(pk=group_pk).first()
189 if not group:
190 return
191 for provider in SCIMProvider.objects.filter(backchannel_application__isnull=False):
192 # Check if the object is allowed within the provider's restrictions
193 queryset: QuerySet = provider.get_group_qs()
194 # The queryset we get from the provider must include the instance we've got given
195 # otherwise ignore this provider
196 if not queryset.filter(pk=group_pk).exists():
197 continue
198
199 client = SCIMGroupClient(provider)
200 try:
201 operation = None
202 if action == "post_add":
203 operation = PatchOp.add
204 if action == "post_remove":
205 operation = PatchOp.remove
206 client.update_group(group, operation, pk_set)
207 except (StopSync, SCIMRequestException) as exc:
208 LOGGER.warning(exc)
209
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/authentik/providers/scim/tasks.py b/authentik/providers/scim/tasks.py
--- a/authentik/providers/scim/tasks.py
+++ b/authentik/providers/scim/tasks.py
@@ -35,7 +35,7 @@
@CELERY_APP.task()
def scim_sync_all():
"""Run sync for all providers"""
- for provider in SCIMProvider.objects.all(backchannel_application__isnull=False):
+ for provider in SCIMProvider.objects.filter(backchannel_application__isnull=False):
scim_sync.delay(provider.pk)
|
{"golden_diff": "diff --git a/authentik/providers/scim/tasks.py b/authentik/providers/scim/tasks.py\n--- a/authentik/providers/scim/tasks.py\n+++ b/authentik/providers/scim/tasks.py\n@@ -35,7 +35,7 @@\n @CELERY_APP.task()\n def scim_sync_all():\n \"\"\"Run sync for all providers\"\"\"\n- for provider in SCIMProvider.objects.all(backchannel_application__isnull=False):\n+ for provider in SCIMProvider.objects.filter(backchannel_application__isnull=False):\n scim_sync.delay(provider.pk)\n", "issue": "scim_sync_all task fails\n**Describe the bug**\r\nA clear and concise description of what the bug is.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to '...'\r\n2. Click on '....'\r\n3. Scroll down to '....'\r\n4. See error\r\n\r\n**Expected behavior**\r\nA clear and concise description of what you expected to happen.\r\n\r\n**Screenshots**\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n**Logs**\r\n<details>\r\n <summary>Stacktrace from authentik</summary>\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.11/site-packages/celery/app/trace.py\", line 451, in trace_task\r\n R = retval = fun(*args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/sentry_sdk/integrations/celery.py\", line 231, in _inner\r\n reraise(*exc_info)\r\n File \"/usr/local/lib/python3.11/site-packages/sentry_sdk/_compat.py\", line 60, in reraise\r\n raise value\r\n File \"/usr/local/lib/python3.11/site-packages/sentry_sdk/integrations/celery.py\", line 226, in _inner\r\n return f(*args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/lib/python3.11/site-packages/celery/app/trace.py\", line 734, in __protected_call__\r\n return self.run(*args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/authentik/providers/scim/tasks.py\", line 38, in scim_sync_all\r\n for provider in SCIMProvider.objects.all(backchannel_application__isnull=False):\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\nbuiltins.TypeError: BaseManager.all() got an unexpected keyword argument 'backchannel_application__isnull'\r\n```\r\n</details>\r\n\r\n\r\n**Version and Deployment (please complete the following information):**\r\n- authentik version: 2023.4.1\r\n- Deployment: [e.g. 
docker-compose, helm]\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n \n", "before_files": [{"content": "\"\"\"SCIM Provider tasks\"\"\"\nfrom typing import Any, Optional\n\nfrom celery.result import allow_join_result\nfrom django.core.paginator import Paginator\nfrom django.db.models import Model, QuerySet\nfrom django.utils.text import slugify\nfrom django.utils.translation import gettext_lazy as _\nfrom pydanticscim.responses import PatchOp\nfrom structlog.stdlib import get_logger\n\nfrom authentik.core.models import Group, User\nfrom authentik.events.monitored_tasks import MonitoredTask, TaskResult, TaskResultStatus\nfrom authentik.lib.utils.reflection import path_to_class\nfrom authentik.providers.scim.clients import PAGE_SIZE\nfrom authentik.providers.scim.clients.base import SCIMClient\nfrom authentik.providers.scim.clients.exceptions import SCIMRequestException, StopSync\nfrom authentik.providers.scim.clients.group import SCIMGroupClient\nfrom authentik.providers.scim.clients.user import SCIMUserClient\nfrom authentik.providers.scim.models import SCIMProvider\nfrom authentik.root.celery import CELERY_APP\n\nLOGGER = get_logger(__name__)\n\n\ndef client_for_model(provider: SCIMProvider, model: Model) -> SCIMClient:\n \"\"\"Get SCIM client for model\"\"\"\n if isinstance(model, User):\n return SCIMUserClient(provider)\n if isinstance(model, Group):\n return SCIMGroupClient(provider)\n raise ValueError(f\"Invalid model {model}\")\n\n\n@CELERY_APP.task()\ndef scim_sync_all():\n \"\"\"Run sync for all providers\"\"\"\n for provider in SCIMProvider.objects.all(backchannel_application__isnull=False):\n scim_sync.delay(provider.pk)\n\n\n@CELERY_APP.task(bind=True, base=MonitoredTask)\ndef scim_sync(self: MonitoredTask, provider_pk: int) -> None:\n \"\"\"Run SCIM full sync for provider\"\"\"\n provider: SCIMProvider = SCIMProvider.objects.filter(pk=provider_pk).first()\n if not provider:\n return\n self.set_uid(slugify(provider.name))\n result = TaskResult(TaskResultStatus.SUCCESSFUL, [])\n result.messages.append(_(\"Starting full SCIM sync\"))\n LOGGER.debug(\"Starting SCIM sync\")\n users_paginator = Paginator(provider.get_user_qs(), PAGE_SIZE)\n groups_paginator = Paginator(provider.get_group_qs(), PAGE_SIZE)\n with allow_join_result():\n try:\n for page in users_paginator.page_range:\n result.messages.append(_(\"Syncing page %(page)d of users\" % {\"page\": page}))\n for msg in scim_sync_users.delay(page, provider_pk).get():\n result.messages.append(msg)\n for page in groups_paginator.page_range:\n result.messages.append(_(\"Syncing page %(page)d of groups\" % {\"page\": page}))\n for msg in scim_sync_group.delay(page, provider_pk).get():\n result.messages.append(msg)\n except StopSync as exc:\n self.set_status(TaskResult(TaskResultStatus.ERROR).with_error(exc))\n return\n self.set_status(result)\n\n\n@CELERY_APP.task()\ndef scim_sync_users(page: int, provider_pk: int):\n \"\"\"Sync single or multiple users to SCIM\"\"\"\n messages = []\n provider: SCIMProvider = SCIMProvider.objects.filter(pk=provider_pk).first()\n if not provider:\n return messages\n try:\n client = SCIMUserClient(provider)\n except SCIMRequestException:\n return messages\n paginator = Paginator(provider.get_user_qs(), PAGE_SIZE)\n LOGGER.debug(\"starting user sync for page\", page=page)\n for user in paginator.page(page).object_list:\n try:\n client.write(user)\n except SCIMRequestException as exc:\n LOGGER.warning(\"failed to sync user\", exc=exc, user=user)\n messages.append(\n 
_(\n \"Failed to sync user due to remote error %(name)s: %(error)s\"\n % {\n \"name\": user.username,\n \"error\": str(exc),\n }\n )\n )\n except StopSync as exc:\n LOGGER.warning(\"Stopping sync\", exc=exc)\n messages.append(\n _(\n \"Stopping sync due to error: %(error)s\"\n % {\n \"error\": str(exc),\n }\n )\n )\n break\n return messages\n\n\n@CELERY_APP.task()\ndef scim_sync_group(page: int, provider_pk: int):\n \"\"\"Sync single or multiple groups to SCIM\"\"\"\n messages = []\n provider: SCIMProvider = SCIMProvider.objects.filter(pk=provider_pk).first()\n if not provider:\n return messages\n try:\n client = SCIMGroupClient(provider)\n except SCIMRequestException:\n return messages\n paginator = Paginator(provider.get_group_qs(), PAGE_SIZE)\n LOGGER.debug(\"starting group sync for page\", page=page)\n for group in paginator.page(page).object_list:\n try:\n client.write(group)\n except SCIMRequestException as exc:\n LOGGER.warning(\"failed to sync group\", exc=exc, group=group)\n messages.append(\n _(\n \"Failed to sync group due to remote error %(name)s: %(error)s\"\n % {\n \"name\": group.name,\n \"error\": str(exc),\n }\n )\n )\n except StopSync as exc:\n LOGGER.warning(\"Stopping sync\", exc=exc)\n messages.append(\n _(\n \"Stopping sync due to error: %(error)s\"\n % {\n \"error\": str(exc),\n }\n )\n )\n break\n return messages\n\n\n@CELERY_APP.task()\ndef scim_signal_direct(model: str, pk: Any, raw_op: str):\n \"\"\"Handler for post_save and pre_delete signal\"\"\"\n model_class: type[Model] = path_to_class(model)\n instance = model_class.objects.filter(pk=pk).first()\n if not instance:\n return\n operation = PatchOp(raw_op)\n for provider in SCIMProvider.objects.filter(backchannel_application__isnull=False):\n client = client_for_model(provider, instance)\n # Check if the object is allowed within the provider's restrictions\n queryset: Optional[QuerySet] = None\n if isinstance(instance, User):\n queryset = provider.get_user_qs()\n if isinstance(instance, Group):\n queryset = provider.get_group_qs()\n if not queryset:\n continue\n\n # The queryset we get from the provider must include the instance we've got given\n # otherwise ignore this provider\n if not queryset.filter(pk=instance.pk).exists():\n continue\n\n try:\n if operation == PatchOp.add:\n client.write(instance)\n if operation == PatchOp.remove:\n client.delete(instance)\n except (StopSync, SCIMRequestException) as exc:\n LOGGER.warning(exc)\n\n\n@CELERY_APP.task()\ndef scim_signal_m2m(group_pk: str, action: str, pk_set: list[int]):\n \"\"\"Update m2m (group membership)\"\"\"\n group = Group.objects.filter(pk=group_pk).first()\n if not group:\n return\n for provider in SCIMProvider.objects.filter(backchannel_application__isnull=False):\n # Check if the object is allowed within the provider's restrictions\n queryset: QuerySet = provider.get_group_qs()\n # The queryset we get from the provider must include the instance we've got given\n # otherwise ignore this provider\n if not queryset.filter(pk=group_pk).exists():\n continue\n\n client = SCIMGroupClient(provider)\n try:\n operation = None\n if action == \"post_add\":\n operation = PatchOp.add\n if action == \"post_remove\":\n operation = PatchOp.remove\n client.update_group(group, operation, pk_set)\n except (StopSync, SCIMRequestException) as exc:\n LOGGER.warning(exc)\n", "path": "authentik/providers/scim/tasks.py"}], "after_files": [{"content": "\"\"\"SCIM Provider tasks\"\"\"\nfrom typing import Any, Optional\n\nfrom celery.result import allow_join_result\nfrom 
django.core.paginator import Paginator\nfrom django.db.models import Model, QuerySet\nfrom django.utils.text import slugify\nfrom django.utils.translation import gettext_lazy as _\nfrom pydanticscim.responses import PatchOp\nfrom structlog.stdlib import get_logger\n\nfrom authentik.core.models import Group, User\nfrom authentik.events.monitored_tasks import MonitoredTask, TaskResult, TaskResultStatus\nfrom authentik.lib.utils.reflection import path_to_class\nfrom authentik.providers.scim.clients import PAGE_SIZE\nfrom authentik.providers.scim.clients.base import SCIMClient\nfrom authentik.providers.scim.clients.exceptions import SCIMRequestException, StopSync\nfrom authentik.providers.scim.clients.group import SCIMGroupClient\nfrom authentik.providers.scim.clients.user import SCIMUserClient\nfrom authentik.providers.scim.models import SCIMProvider\nfrom authentik.root.celery import CELERY_APP\n\nLOGGER = get_logger(__name__)\n\n\ndef client_for_model(provider: SCIMProvider, model: Model) -> SCIMClient:\n \"\"\"Get SCIM client for model\"\"\"\n if isinstance(model, User):\n return SCIMUserClient(provider)\n if isinstance(model, Group):\n return SCIMGroupClient(provider)\n raise ValueError(f\"Invalid model {model}\")\n\n\n@CELERY_APP.task()\ndef scim_sync_all():\n \"\"\"Run sync for all providers\"\"\"\n for provider in SCIMProvider.objects.filter(backchannel_application__isnull=False):\n scim_sync.delay(provider.pk)\n\n\n@CELERY_APP.task(bind=True, base=MonitoredTask)\ndef scim_sync(self: MonitoredTask, provider_pk: int) -> None:\n \"\"\"Run SCIM full sync for provider\"\"\"\n provider: SCIMProvider = SCIMProvider.objects.filter(pk=provider_pk).first()\n if not provider:\n return\n self.set_uid(slugify(provider.name))\n result = TaskResult(TaskResultStatus.SUCCESSFUL, [])\n result.messages.append(_(\"Starting full SCIM sync\"))\n LOGGER.debug(\"Starting SCIM sync\")\n users_paginator = Paginator(provider.get_user_qs(), PAGE_SIZE)\n groups_paginator = Paginator(provider.get_group_qs(), PAGE_SIZE)\n with allow_join_result():\n try:\n for page in users_paginator.page_range:\n result.messages.append(_(\"Syncing page %(page)d of users\" % {\"page\": page}))\n for msg in scim_sync_users.delay(page, provider_pk).get():\n result.messages.append(msg)\n for page in groups_paginator.page_range:\n result.messages.append(_(\"Syncing page %(page)d of groups\" % {\"page\": page}))\n for msg in scim_sync_group.delay(page, provider_pk).get():\n result.messages.append(msg)\n except StopSync as exc:\n self.set_status(TaskResult(TaskResultStatus.ERROR).with_error(exc))\n return\n self.set_status(result)\n\n\n@CELERY_APP.task()\ndef scim_sync_users(page: int, provider_pk: int):\n \"\"\"Sync single or multiple users to SCIM\"\"\"\n messages = []\n provider: SCIMProvider = SCIMProvider.objects.filter(pk=provider_pk).first()\n if not provider:\n return messages\n try:\n client = SCIMUserClient(provider)\n except SCIMRequestException:\n return messages\n paginator = Paginator(provider.get_user_qs(), PAGE_SIZE)\n LOGGER.debug(\"starting user sync for page\", page=page)\n for user in paginator.page(page).object_list:\n try:\n client.write(user)\n except SCIMRequestException as exc:\n LOGGER.warning(\"failed to sync user\", exc=exc, user=user)\n messages.append(\n _(\n \"Failed to sync user due to remote error %(name)s: %(error)s\"\n % {\n \"name\": user.username,\n \"error\": str(exc),\n }\n )\n )\n except StopSync as exc:\n LOGGER.warning(\"Stopping sync\", exc=exc)\n messages.append(\n _(\n \"Stopping sync due to 
error: %(error)s\"\n % {\n \"error\": str(exc),\n }\n )\n )\n break\n return messages\n\n\n@CELERY_APP.task()\ndef scim_sync_group(page: int, provider_pk: int):\n \"\"\"Sync single or multiple groups to SCIM\"\"\"\n messages = []\n provider: SCIMProvider = SCIMProvider.objects.filter(pk=provider_pk).first()\n if not provider:\n return messages\n try:\n client = SCIMGroupClient(provider)\n except SCIMRequestException:\n return messages\n paginator = Paginator(provider.get_group_qs(), PAGE_SIZE)\n LOGGER.debug(\"starting group sync for page\", page=page)\n for group in paginator.page(page).object_list:\n try:\n client.write(group)\n except SCIMRequestException as exc:\n LOGGER.warning(\"failed to sync group\", exc=exc, group=group)\n messages.append(\n _(\n \"Failed to sync group due to remote error %(name)s: %(error)s\"\n % {\n \"name\": group.name,\n \"error\": str(exc),\n }\n )\n )\n except StopSync as exc:\n LOGGER.warning(\"Stopping sync\", exc=exc)\n messages.append(\n _(\n \"Stopping sync due to error: %(error)s\"\n % {\n \"error\": str(exc),\n }\n )\n )\n break\n return messages\n\n\n@CELERY_APP.task()\ndef scim_signal_direct(model: str, pk: Any, raw_op: str):\n \"\"\"Handler for post_save and pre_delete signal\"\"\"\n model_class: type[Model] = path_to_class(model)\n instance = model_class.objects.filter(pk=pk).first()\n if not instance:\n return\n operation = PatchOp(raw_op)\n for provider in SCIMProvider.objects.filter(backchannel_application__isnull=False):\n client = client_for_model(provider, instance)\n # Check if the object is allowed within the provider's restrictions\n queryset: Optional[QuerySet] = None\n if isinstance(instance, User):\n queryset = provider.get_user_qs()\n if isinstance(instance, Group):\n queryset = provider.get_group_qs()\n if not queryset:\n continue\n\n # The queryset we get from the provider must include the instance we've got given\n # otherwise ignore this provider\n if not queryset.filter(pk=instance.pk).exists():\n continue\n\n try:\n if operation == PatchOp.add:\n client.write(instance)\n if operation == PatchOp.remove:\n client.delete(instance)\n except (StopSync, SCIMRequestException) as exc:\n LOGGER.warning(exc)\n\n\n@CELERY_APP.task()\ndef scim_signal_m2m(group_pk: str, action: str, pk_set: list[int]):\n \"\"\"Update m2m (group membership)\"\"\"\n group = Group.objects.filter(pk=group_pk).first()\n if not group:\n return\n for provider in SCIMProvider.objects.filter(backchannel_application__isnull=False):\n # Check if the object is allowed within the provider's restrictions\n queryset: QuerySet = provider.get_group_qs()\n # The queryset we get from the provider must include the instance we've got given\n # otherwise ignore this provider\n if not queryset.filter(pk=group_pk).exists():\n continue\n\n client = SCIMGroupClient(provider)\n try:\n operation = None\n if action == \"post_add\":\n operation = PatchOp.add\n if action == \"post_remove\":\n operation = PatchOp.remove\n client.update_group(group, operation, pk_set)\n except (StopSync, SCIMRequestException) as exc:\n LOGGER.warning(exc)\n", "path": "authentik/providers/scim/tasks.py"}]}
| 2,916 | 120 |
gh_patches_debug_4527
|
rasdani/github-patches
|
git_diff
|
ethereum__web3.py-321
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Changes to web3.eth.estimateGas
Check whether 'from' parameter is already in transaction and use it. Otherwise use defaultAccount.
Basically to implement the same behavior as for 'sendTransaction'.
--- END ISSUE ---
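In other words, `estimateGas` should use the same guard that `sendTransaction` already applies in `web3/eth.py` (shown below): only fall back to `defaultAccount` when the caller did not set `from`. A sketch of the adjusted method, reusing the `assoc` and `is_address` helpers the module already imports:

```python
def estimateGas(self, transaction):
    # Respect an explicit 'from' key; otherwise fall back to defaultAccount,
    # matching sendTransaction's behaviour.
    if 'from' not in transaction and is_address(self.defaultAccount):
        transaction = assoc(transaction, 'from', self.defaultAccount)

    return self.web3.manager.request_blocking(
        "eth_estimateGas",
        [transaction],
    )
```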
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `web3/eth.py`
Content:
```
1 from __future__ import unicode_literals
2
3 from cytoolz import compose
4 from cytoolz.dicttoolz import (
5 assoc,
6 )
7
8 from eth_utils import (
9 is_address,
10 is_string,
11 keccak,
12 )
13
14 from web3.iban import Iban
15
16 from web3.contract import (
17 Contract,
18 )
19 from web3.module import (
20 Module,
21 )
22
23 from web3.utils.blocks import (
24 select_method_for_block_identifier,
25 )
26 from web3.utils.signing import (
27 signature_wrapper,
28 )
29 from web3.utils.empty import (
30 empty,
31 )
32 from web3.utils.encoding import (
33 to_bytes,
34 to_hex,
35 )
36 from web3.utils.filters import (
37 BlockFilter,
38 TransactionFilter,
39 LogFilter,
40 )
41 from web3.utils.transactions import (
42 get_buffered_gas_estimate,
43 )
44 from web3.utils.validation import (
45 validate_address,
46 validate_address_checksum,
47 )
48
49
50 class Eth(Module):
51 defaultAccount = empty
52 defaultBlock = "latest"
53 defaultContractFactory = Contract
54 iban = Iban
55
56 def namereg(self):
57 raise NotImplementedError()
58
59 def icapNamereg(self):
60 raise NotImplementedError()
61
62 @property
63 def protocolVersion(self):
64 return self.web3.manager.request_blocking("eth_protocolVersion", [])
65
66 @property
67 def syncing(self):
68 return self.web3.manager.request_blocking("eth_syncing", [])
69
70 @property
71 def coinbase(self):
72 return self.web3.manager.request_blocking("eth_coinbase", [])
73
74 @property
75 def mining(self):
76 return self.web3.manager.request_blocking("eth_mining", [])
77
78 @property
79 def hashrate(self):
80 return self.web3.manager.request_blocking("eth_hashrate", [])
81
82 @property
83 def gasPrice(self):
84 return self.web3.manager.request_blocking("eth_gasPrice", [])
85
86 @property
87 def accounts(self):
88 return self.web3.manager.request_blocking("eth_accounts", [])
89
90 @property
91 def blockNumber(self):
92 return self.web3.manager.request_blocking("eth_blockNumber", [])
93
94 def getBalance(self, account, block_identifier=None):
95 if block_identifier is None:
96 block_identifier = self.defaultBlock
97 return self.web3.manager.request_blocking(
98 "eth_getBalance",
99 [account, block_identifier],
100 )
101
102 def getStorageAt(self, account, position, block_identifier=None):
103 if block_identifier is None:
104 block_identifier = self.defaultBlock
105 return self.web3.manager.request_blocking(
106 "eth_getStorageAt",
107 [account, position, block_identifier]
108 )
109
110 def getCode(self, account, block_identifier=None):
111 if block_identifier is None:
112 block_identifier = self.defaultBlock
113 return self.web3.manager.request_blocking(
114 "eth_getCode",
115 [account, block_identifier],
116 )
117
118 def getBlock(self, block_identifier, full_transactions=False):
119 """
120 `eth_getBlockByHash`
121 `eth_getBlockByNumber`
122 """
123 method = select_method_for_block_identifier(
124 block_identifier,
125 if_predefined='eth_getBlockByNumber',
126 if_hash='eth_getBlockByHash',
127 if_number='eth_getBlockByNumber',
128 )
129
130 return self.web3.manager.request_blocking(
131 method,
132 [block_identifier, full_transactions],
133 )
134
135 def getBlockTransactionCount(self, block_identifier):
136 """
137 `eth_getBlockTransactionCountByHash`
138 `eth_getBlockTransactionCountByNumber`
139 """
140 method = select_method_for_block_identifier(
141 block_identifier,
142 if_predefined='eth_getBlockTransactionCountByNumber',
143 if_hash='eth_getBlockTransactionCountByHash',
144 if_number='eth_getBlockTransactionCountByNumber',
145 )
146 return self.web3.manager.request_blocking(
147 method,
148 [block_identifier],
149 )
150
151 def getUncleCount(self, block_identifier):
152 """
153 `eth_getUncleCountByBlockHash`
154 `eth_getUncleCountByBlockNumber`
155 """
156 method = select_method_for_block_identifier(
157 block_identifier,
158 if_predefined='eth_getUncleCountByBlockNumber',
159 if_hash='eth_getUncleCountByBlockHash',
160 if_number='eth_getUncleCountByBlockNumber',
161 )
162 return self.web3.manager.request_blocking(
163 method,
164 [block_identifier],
165 )
166
167 def getTransaction(self, transaction_hash):
168 return self.web3.manager.request_blocking(
169 "eth_getTransactionByHash",
170 [transaction_hash],
171 )
172
173 def getTransactionFromBlock(self, block_identifier, transaction_index):
174 """
175 `eth_getTransactionByBlockHashAndIndex`
176 `eth_getTransactionByBlockNumberAndIndex`
177 """
178 method = select_method_for_block_identifier(
179 block_identifier,
180 if_predefined='eth_getTransactionByBlockNumberAndIndex',
181 if_hash='eth_getTransactionByBlockHashAndIndex',
182 if_number='eth_getTransactionByBlockNumberAndIndex',
183 )
184 return self.web3.manager.request_blocking(
185 method,
186 [block_identifier, transaction_index],
187 )
188
189 def getTransactionReceipt(self, transaction_hash):
190 return self.web3.manager.request_blocking(
191 "eth_getTransactionReceipt",
192 [transaction_hash],
193 )
194
195 def getTransactionCount(self, account, block_identifier=None):
196 if block_identifier is None:
197 block_identifier = self.defaultBlock
198 return self.web3.manager.request_blocking(
199 "eth_getTransactionCount",
200 [
201 account,
202 block_identifier,
203 ],
204 )
205
206 def sendTransaction(self, transaction):
207 # TODO: move to middleware
208 if 'from' not in transaction and is_address(self.defaultAccount):
209 transaction = assoc(transaction, 'from', self.defaultAccount)
210
211 # TODO: move gas estimation in middleware
212 if 'gas' not in transaction:
213 transaction = assoc(
214 transaction,
215 'gas',
216 get_buffered_gas_estimate(self.web3, transaction),
217 )
218
219 return self.web3.manager.request_blocking(
220 "eth_sendTransaction",
221 [transaction],
222 )
223
224 def sendRawTransaction(self, raw_transaction):
225 return self.web3.manager.request_blocking(
226 "eth_sendRawTransaction",
227 [raw_transaction],
228 )
229
230 def sign(self, account, data=None, hexstr=None, text=None):
231 message_hex = to_hex(data, hexstr=hexstr, text=text)
232 return self.web3.manager.request_blocking(
233 "eth_sign", [account, message_hex],
234 )
235
236 @staticmethod
237 def _recoveryMessageHash(data=None, hexstr=None, text=None):
238 message_bytes = to_bytes(data, hexstr=hexstr, text=text)
239 recovery_hasher = compose(to_hex, keccak, signature_wrapper)
240 return recovery_hasher(message_bytes)
241
242 def call(self, transaction, block_identifier=None):
243 # TODO: move to middleware
244 if 'from' not in transaction and is_address(self.defaultAccount):
245 transaction = assoc(transaction, 'from', self.defaultAccount)
246
247 # TODO: move to middleware
248 if block_identifier is None:
249 block_identifier = self.defaultBlock
250
251 return self.web3.manager.request_blocking(
252 "eth_call",
253 [transaction, block_identifier],
254 )
255
256 def estimateGas(self, transaction):
257 # TODO: move to middleware
258 if is_address(self.defaultAccount):
259 transaction = assoc(transaction, 'from', self.defaultAccount)
260
261 return self.web3.manager.request_blocking(
262 "eth_estimateGas",
263 [transaction],
264 )
265
266 def filter(self, filter_params):
267 if is_string(filter_params):
268 if filter_params == "latest":
269 filter_id = self.web3.manager.request_blocking(
270 "eth_newBlockFilter", [],
271 )
272 return BlockFilter(self.web3, filter_id)
273 elif filter_params == "pending":
274 filter_id = self.web3.manager.request_blocking(
275 "eth_newPendingTransactionFilter", [],
276 )
277 return TransactionFilter(self.web3, filter_id)
278 else:
279 raise ValueError(
280 "The filter API only accepts the values of `pending` or "
281 "`latest` for string based filters"
282 )
283 elif isinstance(filter_params, dict):
284 filter_id = self.web3.manager.request_blocking(
285 "eth_newFilter",
286 [filter_params],
287 )
288 return LogFilter(self.web3, filter_id)
289 else:
290 raise ValueError("Must provide either a string or a valid filter object")
291
292 def getFilterChanges(self, filter_id):
293 return self.web3.manager.request_blocking(
294 "eth_getFilterChanges", [filter_id],
295 )
296
297 def getFilterLogs(self, filter_id):
298 return self.web3.manager.request_blocking(
299 "eth_getFilterLogs", [filter_id],
300 )
301
302 def getLogs(self, filter_params):
303 raise NotImplementedError("Not yet implemented")
304
305 def uninstallFilter(self, filter_id):
306 return self.web3.manager.request_blocking(
307 "eth_uninstallFilter", [filter_id],
308 )
309
310 def contract(self,
311 *args,
312 **kwargs):
313 ContractFactoryClass = kwargs.pop('ContractFactoryClass', self.defaultContractFactory)
314 contract_name = kwargs.pop('contract_name', None)
315
316 has_address = any((
317 'address' in kwargs,
318 len(args) >= 1 and is_address(args[0]),
319 len(args) >= 2 and is_address(args[1]),
320 ))
321
322 for potential_address in args:
323 validate_address_checksum(potential_address)
324
325 if has_address:
326 if 'address' in kwargs:
327 address = kwargs.pop('address')
328 elif is_address(args[0]):
329 address = args[0]
330 elif is_address(args[1]):
331 address = args[1]
332 kwargs['abi'] = args[0]
333 validate_address(address)
334
335 return ContractFactoryClass.factory(self.web3, contract_name, **kwargs)(address)
336 else:
337 try:
338 kwargs['abi'] = args[0]
339 except IndexError:
340 pass
341 return ContractFactoryClass.factory(self.web3, contract_name, **kwargs)
342
343 def setContractFactory(self, contractFactory):
344 self.defaultContractFactory = contractFactory
345
346 def getCompilers(self):
347 return self.web3.manager.request_blocking("eth_getCompilers", [])
348
349 def getWork(self):
350 return self.web3.manager.request_blocking("eth_getWork", [])
351
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/web3/eth.py b/web3/eth.py
--- a/web3/eth.py
+++ b/web3/eth.py
@@ -255,7 +255,7 @@
def estimateGas(self, transaction):
# TODO: move to middleware
- if is_address(self.defaultAccount):
+ if 'from' not in transaction and is_address(self.defaultAccount):
transaction = assoc(transaction, 'from', self.defaultAccount)
return self.web3.manager.request_blocking(
|
{"golden_diff": "diff --git a/web3/eth.py b/web3/eth.py\n--- a/web3/eth.py\n+++ b/web3/eth.py\n@@ -255,7 +255,7 @@\n \n def estimateGas(self, transaction):\n # TODO: move to middleware\n- if is_address(self.defaultAccount):\n+ if 'from' not in transaction and is_address(self.defaultAccount):\n transaction = assoc(transaction, 'from', self.defaultAccount)\n \n return self.web3.manager.request_blocking(\n", "issue": "Changes to web3.eth.estimateGas\nCheck whether 'from' parameter is already in transaction and use it. Otherwise use defaultAccount.\r\nBasically to implement the same behavior as for 'sendTransaction'.\r\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom cytoolz import compose\nfrom cytoolz.dicttoolz import (\n assoc,\n)\n\nfrom eth_utils import (\n is_address,\n is_string,\n keccak,\n)\n\nfrom web3.iban import Iban\n\nfrom web3.contract import (\n Contract,\n)\nfrom web3.module import (\n Module,\n)\n\nfrom web3.utils.blocks import (\n select_method_for_block_identifier,\n)\nfrom web3.utils.signing import (\n signature_wrapper,\n)\nfrom web3.utils.empty import (\n empty,\n)\nfrom web3.utils.encoding import (\n to_bytes,\n to_hex,\n)\nfrom web3.utils.filters import (\n BlockFilter,\n TransactionFilter,\n LogFilter,\n)\nfrom web3.utils.transactions import (\n get_buffered_gas_estimate,\n)\nfrom web3.utils.validation import (\n validate_address,\n validate_address_checksum,\n)\n\n\nclass Eth(Module):\n defaultAccount = empty\n defaultBlock = \"latest\"\n defaultContractFactory = Contract\n iban = Iban\n\n def namereg(self):\n raise NotImplementedError()\n\n def icapNamereg(self):\n raise NotImplementedError()\n\n @property\n def protocolVersion(self):\n return self.web3.manager.request_blocking(\"eth_protocolVersion\", [])\n\n @property\n def syncing(self):\n return self.web3.manager.request_blocking(\"eth_syncing\", [])\n\n @property\n def coinbase(self):\n return self.web3.manager.request_blocking(\"eth_coinbase\", [])\n\n @property\n def mining(self):\n return self.web3.manager.request_blocking(\"eth_mining\", [])\n\n @property\n def hashrate(self):\n return self.web3.manager.request_blocking(\"eth_hashrate\", [])\n\n @property\n def gasPrice(self):\n return self.web3.manager.request_blocking(\"eth_gasPrice\", [])\n\n @property\n def accounts(self):\n return self.web3.manager.request_blocking(\"eth_accounts\", [])\n\n @property\n def blockNumber(self):\n return self.web3.manager.request_blocking(\"eth_blockNumber\", [])\n\n def getBalance(self, account, block_identifier=None):\n if block_identifier is None:\n block_identifier = self.defaultBlock\n return self.web3.manager.request_blocking(\n \"eth_getBalance\",\n [account, block_identifier],\n )\n\n def getStorageAt(self, account, position, block_identifier=None):\n if block_identifier is None:\n block_identifier = self.defaultBlock\n return self.web3.manager.request_blocking(\n \"eth_getStorageAt\",\n [account, position, block_identifier]\n )\n\n def getCode(self, account, block_identifier=None):\n if block_identifier is None:\n block_identifier = self.defaultBlock\n return self.web3.manager.request_blocking(\n \"eth_getCode\",\n [account, block_identifier],\n )\n\n def getBlock(self, block_identifier, full_transactions=False):\n \"\"\"\n `eth_getBlockByHash`\n `eth_getBlockByNumber`\n \"\"\"\n method = select_method_for_block_identifier(\n block_identifier,\n if_predefined='eth_getBlockByNumber',\n if_hash='eth_getBlockByHash',\n if_number='eth_getBlockByNumber',\n )\n\n return 
self.web3.manager.request_blocking(\n method,\n [block_identifier, full_transactions],\n )\n\n def getBlockTransactionCount(self, block_identifier):\n \"\"\"\n `eth_getBlockTransactionCountByHash`\n `eth_getBlockTransactionCountByNumber`\n \"\"\"\n method = select_method_for_block_identifier(\n block_identifier,\n if_predefined='eth_getBlockTransactionCountByNumber',\n if_hash='eth_getBlockTransactionCountByHash',\n if_number='eth_getBlockTransactionCountByNumber',\n )\n return self.web3.manager.request_blocking(\n method,\n [block_identifier],\n )\n\n def getUncleCount(self, block_identifier):\n \"\"\"\n `eth_getUncleCountByBlockHash`\n `eth_getUncleCountByBlockNumber`\n \"\"\"\n method = select_method_for_block_identifier(\n block_identifier,\n if_predefined='eth_getUncleCountByBlockNumber',\n if_hash='eth_getUncleCountByBlockHash',\n if_number='eth_getUncleCountByBlockNumber',\n )\n return self.web3.manager.request_blocking(\n method,\n [block_identifier],\n )\n\n def getTransaction(self, transaction_hash):\n return self.web3.manager.request_blocking(\n \"eth_getTransactionByHash\",\n [transaction_hash],\n )\n\n def getTransactionFromBlock(self, block_identifier, transaction_index):\n \"\"\"\n `eth_getTransactionByBlockHashAndIndex`\n `eth_getTransactionByBlockNumberAndIndex`\n \"\"\"\n method = select_method_for_block_identifier(\n block_identifier,\n if_predefined='eth_getTransactionByBlockNumberAndIndex',\n if_hash='eth_getTransactionByBlockHashAndIndex',\n if_number='eth_getTransactionByBlockNumberAndIndex',\n )\n return self.web3.manager.request_blocking(\n method,\n [block_identifier, transaction_index],\n )\n\n def getTransactionReceipt(self, transaction_hash):\n return self.web3.manager.request_blocking(\n \"eth_getTransactionReceipt\",\n [transaction_hash],\n )\n\n def getTransactionCount(self, account, block_identifier=None):\n if block_identifier is None:\n block_identifier = self.defaultBlock\n return self.web3.manager.request_blocking(\n \"eth_getTransactionCount\",\n [\n account,\n block_identifier,\n ],\n )\n\n def sendTransaction(self, transaction):\n # TODO: move to middleware\n if 'from' not in transaction and is_address(self.defaultAccount):\n transaction = assoc(transaction, 'from', self.defaultAccount)\n\n # TODO: move gas estimation in middleware\n if 'gas' not in transaction:\n transaction = assoc(\n transaction,\n 'gas',\n get_buffered_gas_estimate(self.web3, transaction),\n )\n\n return self.web3.manager.request_blocking(\n \"eth_sendTransaction\",\n [transaction],\n )\n\n def sendRawTransaction(self, raw_transaction):\n return self.web3.manager.request_blocking(\n \"eth_sendRawTransaction\",\n [raw_transaction],\n )\n\n def sign(self, account, data=None, hexstr=None, text=None):\n message_hex = to_hex(data, hexstr=hexstr, text=text)\n return self.web3.manager.request_blocking(\n \"eth_sign\", [account, message_hex],\n )\n\n @staticmethod\n def _recoveryMessageHash(data=None, hexstr=None, text=None):\n message_bytes = to_bytes(data, hexstr=hexstr, text=text)\n recovery_hasher = compose(to_hex, keccak, signature_wrapper)\n return recovery_hasher(message_bytes)\n\n def call(self, transaction, block_identifier=None):\n # TODO: move to middleware\n if 'from' not in transaction and is_address(self.defaultAccount):\n transaction = assoc(transaction, 'from', self.defaultAccount)\n\n # TODO: move to middleware\n if block_identifier is None:\n block_identifier = self.defaultBlock\n\n return self.web3.manager.request_blocking(\n \"eth_call\",\n [transaction, 
block_identifier],\n )\n\n def estimateGas(self, transaction):\n # TODO: move to middleware\n if is_address(self.defaultAccount):\n transaction = assoc(transaction, 'from', self.defaultAccount)\n\n return self.web3.manager.request_blocking(\n \"eth_estimateGas\",\n [transaction],\n )\n\n def filter(self, filter_params):\n if is_string(filter_params):\n if filter_params == \"latest\":\n filter_id = self.web3.manager.request_blocking(\n \"eth_newBlockFilter\", [],\n )\n return BlockFilter(self.web3, filter_id)\n elif filter_params == \"pending\":\n filter_id = self.web3.manager.request_blocking(\n \"eth_newPendingTransactionFilter\", [],\n )\n return TransactionFilter(self.web3, filter_id)\n else:\n raise ValueError(\n \"The filter API only accepts the values of `pending` or \"\n \"`latest` for string based filters\"\n )\n elif isinstance(filter_params, dict):\n filter_id = self.web3.manager.request_blocking(\n \"eth_newFilter\",\n [filter_params],\n )\n return LogFilter(self.web3, filter_id)\n else:\n raise ValueError(\"Must provide either a string or a valid filter object\")\n\n def getFilterChanges(self, filter_id):\n return self.web3.manager.request_blocking(\n \"eth_getFilterChanges\", [filter_id],\n )\n\n def getFilterLogs(self, filter_id):\n return self.web3.manager.request_blocking(\n \"eth_getFilterLogs\", [filter_id],\n )\n\n def getLogs(self, filter_params):\n raise NotImplementedError(\"Not yet implemented\")\n\n def uninstallFilter(self, filter_id):\n return self.web3.manager.request_blocking(\n \"eth_uninstallFilter\", [filter_id],\n )\n\n def contract(self,\n *args,\n **kwargs):\n ContractFactoryClass = kwargs.pop('ContractFactoryClass', self.defaultContractFactory)\n contract_name = kwargs.pop('contract_name', None)\n\n has_address = any((\n 'address' in kwargs,\n len(args) >= 1 and is_address(args[0]),\n len(args) >= 2 and is_address(args[1]),\n ))\n\n for potential_address in args:\n validate_address_checksum(potential_address)\n\n if has_address:\n if 'address' in kwargs:\n address = kwargs.pop('address')\n elif is_address(args[0]):\n address = args[0]\n elif is_address(args[1]):\n address = args[1]\n kwargs['abi'] = args[0]\n validate_address(address)\n\n return ContractFactoryClass.factory(self.web3, contract_name, **kwargs)(address)\n else:\n try:\n kwargs['abi'] = args[0]\n except IndexError:\n pass\n return ContractFactoryClass.factory(self.web3, contract_name, **kwargs)\n\n def setContractFactory(self, contractFactory):\n self.defaultContractFactory = contractFactory\n\n def getCompilers(self):\n return self.web3.manager.request_blocking(\"eth_getCompilers\", [])\n\n def getWork(self):\n return self.web3.manager.request_blocking(\"eth_getWork\", [])\n", "path": "web3/eth.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nfrom cytoolz import compose\nfrom cytoolz.dicttoolz import (\n assoc,\n)\n\nfrom eth_utils import (\n is_address,\n is_string,\n keccak,\n)\n\nfrom web3.iban import Iban\n\nfrom web3.contract import (\n Contract,\n)\nfrom web3.module import (\n Module,\n)\n\nfrom web3.utils.blocks import (\n select_method_for_block_identifier,\n)\nfrom web3.utils.signing import (\n signature_wrapper,\n)\nfrom web3.utils.empty import (\n empty,\n)\nfrom web3.utils.encoding import (\n to_bytes,\n to_hex,\n)\nfrom web3.utils.filters import (\n BlockFilter,\n TransactionFilter,\n LogFilter,\n)\nfrom web3.utils.transactions import (\n get_buffered_gas_estimate,\n)\nfrom web3.utils.validation import (\n validate_address,\n 
validate_address_checksum,\n)\n\n\nclass Eth(Module):\n defaultAccount = empty\n defaultBlock = \"latest\"\n defaultContractFactory = Contract\n iban = Iban\n\n def namereg(self):\n raise NotImplementedError()\n\n def icapNamereg(self):\n raise NotImplementedError()\n\n @property\n def protocolVersion(self):\n return self.web3.manager.request_blocking(\"eth_protocolVersion\", [])\n\n @property\n def syncing(self):\n return self.web3.manager.request_blocking(\"eth_syncing\", [])\n\n @property\n def coinbase(self):\n return self.web3.manager.request_blocking(\"eth_coinbase\", [])\n\n @property\n def mining(self):\n return self.web3.manager.request_blocking(\"eth_mining\", [])\n\n @property\n def hashrate(self):\n return self.web3.manager.request_blocking(\"eth_hashrate\", [])\n\n @property\n def gasPrice(self):\n return self.web3.manager.request_blocking(\"eth_gasPrice\", [])\n\n @property\n def accounts(self):\n return self.web3.manager.request_blocking(\"eth_accounts\", [])\n\n @property\n def blockNumber(self):\n return self.web3.manager.request_blocking(\"eth_blockNumber\", [])\n\n def getBalance(self, account, block_identifier=None):\n if block_identifier is None:\n block_identifier = self.defaultBlock\n return self.web3.manager.request_blocking(\n \"eth_getBalance\",\n [account, block_identifier],\n )\n\n def getStorageAt(self, account, position, block_identifier=None):\n if block_identifier is None:\n block_identifier = self.defaultBlock\n return self.web3.manager.request_blocking(\n \"eth_getStorageAt\",\n [account, position, block_identifier]\n )\n\n def getCode(self, account, block_identifier=None):\n if block_identifier is None:\n block_identifier = self.defaultBlock\n return self.web3.manager.request_blocking(\n \"eth_getCode\",\n [account, block_identifier],\n )\n\n def getBlock(self, block_identifier, full_transactions=False):\n \"\"\"\n `eth_getBlockByHash`\n `eth_getBlockByNumber`\n \"\"\"\n method = select_method_for_block_identifier(\n block_identifier,\n if_predefined='eth_getBlockByNumber',\n if_hash='eth_getBlockByHash',\n if_number='eth_getBlockByNumber',\n )\n\n return self.web3.manager.request_blocking(\n method,\n [block_identifier, full_transactions],\n )\n\n def getBlockTransactionCount(self, block_identifier):\n \"\"\"\n `eth_getBlockTransactionCountByHash`\n `eth_getBlockTransactionCountByNumber`\n \"\"\"\n method = select_method_for_block_identifier(\n block_identifier,\n if_predefined='eth_getBlockTransactionCountByNumber',\n if_hash='eth_getBlockTransactionCountByHash',\n if_number='eth_getBlockTransactionCountByNumber',\n )\n return self.web3.manager.request_blocking(\n method,\n [block_identifier],\n )\n\n def getUncleCount(self, block_identifier):\n \"\"\"\n `eth_getUncleCountByBlockHash`\n `eth_getUncleCountByBlockNumber`\n \"\"\"\n method = select_method_for_block_identifier(\n block_identifier,\n if_predefined='eth_getUncleCountByBlockNumber',\n if_hash='eth_getUncleCountByBlockHash',\n if_number='eth_getUncleCountByBlockNumber',\n )\n return self.web3.manager.request_blocking(\n method,\n [block_identifier],\n )\n\n def getTransaction(self, transaction_hash):\n return self.web3.manager.request_blocking(\n \"eth_getTransactionByHash\",\n [transaction_hash],\n )\n\n def getTransactionFromBlock(self, block_identifier, transaction_index):\n \"\"\"\n `eth_getTransactionByBlockHashAndIndex`\n `eth_getTransactionByBlockNumberAndIndex`\n \"\"\"\n method = select_method_for_block_identifier(\n block_identifier,\n 
if_predefined='eth_getTransactionByBlockNumberAndIndex',\n if_hash='eth_getTransactionByBlockHashAndIndex',\n if_number='eth_getTransactionByBlockNumberAndIndex',\n )\n return self.web3.manager.request_blocking(\n method,\n [block_identifier, transaction_index],\n )\n\n def getTransactionReceipt(self, transaction_hash):\n return self.web3.manager.request_blocking(\n \"eth_getTransactionReceipt\",\n [transaction_hash],\n )\n\n def getTransactionCount(self, account, block_identifier=None):\n if block_identifier is None:\n block_identifier = self.defaultBlock\n return self.web3.manager.request_blocking(\n \"eth_getTransactionCount\",\n [\n account,\n block_identifier,\n ],\n )\n\n def sendTransaction(self, transaction):\n # TODO: move to middleware\n if 'from' not in transaction and is_address(self.defaultAccount):\n transaction = assoc(transaction, 'from', self.defaultAccount)\n\n # TODO: move gas estimation in middleware\n if 'gas' not in transaction:\n transaction = assoc(\n transaction,\n 'gas',\n get_buffered_gas_estimate(self.web3, transaction),\n )\n\n return self.web3.manager.request_blocking(\n \"eth_sendTransaction\",\n [transaction],\n )\n\n def sendRawTransaction(self, raw_transaction):\n return self.web3.manager.request_blocking(\n \"eth_sendRawTransaction\",\n [raw_transaction],\n )\n\n def sign(self, account, data=None, hexstr=None, text=None):\n message_hex = to_hex(data, hexstr=hexstr, text=text)\n return self.web3.manager.request_blocking(\n \"eth_sign\", [account, message_hex],\n )\n\n @staticmethod\n def _recoveryMessageHash(data=None, hexstr=None, text=None):\n message_bytes = to_bytes(data, hexstr=hexstr, text=text)\n recovery_hasher = compose(to_hex, keccak, signature_wrapper)\n return recovery_hasher(message_bytes)\n\n def call(self, transaction, block_identifier=None):\n # TODO: move to middleware\n if 'from' not in transaction and is_address(self.defaultAccount):\n transaction = assoc(transaction, 'from', self.defaultAccount)\n\n # TODO: move to middleware\n if block_identifier is None:\n block_identifier = self.defaultBlock\n\n return self.web3.manager.request_blocking(\n \"eth_call\",\n [transaction, block_identifier],\n )\n\n def estimateGas(self, transaction):\n # TODO: move to middleware\n if 'from' not in transaction and is_address(self.defaultAccount):\n transaction = assoc(transaction, 'from', self.defaultAccount)\n\n return self.web3.manager.request_blocking(\n \"eth_estimateGas\",\n [transaction],\n )\n\n def filter(self, filter_params):\n if is_string(filter_params):\n if filter_params == \"latest\":\n filter_id = self.web3.manager.request_blocking(\n \"eth_newBlockFilter\", [],\n )\n return BlockFilter(self.web3, filter_id)\n elif filter_params == \"pending\":\n filter_id = self.web3.manager.request_blocking(\n \"eth_newPendingTransactionFilter\", [],\n )\n return TransactionFilter(self.web3, filter_id)\n else:\n raise ValueError(\n \"The filter API only accepts the values of `pending` or \"\n \"`latest` for string based filters\"\n )\n elif isinstance(filter_params, dict):\n filter_id = self.web3.manager.request_blocking(\n \"eth_newFilter\",\n [filter_params],\n )\n return LogFilter(self.web3, filter_id)\n else:\n raise ValueError(\"Must provide either a string or a valid filter object\")\n\n def getFilterChanges(self, filter_id):\n return self.web3.manager.request_blocking(\n \"eth_getFilterChanges\", [filter_id],\n )\n\n def getFilterLogs(self, filter_id):\n return self.web3.manager.request_blocking(\n \"eth_getFilterLogs\", [filter_id],\n )\n\n def 
getLogs(self, filter_params):\n raise NotImplementedError(\"Not yet implemented\")\n\n def uninstallFilter(self, filter_id):\n return self.web3.manager.request_blocking(\n \"eth_uninstallFilter\", [filter_id],\n )\n\n def contract(self,\n *args,\n **kwargs):\n ContractFactoryClass = kwargs.pop('ContractFactoryClass', self.defaultContractFactory)\n contract_name = kwargs.pop('contract_name', None)\n\n has_address = any((\n 'address' in kwargs,\n len(args) >= 1 and is_address(args[0]),\n len(args) >= 2 and is_address(args[1]),\n ))\n\n for potential_address in args:\n validate_address_checksum(potential_address)\n\n if has_address:\n if 'address' in kwargs:\n address = kwargs.pop('address')\n elif is_address(args[0]):\n address = args[0]\n elif is_address(args[1]):\n address = args[1]\n kwargs['abi'] = args[0]\n validate_address(address)\n\n return ContractFactoryClass.factory(self.web3, contract_name, **kwargs)(address)\n else:\n try:\n kwargs['abi'] = args[0]\n except IndexError:\n pass\n return ContractFactoryClass.factory(self.web3, contract_name, **kwargs)\n\n def setContractFactory(self, contractFactory):\n self.defaultContractFactory = contractFactory\n\n def getCompilers(self):\n return self.web3.manager.request_blocking(\"eth_getCompilers\", [])\n\n def getWork(self):\n return self.web3.manager.request_blocking(\"eth_getWork\", [])\n", "path": "web3/eth.py"}]}
| 3,436 | 111 |
gh_patches_debug_24334
|
rasdani/github-patches
|
git_diff
|
freedomofpress__securedrop-3688
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[reply refactor] Allow journalists to download replies from journalist interface
After #3673 is implemented, we should allow journalists to download replies from the journalist interface UI. Note that for long-running SecureDrop instances, there will be old replies encrypted only to the source key that should be unavailable for download.
Epic: #3097
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `securedrop/journalist_app/col.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 from flask import (Blueprint, redirect, url_for, render_template, flash,
4 request, abort, send_file, current_app)
5 from flask_babel import gettext
6 from sqlalchemy.orm.exc import NoResultFound
7
8 from db import db
9 from models import Submission
10 from journalist_app.forms import ReplyForm
11 from journalist_app.utils import (make_star_true, make_star_false, get_source,
12 delete_collection, col_download_unread,
13 col_download_all, col_star, col_un_star,
14 col_delete)
15
16
17 def make_blueprint(config):
18 view = Blueprint('col', __name__)
19
20 @view.route('/add_star/<filesystem_id>', methods=('POST',))
21 def add_star(filesystem_id):
22 make_star_true(filesystem_id)
23 db.session.commit()
24 return redirect(url_for('main.index'))
25
26 @view.route("/remove_star/<filesystem_id>", methods=('POST',))
27 def remove_star(filesystem_id):
28 make_star_false(filesystem_id)
29 db.session.commit()
30 return redirect(url_for('main.index'))
31
32 @view.route('/<filesystem_id>')
33 def col(filesystem_id):
34 form = ReplyForm()
35 source = get_source(filesystem_id)
36 source.has_key = current_app.crypto_util.getkey(filesystem_id)
37 return render_template("col.html", filesystem_id=filesystem_id,
38 source=source, form=form)
39
40 @view.route('/delete/<filesystem_id>', methods=('POST',))
41 def delete_single(filesystem_id):
42 """deleting a single collection from its /col page"""
43 source = get_source(filesystem_id)
44 delete_collection(filesystem_id)
45 flash(gettext("{source_name}'s collection deleted")
46 .format(source_name=source.journalist_designation),
47 "notification")
48 return redirect(url_for('main.index'))
49
50 @view.route('/process', methods=('POST',))
51 def process():
52 actions = {'download-unread': col_download_unread,
53 'download-all': col_download_all, 'star': col_star,
54 'un-star': col_un_star, 'delete': col_delete}
55 if 'cols_selected' not in request.form:
56 flash(gettext('No collections selected.'), 'error')
57 return redirect(url_for('main.index'))
58
59 # getlist is cgi.FieldStorage.getlist
60 cols_selected = request.form.getlist('cols_selected')
61 action = request.form['action']
62
63 if action not in actions:
64 return abort(500)
65
66 method = actions[action]
67 return method(cols_selected)
68
69 @view.route('/<filesystem_id>/<fn>')
70 def download_single_submission(filesystem_id, fn):
71 """Sends a client the contents of a single submission."""
72 if '..' in fn or fn.startswith('/'):
73 abort(404)
74
75 try:
76 Submission.query.filter(
77 Submission.filename == fn).one().downloaded = True
78 db.session.commit()
79 except NoResultFound as e:
80 current_app.logger.error(
81 "Could not mark " + fn + " as downloaded: %s" % (e,))
82
83 return send_file(current_app.storage.path(filesystem_id, fn),
84 mimetype="application/pgp-encrypted")
85
86 return view
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/securedrop/journalist_app/col.py b/securedrop/journalist_app/col.py
--- a/securedrop/journalist_app/col.py
+++ b/securedrop/journalist_app/col.py
@@ -67,18 +67,21 @@
return method(cols_selected)
@view.route('/<filesystem_id>/<fn>')
- def download_single_submission(filesystem_id, fn):
- """Sends a client the contents of a single submission."""
+ def download_single_file(filesystem_id, fn):
+ """Sends a client the contents of a single file, either a submission
+ or a journalist reply"""
if '..' in fn or fn.startswith('/'):
abort(404)
- try:
- Submission.query.filter(
- Submission.filename == fn).one().downloaded = True
- db.session.commit()
- except NoResultFound as e:
- current_app.logger.error(
- "Could not mark " + fn + " as downloaded: %s" % (e,))
+ # only mark as read when it's a submission (and not a journalist reply)
+ if not fn.endswith('reply.gpg'):
+ try:
+ Submission.query.filter(
+ Submission.filename == fn).one().downloaded = True
+ db.session.commit()
+ except NoResultFound as e:
+ current_app.logger.error(
+ "Could not mark " + fn + " as downloaded: %s" % (e,))
return send_file(current_app.storage.path(filesystem_id, fn),
mimetype="application/pgp-encrypted")
|
{"golden_diff": "diff --git a/securedrop/journalist_app/col.py b/securedrop/journalist_app/col.py\n--- a/securedrop/journalist_app/col.py\n+++ b/securedrop/journalist_app/col.py\n@@ -67,18 +67,21 @@\n return method(cols_selected)\n \n @view.route('/<filesystem_id>/<fn>')\n- def download_single_submission(filesystem_id, fn):\n- \"\"\"Sends a client the contents of a single submission.\"\"\"\n+ def download_single_file(filesystem_id, fn):\n+ \"\"\"Sends a client the contents of a single file, either a submission\n+ or a journalist reply\"\"\"\n if '..' in fn or fn.startswith('/'):\n abort(404)\n \n- try:\n- Submission.query.filter(\n- Submission.filename == fn).one().downloaded = True\n- db.session.commit()\n- except NoResultFound as e:\n- current_app.logger.error(\n- \"Could not mark \" + fn + \" as downloaded: %s\" % (e,))\n+ # only mark as read when it's a submission (and not a journalist reply)\n+ if not fn.endswith('reply.gpg'):\n+ try:\n+ Submission.query.filter(\n+ Submission.filename == fn).one().downloaded = True\n+ db.session.commit()\n+ except NoResultFound as e:\n+ current_app.logger.error(\n+ \"Could not mark \" + fn + \" as downloaded: %s\" % (e,))\n \n return send_file(current_app.storage.path(filesystem_id, fn),\n mimetype=\"application/pgp-encrypted\")\n", "issue": "[reply refactor] Allow journalists to download replies from journalist interface\nAfter #3673 is implemented, we should allow journalists to download replies from the journalist interface UI. Note that for long-running SecureDrop instances, there will be old replies encrypted only to the source key that should be unavailable for download.\r\n\r\nEpic: #3097\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom flask import (Blueprint, redirect, url_for, render_template, flash,\n request, abort, send_file, current_app)\nfrom flask_babel import gettext\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom db import db\nfrom models import Submission\nfrom journalist_app.forms import ReplyForm\nfrom journalist_app.utils import (make_star_true, make_star_false, get_source,\n delete_collection, col_download_unread,\n col_download_all, col_star, col_un_star,\n col_delete)\n\n\ndef make_blueprint(config):\n view = Blueprint('col', __name__)\n\n @view.route('/add_star/<filesystem_id>', methods=('POST',))\n def add_star(filesystem_id):\n make_star_true(filesystem_id)\n db.session.commit()\n return redirect(url_for('main.index'))\n\n @view.route(\"/remove_star/<filesystem_id>\", methods=('POST',))\n def remove_star(filesystem_id):\n make_star_false(filesystem_id)\n db.session.commit()\n return redirect(url_for('main.index'))\n\n @view.route('/<filesystem_id>')\n def col(filesystem_id):\n form = ReplyForm()\n source = get_source(filesystem_id)\n source.has_key = current_app.crypto_util.getkey(filesystem_id)\n return render_template(\"col.html\", filesystem_id=filesystem_id,\n source=source, form=form)\n\n @view.route('/delete/<filesystem_id>', methods=('POST',))\n def delete_single(filesystem_id):\n \"\"\"deleting a single collection from its /col page\"\"\"\n source = get_source(filesystem_id)\n delete_collection(filesystem_id)\n flash(gettext(\"{source_name}'s collection deleted\")\n .format(source_name=source.journalist_designation),\n \"notification\")\n return redirect(url_for('main.index'))\n\n @view.route('/process', methods=('POST',))\n def process():\n actions = {'download-unread': col_download_unread,\n 'download-all': col_download_all, 'star': col_star,\n 'un-star': col_un_star, 'delete': col_delete}\n 
if 'cols_selected' not in request.form:\n flash(gettext('No collections selected.'), 'error')\n return redirect(url_for('main.index'))\n\n # getlist is cgi.FieldStorage.getlist\n cols_selected = request.form.getlist('cols_selected')\n action = request.form['action']\n\n if action not in actions:\n return abort(500)\n\n method = actions[action]\n return method(cols_selected)\n\n @view.route('/<filesystem_id>/<fn>')\n def download_single_submission(filesystem_id, fn):\n \"\"\"Sends a client the contents of a single submission.\"\"\"\n if '..' in fn or fn.startswith('/'):\n abort(404)\n\n try:\n Submission.query.filter(\n Submission.filename == fn).one().downloaded = True\n db.session.commit()\n except NoResultFound as e:\n current_app.logger.error(\n \"Could not mark \" + fn + \" as downloaded: %s\" % (e,))\n\n return send_file(current_app.storage.path(filesystem_id, fn),\n mimetype=\"application/pgp-encrypted\")\n\n return view\n", "path": "securedrop/journalist_app/col.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom flask import (Blueprint, redirect, url_for, render_template, flash,\n request, abort, send_file, current_app)\nfrom flask_babel import gettext\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom db import db\nfrom models import Submission\nfrom journalist_app.forms import ReplyForm\nfrom journalist_app.utils import (make_star_true, make_star_false, get_source,\n delete_collection, col_download_unread,\n col_download_all, col_star, col_un_star,\n col_delete)\n\n\ndef make_blueprint(config):\n view = Blueprint('col', __name__)\n\n @view.route('/add_star/<filesystem_id>', methods=('POST',))\n def add_star(filesystem_id):\n make_star_true(filesystem_id)\n db.session.commit()\n return redirect(url_for('main.index'))\n\n @view.route(\"/remove_star/<filesystem_id>\", methods=('POST',))\n def remove_star(filesystem_id):\n make_star_false(filesystem_id)\n db.session.commit()\n return redirect(url_for('main.index'))\n\n @view.route('/<filesystem_id>')\n def col(filesystem_id):\n form = ReplyForm()\n source = get_source(filesystem_id)\n source.has_key = current_app.crypto_util.getkey(filesystem_id)\n return render_template(\"col.html\", filesystem_id=filesystem_id,\n source=source, form=form)\n\n @view.route('/delete/<filesystem_id>', methods=('POST',))\n def delete_single(filesystem_id):\n \"\"\"deleting a single collection from its /col page\"\"\"\n source = get_source(filesystem_id)\n delete_collection(filesystem_id)\n flash(gettext(\"{source_name}'s collection deleted\")\n .format(source_name=source.journalist_designation),\n \"notification\")\n return redirect(url_for('main.index'))\n\n @view.route('/process', methods=('POST',))\n def process():\n actions = {'download-unread': col_download_unread,\n 'download-all': col_download_all, 'star': col_star,\n 'un-star': col_un_star, 'delete': col_delete}\n if 'cols_selected' not in request.form:\n flash(gettext('No collections selected.'), 'error')\n return redirect(url_for('main.index'))\n\n # getlist is cgi.FieldStorage.getlist\n cols_selected = request.form.getlist('cols_selected')\n action = request.form['action']\n\n if action not in actions:\n return abort(500)\n\n method = actions[action]\n return method(cols_selected)\n\n @view.route('/<filesystem_id>/<fn>')\n def download_single_file(filesystem_id, fn):\n \"\"\"Sends a client the contents of a single file, either a submission\n or a journalist reply\"\"\"\n if '..' 
in fn or fn.startswith('/'):\n abort(404)\n\n # only mark as read when it's a submission (and not a journalist reply)\n if not fn.endswith('reply.gpg'):\n try:\n Submission.query.filter(\n Submission.filename == fn).one().downloaded = True\n db.session.commit()\n except NoResultFound as e:\n current_app.logger.error(\n \"Could not mark \" + fn + \" as downloaded: %s\" % (e,))\n\n return send_file(current_app.storage.path(filesystem_id, fn),\n mimetype=\"application/pgp-encrypted\")\n\n return view\n", "path": "securedrop/journalist_app/col.py"}]}
| 1,181 | 354 |
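A side note on the securedrop patch above: the view distinguishes journalist replies from source submissions purely by filename suffix, and only submissions get their `downloaded` flag set. A minimal standalone sketch of that routing logic, assuming the `reply.gpg` suffix convention used in the diff (the SQLAlchemy `Submission` model and Flask plumbing are replaced here by a plain dict):

```python
def is_journalist_reply(filename: str) -> bool:
    # Per the check added in the patch, reply files end in 'reply.gpg';
    # everything else served from a source's directory is a submission.
    return filename.endswith('reply.gpg')


def mark_downloaded_if_submission(filename: str, submissions: dict) -> None:
    # Replies carry no read/unread state, so they are skipped entirely.
    if is_journalist_reply(filename):
        return
    record = submissions.get(filename)
    if record is not None:
        record['downloaded'] = True
```

The dict is only a stand-in for the `Submission.query.filter(...)` lookup in the real view; the point is that the suffix check is the single branch deciding whether download state is touched.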
gh_patches_debug_42292
|
rasdani/github-patches
|
git_diff
|
azavea__raster-vision-178
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add metrics for counting to eval.py
We should add a new metric to the detection evaluation script so that it computes how close the predicted counts are to the ground-truth counts.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/rv/detection/commands/eval_predictions.py`
Content:
```
1 import json
2 from os.path import join
3
4 import numpy as np
5 import rasterio
6 import click
7
8 from object_detection.utils import object_detection_evaluation, label_map_util
9
10 from rv.utils import (
11 download_if_needed, make_empty_dir, get_local_path, upload_if_needed,
12 get_boxes_from_geojson, download_and_build_vrt)
13 from rv.detection.commands.settings import max_num_classes, temp_root_dir
14
15
16 def get_eval_result(ground_truth_path, predictions_path, image_dataset):
17 gt_boxes, gt_classes, _ = \
18 get_boxes_from_geojson(ground_truth_path, image_dataset)
19 # Subtract one because class id's start at 1, but evaluation api assumes
20 # the start at 0. You might think we could just write the label_map.pbtxt
21 # so the class ids start at 0, but that throws an exception.
22 gt_classes -= 1
23
24 pred_boxes, pred_classes, pred_scores = \
25 get_boxes_from_geojson(predictions_path, image_dataset)
26 pred_classes -= 1
27
28 nb_gt_classes = len(set(gt_classes))
29 od_eval = object_detection_evaluation.ObjectDetectionEvaluation(
30 nb_gt_classes, matching_iou_threshold=0.1)
31 image_key = 'image'
32 od_eval.add_single_ground_truth_image_info(
33 image_key, gt_boxes, gt_classes)
34 od_eval.add_single_detected_image_info(
35 image_key, pred_boxes, pred_scores, pred_classes)
36
37 od_eval.evaluate()
38 return od_eval.get_eval_result()
39
40
41 def write_results(output_path, label_map_path, eval_result):
42 label_map = label_map_util.load_labelmap(label_map_path)
43 categories = label_map_util.convert_label_map_to_categories(
44 label_map, max_num_classes=max_num_classes, use_display_name=True)
45 category_index = label_map_util.create_category_index(categories)
46
47 results = []
48 for class_id in range(1, len(category_index) + 1):
49 class_name = category_index[class_id]['name']
50 # Subtract one to account for fact that class id's start at 1.
51 # precisions and recalls are lists with one element for each
52 # predicted box, assuming they are sorted by score. Each element is
53 # the precision or recall assuming that all predicted boxes with that
54 # score or above are used. So, the last element is the value assuming
55 # that all predictions are used.
56
57 precisions = eval_result.precisions[class_id - 1]
58 recalls = eval_result.recalls[class_id - 1]
59 # Get precision and recall assuming all predicted boxes are used.
60 class_results = {
61 'name': class_name,
62 'precision': precisions[-1],
63 'recall': recalls[-1]
64 }
65 results.append(class_results)
66
67 with open(output_path, 'w') as output_file:
68 output_file.write(json.dumps(results, indent=4))
69
70
71 def _eval_predictions(image_uris, label_map_uri, ground_truth_uri,
72 predictions_uri, output_uri):
73 temp_dir = join(temp_root_dir, 'eval_predictions')
74 make_empty_dir(temp_dir)
75
76 image_path = download_and_build_vrt(temp_dir, image_uris)
77 image_dataset = rasterio.open(image_path)
78
79 ground_truth_path = download_if_needed(temp_dir, ground_truth_uri)
80 predictions_path = download_if_needed(temp_dir, predictions_uri)
81 label_map_path = download_if_needed(temp_dir, label_map_uri)
82
83 eval_result = get_eval_result(
84 ground_truth_path, predictions_path, image_dataset)
85
86 output_path = get_local_path(temp_dir, output_uri)
87 write_results(output_path, label_map_path, eval_result)
88 upload_if_needed(output_path, output_uri)
89
90
91 @click.command()
92 @click.argument('image_uris', nargs=-1)
93 @click.argument('label_map_uri')
94 @click.argument('ground_truth_uri')
95 @click.argument('predictions_uri')
96 @click.argument('output_uri')
97 def eval_predictions(image_uris, label_map_uri, ground_truth_uri,
98 predictions_uri, output_uri):
99 """Evaluate predictions against ground truth for a single predictions file.
100
101 Args:
102 ground_truth_uri: GeoJSON file with ground truth bounding boxes
103 predictions_uri: GeoJSON file with predicted bounding boxes
104 output_uri: JSON file with metrics
105 """
106 _eval_predictions(image_uris, label_map_uri, ground_truth_uri,
107 predictions_uri, output_uri)
108
109
110 if __name__ == '__main__':
111 eval_predictions()
112
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/rv/detection/commands/eval_predictions.py b/src/rv/detection/commands/eval_predictions.py
--- a/src/rv/detection/commands/eval_predictions.py
+++ b/src/rv/detection/commands/eval_predictions.py
@@ -1,7 +1,6 @@
import json
-from os.path import join
+from os.path import join, dirname
-import numpy as np
import rasterio
import click
@@ -13,7 +12,7 @@
from rv.detection.commands.settings import max_num_classes, temp_root_dir
-def get_eval_result(ground_truth_path, predictions_path, image_dataset):
+def get_od_eval(ground_truth_path, predictions_path, image_dataset):
gt_boxes, gt_classes, _ = \
get_boxes_from_geojson(ground_truth_path, image_dataset)
# Subtract one because class id's start at 1, but evaluation api assumes
@@ -35,10 +34,12 @@
image_key, pred_boxes, pred_scores, pred_classes)
od_eval.evaluate()
- return od_eval.get_eval_result()
+ return od_eval
-def write_results(output_path, label_map_path, eval_result):
+def write_results(output_path, label_map_path, od_eval):
+ make_empty_dir(dirname(output_path), empty_dir=False)
+
label_map = label_map_util.load_labelmap(label_map_path)
categories = label_map_util.convert_label_map_to_categories(
label_map, max_num_classes=max_num_classes, use_display_name=True)
@@ -53,19 +54,30 @@
# the precision or recall assuming that all predicted boxes with that
# score or above are used. So, the last element is the value assuming
# that all predictions are used.
-
+ eval_result = od_eval.get_eval_result()
precisions = eval_result.precisions[class_id - 1]
recalls = eval_result.recalls[class_id - 1]
# Get precision and recall assuming all predicted boxes are used.
+ precision = precisions[-1]
+ recall = recalls[-1]
+ f1 = (2 * precision * recall) / (precision + recall)
+
+ gt_count = od_eval.num_gt_instances_per_class[class_id -1]
+ pred_count = len(recalls)
+ count_error = pred_count - gt_count
+ norm_count_error = count_error / gt_count
+
class_results = {
'name': class_name,
- 'precision': precisions[-1],
- 'recall': recalls[-1]
+ 'precision': precision,
+ 'recall': recall,
+ 'f1': f1,
+ 'norm_count_error': norm_count_error
}
results.append(class_results)
with open(output_path, 'w') as output_file:
- output_file.write(json.dumps(results, indent=4))
+ output_file.write(json.dumps(results, indent=4, sort_keys=True))
def _eval_predictions(image_uris, label_map_uri, ground_truth_uri,
@@ -80,11 +92,11 @@
predictions_path = download_if_needed(temp_dir, predictions_uri)
label_map_path = download_if_needed(temp_dir, label_map_uri)
- eval_result = get_eval_result(
+ od_eval = get_od_eval(
ground_truth_path, predictions_path, image_dataset)
output_path = get_local_path(temp_dir, output_uri)
- write_results(output_path, label_map_path, eval_result)
+ write_results(output_path, label_map_path, od_eval)
upload_if_needed(output_path, output_uri)
|
{"golden_diff": "diff --git a/src/rv/detection/commands/eval_predictions.py b/src/rv/detection/commands/eval_predictions.py\n--- a/src/rv/detection/commands/eval_predictions.py\n+++ b/src/rv/detection/commands/eval_predictions.py\n@@ -1,7 +1,6 @@\n import json\n-from os.path import join\n+from os.path import join, dirname\n \n-import numpy as np\n import rasterio\n import click\n \n@@ -13,7 +12,7 @@\n from rv.detection.commands.settings import max_num_classes, temp_root_dir\n \n \n-def get_eval_result(ground_truth_path, predictions_path, image_dataset):\n+def get_od_eval(ground_truth_path, predictions_path, image_dataset):\n gt_boxes, gt_classes, _ = \\\n get_boxes_from_geojson(ground_truth_path, image_dataset)\n # Subtract one because class id's start at 1, but evaluation api assumes\n@@ -35,10 +34,12 @@\n image_key, pred_boxes, pred_scores, pred_classes)\n \n od_eval.evaluate()\n- return od_eval.get_eval_result()\n+ return od_eval\n \n \n-def write_results(output_path, label_map_path, eval_result):\n+def write_results(output_path, label_map_path, od_eval):\n+ make_empty_dir(dirname(output_path), empty_dir=False)\n+\n label_map = label_map_util.load_labelmap(label_map_path)\n categories = label_map_util.convert_label_map_to_categories(\n label_map, max_num_classes=max_num_classes, use_display_name=True)\n@@ -53,19 +54,30 @@\n # the precision or recall assuming that all predicted boxes with that\n # score or above are used. So, the last element is the value assuming\n # that all predictions are used.\n-\n+ eval_result = od_eval.get_eval_result()\n precisions = eval_result.precisions[class_id - 1]\n recalls = eval_result.recalls[class_id - 1]\n # Get precision and recall assuming all predicted boxes are used.\n+ precision = precisions[-1]\n+ recall = recalls[-1]\n+ f1 = (2 * precision * recall) / (precision + recall)\n+\n+ gt_count = od_eval.num_gt_instances_per_class[class_id -1]\n+ pred_count = len(recalls)\n+ count_error = pred_count - gt_count\n+ norm_count_error = count_error / gt_count\n+\n class_results = {\n 'name': class_name,\n- 'precision': precisions[-1],\n- 'recall': recalls[-1]\n+ 'precision': precision,\n+ 'recall': recall,\n+ 'f1': f1,\n+ 'norm_count_error': norm_count_error\n }\n results.append(class_results)\n \n with open(output_path, 'w') as output_file:\n- output_file.write(json.dumps(results, indent=4))\n+ output_file.write(json.dumps(results, indent=4, sort_keys=True))\n \n \n def _eval_predictions(image_uris, label_map_uri, ground_truth_uri,\n@@ -80,11 +92,11 @@\n predictions_path = download_if_needed(temp_dir, predictions_uri)\n label_map_path = download_if_needed(temp_dir, label_map_uri)\n \n- eval_result = get_eval_result(\n+ od_eval = get_od_eval(\n ground_truth_path, predictions_path, image_dataset)\n \n output_path = get_local_path(temp_dir, output_uri)\n- write_results(output_path, label_map_path, eval_result)\n+ write_results(output_path, label_map_path, od_eval)\n upload_if_needed(output_path, output_uri)\n", "issue": "Add metrics for counting to eval.py\nWe should add a new metric to the detection evaluation script so that it computes how close the counts are compared to ground truth.\n", "before_files": [{"content": "import json\nfrom os.path import join\n\nimport numpy as np\nimport rasterio\nimport click\n\nfrom object_detection.utils import object_detection_evaluation, label_map_util\n\nfrom rv.utils import (\n download_if_needed, make_empty_dir, get_local_path, upload_if_needed,\n get_boxes_from_geojson, download_and_build_vrt)\nfrom 
rv.detection.commands.settings import max_num_classes, temp_root_dir\n\n\ndef get_eval_result(ground_truth_path, predictions_path, image_dataset):\n gt_boxes, gt_classes, _ = \\\n get_boxes_from_geojson(ground_truth_path, image_dataset)\n # Subtract one because class id's start at 1, but evaluation api assumes\n # the start at 0. You might think we could just write the label_map.pbtxt\n # so the class ids start at 0, but that throws an exception.\n gt_classes -= 1\n\n pred_boxes, pred_classes, pred_scores = \\\n get_boxes_from_geojson(predictions_path, image_dataset)\n pred_classes -= 1\n\n nb_gt_classes = len(set(gt_classes))\n od_eval = object_detection_evaluation.ObjectDetectionEvaluation(\n nb_gt_classes, matching_iou_threshold=0.1)\n image_key = 'image'\n od_eval.add_single_ground_truth_image_info(\n image_key, gt_boxes, gt_classes)\n od_eval.add_single_detected_image_info(\n image_key, pred_boxes, pred_scores, pred_classes)\n\n od_eval.evaluate()\n return od_eval.get_eval_result()\n\n\ndef write_results(output_path, label_map_path, eval_result):\n label_map = label_map_util.load_labelmap(label_map_path)\n categories = label_map_util.convert_label_map_to_categories(\n label_map, max_num_classes=max_num_classes, use_display_name=True)\n category_index = label_map_util.create_category_index(categories)\n\n results = []\n for class_id in range(1, len(category_index) + 1):\n class_name = category_index[class_id]['name']\n # Subtract one to account for fact that class id's start at 1.\n # precisions and recalls are lists with one element for each\n # predicted box, assuming they are sorted by score. Each element is\n # the precision or recall assuming that all predicted boxes with that\n # score or above are used. So, the last element is the value assuming\n # that all predictions are used.\n\n precisions = eval_result.precisions[class_id - 1]\n recalls = eval_result.recalls[class_id - 1]\n # Get precision and recall assuming all predicted boxes are used.\n class_results = {\n 'name': class_name,\n 'precision': precisions[-1],\n 'recall': recalls[-1]\n }\n results.append(class_results)\n\n with open(output_path, 'w') as output_file:\n output_file.write(json.dumps(results, indent=4))\n\n\ndef _eval_predictions(image_uris, label_map_uri, ground_truth_uri,\n predictions_uri, output_uri):\n temp_dir = join(temp_root_dir, 'eval_predictions')\n make_empty_dir(temp_dir)\n\n image_path = download_and_build_vrt(temp_dir, image_uris)\n image_dataset = rasterio.open(image_path)\n\n ground_truth_path = download_if_needed(temp_dir, ground_truth_uri)\n predictions_path = download_if_needed(temp_dir, predictions_uri)\n label_map_path = download_if_needed(temp_dir, label_map_uri)\n\n eval_result = get_eval_result(\n ground_truth_path, predictions_path, image_dataset)\n\n output_path = get_local_path(temp_dir, output_uri)\n write_results(output_path, label_map_path, eval_result)\n upload_if_needed(output_path, output_uri)\n\n\[email protected]()\[email protected]('image_uris', nargs=-1)\[email protected]('label_map_uri')\[email protected]('ground_truth_uri')\[email protected]('predictions_uri')\[email protected]('output_uri')\ndef eval_predictions(image_uris, label_map_uri, ground_truth_uri,\n predictions_uri, output_uri):\n \"\"\"Evaluate predictions against ground truth for a single predictions file.\n\n Args:\n ground_truth_uri: GeoJSON file with ground truth bounding boxes\n predictions_uri: GeoJSON file with predicted bounding boxes\n output_uri: JSON file with metrics\n \"\"\"\n 
_eval_predictions(image_uris, label_map_uri, ground_truth_uri,\n predictions_uri, output_uri)\n\n\nif __name__ == '__main__':\n eval_predictions()\n", "path": "src/rv/detection/commands/eval_predictions.py"}], "after_files": [{"content": "import json\nfrom os.path import join, dirname\n\nimport rasterio\nimport click\n\nfrom object_detection.utils import object_detection_evaluation, label_map_util\n\nfrom rv.utils import (\n download_if_needed, make_empty_dir, get_local_path, upload_if_needed,\n get_boxes_from_geojson, download_and_build_vrt)\nfrom rv.detection.commands.settings import max_num_classes, temp_root_dir\n\n\ndef get_od_eval(ground_truth_path, predictions_path, image_dataset):\n gt_boxes, gt_classes, _ = \\\n get_boxes_from_geojson(ground_truth_path, image_dataset)\n # Subtract one because class id's start at 1, but evaluation api assumes\n # the start at 0. You might think we could just write the label_map.pbtxt\n # so the class ids start at 0, but that throws an exception.\n gt_classes -= 1\n\n pred_boxes, pred_classes, pred_scores = \\\n get_boxes_from_geojson(predictions_path, image_dataset)\n pred_classes -= 1\n\n nb_gt_classes = len(set(gt_classes))\n od_eval = object_detection_evaluation.ObjectDetectionEvaluation(\n nb_gt_classes, matching_iou_threshold=0.1)\n image_key = 'image'\n od_eval.add_single_ground_truth_image_info(\n image_key, gt_boxes, gt_classes)\n od_eval.add_single_detected_image_info(\n image_key, pred_boxes, pred_scores, pred_classes)\n\n od_eval.evaluate()\n return od_eval\n\n\ndef write_results(output_path, label_map_path, od_eval):\n make_empty_dir(dirname(output_path), empty_dir=False)\n\n label_map = label_map_util.load_labelmap(label_map_path)\n categories = label_map_util.convert_label_map_to_categories(\n label_map, max_num_classes=max_num_classes, use_display_name=True)\n category_index = label_map_util.create_category_index(categories)\n\n results = []\n for class_id in range(1, len(category_index) + 1):\n class_name = category_index[class_id]['name']\n # Subtract one to account for fact that class id's start at 1.\n # precisions and recalls are lists with one element for each\n # predicted box, assuming they are sorted by score. Each element is\n # the precision or recall assuming that all predicted boxes with that\n # score or above are used. 
So, the last element is the value assuming\n # that all predictions are used.\n eval_result = od_eval.get_eval_result()\n precisions = eval_result.precisions[class_id - 1]\n recalls = eval_result.recalls[class_id - 1]\n # Get precision and recall assuming all predicted boxes are used.\n precision = precisions[-1]\n recall = recalls[-1]\n f1 = (2 * precision * recall) / (precision + recall)\n\n gt_count = od_eval.num_gt_instances_per_class[class_id -1]\n pred_count = len(recalls)\n count_error = pred_count - gt_count\n norm_count_error = count_error / gt_count\n\n class_results = {\n 'name': class_name,\n 'precision': precision,\n 'recall': recall,\n 'f1': f1,\n 'norm_count_error': norm_count_error\n }\n results.append(class_results)\n\n with open(output_path, 'w') as output_file:\n output_file.write(json.dumps(results, indent=4, sort_keys=True))\n\n\ndef _eval_predictions(image_uris, label_map_uri, ground_truth_uri,\n predictions_uri, output_uri):\n temp_dir = join(temp_root_dir, 'eval_predictions')\n make_empty_dir(temp_dir)\n\n image_path = download_and_build_vrt(temp_dir, image_uris)\n image_dataset = rasterio.open(image_path)\n\n ground_truth_path = download_if_needed(temp_dir, ground_truth_uri)\n predictions_path = download_if_needed(temp_dir, predictions_uri)\n label_map_path = download_if_needed(temp_dir, label_map_uri)\n\n od_eval = get_od_eval(\n ground_truth_path, predictions_path, image_dataset)\n\n output_path = get_local_path(temp_dir, output_uri)\n write_results(output_path, label_map_path, od_eval)\n upload_if_needed(output_path, output_uri)\n\n\[email protected]()\[email protected]('image_uris', nargs=-1)\[email protected]('label_map_uri')\[email protected]('ground_truth_uri')\[email protected]('predictions_uri')\[email protected]('output_uri')\ndef eval_predictions(image_uris, label_map_uri, ground_truth_uri,\n predictions_uri, output_uri):\n \"\"\"Evaluate predictions against ground truth for a single predictions file.\n\n Args:\n ground_truth_uri: GeoJSON file with ground truth bounding boxes\n predictions_uri: GeoJSON file with predicted bounding boxes\n output_uri: JSON file with metrics\n \"\"\"\n _eval_predictions(image_uris, label_map_uri, ground_truth_uri,\n predictions_uri, output_uri)\n\n\nif __name__ == '__main__':\n eval_predictions()\n", "path": "src/rv/detection/commands/eval_predictions.py"}]}
| 1,475 | 780 |
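The raster-vision patch above adds two per-class numbers to the report: an F1 score and a normalized count error. A small self-contained sketch of both formulas, assuming precision and recall are taken at the point where every prediction is used (as the patch does) and that the ground-truth count is non-zero:

```python
def f1_score(precision: float, recall: float) -> float:
    # Harmonic mean of precision and recall.
    return (2 * precision * recall) / (precision + recall)


def norm_count_error(pred_count: int, gt_count: int) -> float:
    # Positive when the detector over-counts, negative when it under-counts,
    # expressed as a fraction of the ground-truth count.
    return (pred_count - gt_count) / gt_count


# Example: 90 predictions against 100 ground-truth boxes,
# with precision 0.8 and recall 0.72 over the full prediction list.
print(round(f1_score(0.8, 0.72), 3))  # 0.758
print(norm_count_error(90, 100))      # -0.1
```

Note that, as in the patch itself, both expressions divide by quantities that can be zero (precision + recall, and the ground-truth count), so a hardened version would need to guard those cases.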
gh_patches_debug_7082
|
rasdani/github-patches
|
git_diff
|
paperless-ngx__paperless-ngx-2494
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Rollback doesn't work as described in the documentation
### Description
Rollback doesn't work properly as described in the documentation.
### Steps to reproduce
1. Go to 'https://docs.paperless-ngx.com/setup/#moving-back-to-paperless'
2. Open a terminal from the container and execute the command as mentioned under 'Or without docker:'
3. Error
### Webserver logs
```bash
root@ad747fe4e9e8:/usr/src/paperless/src# python3 manage.py migrate documents 0023
Operations to perform:
Target specific migration: 0023_document_current_filename, from documents
Running migrations:
Rendering model states... DONE
Unapplying paperless_mail.0016_mailrule_consumption_scope... OK
Unapplying paperless_mail.0015_alter_mailrule_action... OK
Unapplying paperless_mail.0014_alter_mailrule_action... OK
Unapplying paperless_mail.0013_merge_20220412_1051... OK
Unapplying paperless_mail.0012_alter_mailrule_assign_tags... OK
Unapplying paperless_mail.0011_remove_mailrule_assign_tag... OK
Unapplying paperless_mail.0010_auto_20220311_1602... OK
Unapplying paperless_mail.0009_mailrule_assign_tags... OK
Unapplying paperless_mail.0009_alter_mailrule_action_alter_mailrule_folder... OK
Unapplying paperless_mail.0008_auto_20210516_0940... OK
Unapplying paperless_mail.0007_auto_20210106_0138... OK
Unapplying paperless_mail.0006_auto_20210101_2340... OK
Unapplying paperless_mail.0005_help_texts... OK
Unapplying paperless_mail.0004_mailrule_order... OK
Unapplying paperless_mail.0003_auto_20201118_1940... OK
Unapplying paperless_mail.0002_auto_20201117_1334... OK
Unapplying paperless_mail.0001_initial... OK
Unapplying documents.1028_remove_paperlesstask_task_args_and_more... OK
Unapplying documents.1027_remove_paperlesstask_attempted_task_and_more... OK
Unapplying documents.1026_transition_to_celery...Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/django/db/backends/utils.py", line 89, in _execute
return self.cursor.execute(sql, params)
File "/usr/local/lib/python3.9/site-packages/django/db/backends/sqlite3/base.py", line 357, in execute
return Database.Cursor.execute(self, query, params)
sqlite3.IntegrityError: NOT NULL constraint failed: new__documents_paperlesstask.name
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/src/paperless/src/manage.py", line 11, in <module>
execute_from_command_line(sys.argv)
File "/usr/local/lib/python3.9/site-packages/django/core/management/__init__.py", line 446, in execute_from_command_line
utility.execute()
File "/usr/local/lib/python3.9/site-packages/django/core/management/__init__.py", line 440, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/usr/local/lib/python3.9/site-packages/django/core/management/base.py", line 402, in run_from_argv
self.execute(*args, **cmd_options)
File "/usr/local/lib/python3.9/site-packages/django/core/management/base.py", line 448, in execute
output = self.handle(*args, **options)
File "/usr/local/lib/python3.9/site-packages/django/core/management/base.py", line 96, in wrapped
res = handle_func(*args, **kwargs)
File "/usr/local/lib/python3.9/site-packages/django/core/management/commands/migrate.py", line 349, in handle
post_migrate_state = executor.migrate(
File "/usr/local/lib/python3.9/site-packages/django/db/migrations/executor.py", line 141, in migrate
state = self._migrate_all_backwards(plan, full_plan, fake=fake)
File "/usr/local/lib/python3.9/site-packages/django/db/migrations/executor.py", line 219, in _migrate_all_backwards
self.unapply_migration(states[migration], migration, fake=fake)
File "/usr/local/lib/python3.9/site-packages/django/db/migrations/executor.py", line 279, in unapply_migration
state = migration.unapply(state, schema_editor)
File "/usr/local/lib/python3.9/site-packages/django/db/migrations/migration.py", line 191, in unapply
operation.database_backwards(
File "/usr/local/lib/python3.9/site-packages/django/db/migrations/operations/fields.py", line 178, in database_backwards
schema_editor.add_field(from_model, to_model._meta.get_field(self.name))
File "/usr/local/lib/python3.9/site-packages/django/db/backends/sqlite3/schema.py", line 397, in add_field
self._remake_table(model, create_field=field)
File "/usr/local/lib/python3.9/site-packages/django/db/backends/sqlite3/schema.py", line 333, in _remake_table
self.execute(
File "/usr/local/lib/python3.9/site-packages/django/db/backends/base/schema.py", line 199, in execute
cursor.execute(sql, params)
File "/usr/local/lib/python3.9/site-packages/django/db/backends/utils.py", line 67, in execute
return self._execute_with_wrappers(
File "/usr/local/lib/python3.9/site-packages/django/db/backends/utils.py", line 80, in _execute_with_wrappers
return executor(sql, params, many, context)
File "/usr/local/lib/python3.9/site-packages/django/db/backends/utils.py", line 89, in _execute
return self.cursor.execute(sql, params)
File "/usr/local/lib/python3.9/site-packages/django/db/utils.py", line 91, in __exit__
raise dj_exc_value.with_traceback(traceback) from exc_value
File "/usr/local/lib/python3.9/site-packages/django/db/backends/utils.py", line 89, in _execute
return self.cursor.execute(sql, params)
File "/usr/local/lib/python3.9/site-packages/django/db/backends/sqlite3/base.py", line 357, in execute
return Database.Cursor.execute(self, query, params)
django.db.utils.IntegrityError: NOT NULL constraint failed: new__documents_paperlesstask.name
```
### Browser logs
_No response_
### Paperless-ngx version
1.11.3
### Host OS
Unraid 6.11.5
### Installation method
Docker - official image
### Browser
Firefox
### Configuration changes
_No response_
### Other
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/documents/migrations/1022_paperlesstask.py`
Content:
```
1 # Generated by Django 4.0.4 on 2022-05-23 07:14
2
3 from django.db import migrations, models
4 import django.db.models.deletion
5
6
7 class Migration(migrations.Migration):
8
9 dependencies = [
10 ("documents", "1021_webp_thumbnail_conversion"),
11 ]
12
13 operations = [
14 migrations.CreateModel(
15 name="PaperlessTask",
16 fields=[
17 (
18 "id",
19 models.AutoField(
20 auto_created=True,
21 primary_key=True,
22 serialize=False,
23 verbose_name="ID",
24 ),
25 ),
26 ("task_id", models.CharField(max_length=128)),
27 ("name", models.CharField(max_length=256)),
28 (
29 "created",
30 models.DateTimeField(auto_now=True, verbose_name="created"),
31 ),
32 (
33 "started",
34 models.DateTimeField(null=True, verbose_name="started"),
35 ),
36 ("acknowledged", models.BooleanField(default=False)),
37 (
38 "attempted_task",
39 models.OneToOneField(
40 blank=True,
41 null=True,
42 on_delete=django.db.models.deletion.CASCADE,
43 related_name="attempted_task",
44 # This is a dummy field, 1026 will fix up the column
45 # This manual change is required, as django doesn't django doesn't really support
46 # removing an app which has migration deps like this
47 to="documents.document",
48 ),
49 ),
50 ],
51 )
52 ]
53
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/documents/migrations/1022_paperlesstask.py b/src/documents/migrations/1022_paperlesstask.py
--- a/src/documents/migrations/1022_paperlesstask.py
+++ b/src/documents/migrations/1022_paperlesstask.py
@@ -24,7 +24,7 @@
),
),
("task_id", models.CharField(max_length=128)),
- ("name", models.CharField(max_length=256)),
+ ("name", models.CharField(max_length=256, null=True)),
(
"created",
models.DateTimeField(auto_now=True, verbose_name="created"),
|
{"golden_diff": "diff --git a/src/documents/migrations/1022_paperlesstask.py b/src/documents/migrations/1022_paperlesstask.py\n--- a/src/documents/migrations/1022_paperlesstask.py\n+++ b/src/documents/migrations/1022_paperlesstask.py\n@@ -24,7 +24,7 @@\n ),\n ),\n (\"task_id\", models.CharField(max_length=128)),\n- (\"name\", models.CharField(max_length=256)),\n+ (\"name\", models.CharField(max_length=256, null=True)),\n (\n \"created\",\n models.DateTimeField(auto_now=True, verbose_name=\"created\"),\n", "issue": "[BUG] Rollback doesn't work as described in the documentation\n### Description\n\nRollback doesn't work properly as described in the documentation.\n\n### Steps to reproduce\n\n1. Go to 'https://docs.paperless-ngx.com/setup/#moving-back-to-paperless'\r\n2. Open a terminal from the container and execute the command like mentioned at 'Or without docker:'\r\n3. Error\n\n### Webserver logs\n\n```bash\nroot@ad747fe4e9e8:/usr/src/paperless/src# python3 manage.py migrate documents 0023\r\nOperations to perform:\r\n Target specific migration: 0023_document_current_filename, from documents\r\nRunning migrations:\r\n Rendering model states... DONE\r\n Unapplying paperless_mail.0016_mailrule_consumption_scope... OK\r\n Unapplying paperless_mail.0015_alter_mailrule_action... OK\r\n Unapplying paperless_mail.0014_alter_mailrule_action... OK\r\n Unapplying paperless_mail.0013_merge_20220412_1051... OK\r\n Unapplying paperless_mail.0012_alter_mailrule_assign_tags... OK\r\n Unapplying paperless_mail.0011_remove_mailrule_assign_tag... OK\r\n Unapplying paperless_mail.0010_auto_20220311_1602... OK\r\n Unapplying paperless_mail.0009_mailrule_assign_tags... OK\r\n Unapplying paperless_mail.0009_alter_mailrule_action_alter_mailrule_folder... OK\r\n Unapplying paperless_mail.0008_auto_20210516_0940... OK\r\n Unapplying paperless_mail.0007_auto_20210106_0138... OK\r\n Unapplying paperless_mail.0006_auto_20210101_2340... OK\r\n Unapplying paperless_mail.0005_help_texts... OK\r\n Unapplying paperless_mail.0004_mailrule_order... OK\r\n Unapplying paperless_mail.0003_auto_20201118_1940... OK\r\n Unapplying paperless_mail.0002_auto_20201117_1334... OK\r\n Unapplying paperless_mail.0001_initial... OK\r\n Unapplying documents.1028_remove_paperlesstask_task_args_and_more... OK\r\n Unapplying documents.1027_remove_paperlesstask_attempted_task_and_more... 
OK\r\n Unapplying documents.1026_transition_to_celery...Traceback (most recent call last):\r\n File \"/usr/local/lib/python3.9/site-packages/django/db/backends/utils.py\", line 89, in _execute\r\n return self.cursor.execute(sql, params)\r\n File \"/usr/local/lib/python3.9/site-packages/django/db/backends/sqlite3/base.py\", line 357, in execute\r\n return Database.Cursor.execute(self, query, params)\r\nsqlite3.IntegrityError: NOT NULL constraint failed: new__documents_paperlesstask.name\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/src/paperless/src/manage.py\", line 11, in <module>\r\n execute_from_command_line(sys.argv)\r\n File \"/usr/local/lib/python3.9/site-packages/django/core/management/__init__.py\", line 446, in execute_from_command_line\r\n utility.execute()\r\n File \"/usr/local/lib/python3.9/site-packages/django/core/management/__init__.py\", line 440, in execute\r\n self.fetch_command(subcommand).run_from_argv(self.argv)\r\n File \"/usr/local/lib/python3.9/site-packages/django/core/management/base.py\", line 402, in run_from_argv\r\n self.execute(*args, **cmd_options)\r\n File \"/usr/local/lib/python3.9/site-packages/django/core/management/base.py\", line 448, in execute\r\n output = self.handle(*args, **options)\r\n File \"/usr/local/lib/python3.9/site-packages/django/core/management/base.py\", line 96, in wrapped\r\n res = handle_func(*args, **kwargs)\r\n File \"/usr/local/lib/python3.9/site-packages/django/core/management/commands/migrate.py\", line 349, in handle\r\n post_migrate_state = executor.migrate(\r\n File \"/usr/local/lib/python3.9/site-packages/django/db/migrations/executor.py\", line 141, in migrate\r\n state = self._migrate_all_backwards(plan, full_plan, fake=fake)\r\n File \"/usr/local/lib/python3.9/site-packages/django/db/migrations/executor.py\", line 219, in _migrate_all_backwards\r\n self.unapply_migration(states[migration], migration, fake=fake)\r\n File \"/usr/local/lib/python3.9/site-packages/django/db/migrations/executor.py\", line 279, in unapply_migration\r\n state = migration.unapply(state, schema_editor)\r\n File \"/usr/local/lib/python3.9/site-packages/django/db/migrations/migration.py\", line 191, in unapply\r\n operation.database_backwards(\r\n File \"/usr/local/lib/python3.9/site-packages/django/db/migrations/operations/fields.py\", line 178, in database_backwards\r\n schema_editor.add_field(from_model, to_model._meta.get_field(self.name))\r\n File \"/usr/local/lib/python3.9/site-packages/django/db/backends/sqlite3/schema.py\", line 397, in add_field\r\n self._remake_table(model, create_field=field)\r\n File \"/usr/local/lib/python3.9/site-packages/django/db/backends/sqlite3/schema.py\", line 333, in _remake_table\r\n self.execute(\r\n File \"/usr/local/lib/python3.9/site-packages/django/db/backends/base/schema.py\", line 199, in execute\r\n cursor.execute(sql, params)\r\n File \"/usr/local/lib/python3.9/site-packages/django/db/backends/utils.py\", line 67, in execute\r\n return self._execute_with_wrappers(\r\n File \"/usr/local/lib/python3.9/site-packages/django/db/backends/utils.py\", line 80, in _execute_with_wrappers\r\n return executor(sql, params, many, context)\r\n File \"/usr/local/lib/python3.9/site-packages/django/db/backends/utils.py\", line 89, in _execute\r\n return self.cursor.execute(sql, params)\r\n File \"/usr/local/lib/python3.9/site-packages/django/db/utils.py\", line 91, in __exit__\r\n raise dj_exc_value.with_traceback(traceback) from 
exc_value\r\n File \"/usr/local/lib/python3.9/site-packages/django/db/backends/utils.py\", line 89, in _execute\r\n return self.cursor.execute(sql, params)\r\n File \"/usr/local/lib/python3.9/site-packages/django/db/backends/sqlite3/base.py\", line 357, in execute\r\n return Database.Cursor.execute(self, query, params)\r\ndjango.db.utils.IntegrityError: NOT NULL constraint failed: new__documents_paperlesstask.name\n```\n\n\n### Browser logs\n\n_No response_\n\n### Paperless-ngx version\n\n1.11.3\n\n### Host OS\n\nUnraid 6.11.5\n\n### Installation method\n\nDocker - official image\n\n### Browser\n\nFirefox\n\n### Configuration changes\n\n_No response_\n\n### Other\n\n_No response_\n", "before_files": [{"content": "# Generated by Django 4.0.4 on 2022-05-23 07:14\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n (\"documents\", \"1021_webp_thumbnail_conversion\"),\n ]\n\n operations = [\n migrations.CreateModel(\n name=\"PaperlessTask\",\n fields=[\n (\n \"id\",\n models.AutoField(\n auto_created=True,\n primary_key=True,\n serialize=False,\n verbose_name=\"ID\",\n ),\n ),\n (\"task_id\", models.CharField(max_length=128)),\n (\"name\", models.CharField(max_length=256)),\n (\n \"created\",\n models.DateTimeField(auto_now=True, verbose_name=\"created\"),\n ),\n (\n \"started\",\n models.DateTimeField(null=True, verbose_name=\"started\"),\n ),\n (\"acknowledged\", models.BooleanField(default=False)),\n (\n \"attempted_task\",\n models.OneToOneField(\n blank=True,\n null=True,\n on_delete=django.db.models.deletion.CASCADE,\n related_name=\"attempted_task\",\n # This is a dummy field, 1026 will fix up the column\n # This manual change is required, as django doesn't django doesn't really support\n # removing an app which has migration deps like this\n to=\"documents.document\",\n ),\n ),\n ],\n )\n ]\n", "path": "src/documents/migrations/1022_paperlesstask.py"}], "after_files": [{"content": "# Generated by Django 4.0.4 on 2022-05-23 07:14\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n (\"documents\", \"1021_webp_thumbnail_conversion\"),\n ]\n\n operations = [\n migrations.CreateModel(\n name=\"PaperlessTask\",\n fields=[\n (\n \"id\",\n models.AutoField(\n auto_created=True,\n primary_key=True,\n serialize=False,\n verbose_name=\"ID\",\n ),\n ),\n (\"task_id\", models.CharField(max_length=128)),\n (\"name\", models.CharField(max_length=256, null=True)),\n (\n \"created\",\n models.DateTimeField(auto_now=True, verbose_name=\"created\"),\n ),\n (\n \"started\",\n models.DateTimeField(null=True, verbose_name=\"started\"),\n ),\n (\"acknowledged\", models.BooleanField(default=False)),\n (\n \"attempted_task\",\n models.OneToOneField(\n blank=True,\n null=True,\n on_delete=django.db.models.deletion.CASCADE,\n related_name=\"attempted_task\",\n # This is a dummy field, 1026 will fix up the column\n # This manual change is required, as django doesn't django doesn't really support\n # removing an app which has migration deps like this\n to=\"documents.document\",\n ),\n ),\n ],\n )\n ]\n", "path": "src/documents/migrations/1022_paperlesstask.py"}]}
| 2,375 | 147 |
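The paperless-ngx failure above is a generic SQLite pattern rather than anything specific to Django: re-adding a NOT NULL column without a default to a table that already holds rows cannot succeed, and Django's SQLite backend hits it while remaking the table during the backward migration. A minimal reproduction outside Django, assuming nothing beyond the standard library:

```python
import sqlite3

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE task (id INTEGER PRIMARY KEY, task_id TEXT)")
con.execute("INSERT INTO task (task_id) VALUES ('abc123')")

# Django's SQLite schema editor adds a column by creating the new table
# layout and copying the old rows across; with NOT NULL and no default,
# the copy fails just like the traceback in the issue.
con.execute(
    "CREATE TABLE new_task"
    " (id INTEGER PRIMARY KEY, task_id TEXT, name TEXT NOT NULL)"
)
try:
    con.execute("INSERT INTO new_task (id, task_id) SELECT id, task_id FROM task")
except sqlite3.IntegrityError as exc:
    print(exc)  # NOT NULL constraint failed: new_task.name
```

Making the column nullable, as the one-line change in the golden diff does, lets the row copy succeed and the rollback complete.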
gh_patches_debug_524
|
rasdani/github-patches
|
git_diff
|
encode__uvicorn-660
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Reload Behavior: Documentation != Code, Unnecessary Reloads
Hey! I upgraded to 0.11.4 shortly after release today, and it has a couple of quirks.
1. The documentation [here](https://github.com/encode/uvicorn/compare/0.11.3...0.11.4#diff-1819b1daaccb3d358620ade9c67e9118R22) says "python file changes" but the code says ["all non-dotfile changes"](https://github.com/encode/uvicorn/compare/0.11.3...0.11.4#diff-b0da863c7164698a2ef0fa805e4a9197R40).
2. That behavior, while it appears from the test cases to be intended to roll up things like `.graphql` files, also unfortunately rolls up `.pyc` files, meaning every restart is a double restart:
```
WARNING: Detected file change in 'app/main.py'. Reloading...
INFO: Shutting down
INFO: Waiting for application shutdown.
INFO: Application shutdown complete.
INFO: Finished server process [87024]
INFO: Started server process [87080]
INFO: Waiting for application startup.
INFO: Application startup complete.
WARNING: Detected file change in 'app/__pycache__/main.cpython-37.pyc'. Reloading...
INFO: Shutting down
INFO: Waiting for application shutdown.
INFO: Application shutdown complete.
INFO: Finished server process [87080]
INFO: Started server process [87093]
INFO: Waiting for application startup.
INFO: Application startup complete.
```
It might be better to use [Path.glob](https://docs.python.org/3/library/pathlib.html#pathlib.Path.glob) so users can specify file extensions and paths more explicitly than with `os.walk`, but it's published already so maybe as another flag?
3. A minor point, but worth noting in the docs: `--reload_dir` on the CLI becomes `reload_dirs=['my_dir']` in code: `uvicorn.run('app.main:app', host="0.0.0.0", port=8000, reload=True, reload_dirs=['app'])`
Thanks for making this great library!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `uvicorn/__init__.py`
Content:
```
1 from uvicorn.config import Config
2 from uvicorn.main import Server, main, run
3
4 __version__ = "0.11.4"
5 __all__ = ["main", "run", "Config", "Server"]
6
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/uvicorn/__init__.py b/uvicorn/__init__.py
--- a/uvicorn/__init__.py
+++ b/uvicorn/__init__.py
@@ -1,5 +1,5 @@
from uvicorn.config import Config
from uvicorn.main import Server, main, run
-__version__ = "0.11.4"
+__version__ = "0.11.5"
__all__ = ["main", "run", "Config", "Server"]
|
{"golden_diff": "diff --git a/uvicorn/__init__.py b/uvicorn/__init__.py\n--- a/uvicorn/__init__.py\n+++ b/uvicorn/__init__.py\n@@ -1,5 +1,5 @@\n from uvicorn.config import Config\n from uvicorn.main import Server, main, run\n \n-__version__ = \"0.11.4\"\n+__version__ = \"0.11.5\"\n __all__ = [\"main\", \"run\", \"Config\", \"Server\"]\n", "issue": "Reload Behavior: Documentation != Code, Unnecessary Reloads\nHey! I upgraded to 0.11.4 shortly after release today, and it has a couple of quirks.\r\n\r\n1. The documentation [here](https://github.com/encode/uvicorn/compare/0.11.3...0.11.4#diff-1819b1daaccb3d358620ade9c67e9118R22) says \"python file changes\" but the code says [\"all non-dotfile changes\"](https://github.com/encode/uvicorn/compare/0.11.3...0.11.4#diff-b0da863c7164698a2ef0fa805e4a9197R40).\r\n2. That behavior, while from the test cases seems to be intended to roll up things like `.graphql` files, also unfortunately rolls up `.pyc` files, meaning every restart is a double restart:\r\n\r\n```\r\nWARNING: Detected file change in 'app/main.py'. Reloading...\r\nINFO: Shutting down\r\nINFO: Waiting for application shutdown.\r\nINFO: Application shutdown complete.\r\nINFO: Finished server process [87024]\r\nINFO: Started server process [87080]\r\nINFO: Waiting for application startup.\r\nINFO: Application startup complete.\r\nWARNING: Detected file change in 'app/__pycache__/main.cpython-37.pyc'. Reloading...\r\nINFO: Shutting down\r\nINFO: Waiting for application shutdown.\r\nINFO: Application shutdown complete.\r\nINFO: Finished server process [87080]\r\nINFO: Started server process [87093]\r\nINFO: Waiting for application startup.\r\nINFO: Application startup complete.\r\n```\r\n\r\nIt might be better to use [Path.glob](https://docs.python.org/3/library/pathlib.html#pathlib.Path.glob) so users can specify file extensions and paths more explicitly than with `os.walk`, but it's published already so maybe as another flag?\r\n\r\n3. A minor point, but worth noting in the docs: `--reload_dir` on the CLI becomes `reload_dirs=['my_dir']` in code: `uvicorn.run('app.main:app', host=\"0.0.0.0\", port=8000, reload=True, reload_dirs=['app'])`\r\n\r\nThanks for making this great library!\n", "before_files": [{"content": "from uvicorn.config import Config\nfrom uvicorn.main import Server, main, run\n\n__version__ = \"0.11.4\"\n__all__ = [\"main\", \"run\", \"Config\", \"Server\"]\n", "path": "uvicorn/__init__.py"}], "after_files": [{"content": "from uvicorn.config import Config\nfrom uvicorn.main import Server, main, run\n\n__version__ = \"0.11.5\"\n__all__ = [\"main\", \"run\", \"Config\", \"Server\"]\n", "path": "uvicorn/__init__.py"}]}
| 840 | 110 |
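The uvicorn record above only bumps the version string, so the behavioral half of the report — `.pyc` files under `__pycache__` triggering a second restart — is not addressed by this particular diff. A rough sketch of the kind of suffix filtering the reporter suggests, assuming a pathlib walk and an illustrative set of watched extensions (this is not uvicorn's actual reloader code):

```python
from pathlib import Path
from typing import Dict, Iterator

WATCHED_SUFFIXES = {".py", ".graphql"}  # assumed extensions, for illustration only


def iter_watched_files(root: str) -> Iterator[Path]:
    for path in Path(root).rglob("*"):
        if not path.is_file():
            continue
        # Skip dotfiles and byte-compiled artifacts under __pycache__.
        if path.name.startswith(".") or "__pycache__" in path.parts:
            continue
        if path.suffix in WATCHED_SUFFIXES:
            yield path


def snapshot_mtimes(root: str) -> Dict[Path, float]:
    # One polling cycle: record modification times for every watched file.
    return {path: path.stat().st_mtime for path in iter_watched_files(root)}
```

Comparing two successive snapshots would then only ever report source-level changes, so a save in `app/main.py` no longer causes a follow-up restart for `app/__pycache__/main.cpython-37.pyc`.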
gh_patches_debug_6220
|
rasdani/github-patches
|
git_diff
|
readthedocs__readthedocs.org-5955
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix the count of total projects shown on the homepage
On the homepage (https://readthedocs.org/), we show the total count of projects.

This number also includes projects that are identified as spam.
It would be good to exclude spam projects from this count.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `readthedocs/core/views/__init__.py`
Content:
```
1 """
2 Core views.
3
4 Including the main homepage, documentation and header rendering,
5 and server errors.
6 """
7
8 import os
9 import logging
10 from urllib.parse import urlparse
11
12 from django.conf import settings
13 from django.http import HttpResponseRedirect, Http404, JsonResponse
14 from django.shortcuts import render, get_object_or_404, redirect
15 from django.views.generic import TemplateView
16 from django.views.static import serve as static_serve
17
18 from readthedocs.builds.models import Version
19 from readthedocs.core.utils.general import wipe_version_via_slugs
20 from readthedocs.core.resolver import resolve_path
21 from readthedocs.core.symlink import PrivateSymlink, PublicSymlink
22 from readthedocs.projects.constants import PRIVATE
23 from readthedocs.projects.models import HTMLFile, Project
24 from readthedocs.redirects.utils import (
25 get_redirect_response,
26 project_and_path_from_request,
27 language_and_version_from_path
28 )
29
30 log = logging.getLogger(__name__)
31
32
33 class NoProjectException(Exception):
34 pass
35
36
37 class HomepageView(TemplateView):
38
39 template_name = 'homepage.html'
40
41 def get_context_data(self, **kwargs):
42 """Add latest builds and featured projects."""
43 context = super().get_context_data(**kwargs)
44 context['featured_list'] = Project.objects.filter(featured=True)
45 context['projects_count'] = Project.objects.count()
46 return context
47
48
49 class SupportView(TemplateView):
50 template_name = 'support.html'
51
52 def get_context_data(self, **kwargs):
53 context = super().get_context_data(**kwargs)
54 support_email = settings.SUPPORT_EMAIL
55 if not support_email:
56 support_email = 'support@{domain}'.format(
57 domain=settings.PRODUCTION_DOMAIN
58 )
59
60 context['support_email'] = support_email
61 return context
62
63
64 def random_page(request, project_slug=None): # pylint: disable=unused-argument
65 html_file = HTMLFile.objects.internal().order_by('?')
66 if project_slug:
67 html_file = html_file.filter(project__slug=project_slug)
68 html_file = html_file.first()
69 if html_file is None:
70 raise Http404
71 url = html_file.get_absolute_url()
72 return HttpResponseRedirect(url)
73
74
75 def wipe_version(request, project_slug, version_slug):
76 version = get_object_or_404(
77 Version.internal.all(),
78 project__slug=project_slug,
79 slug=version_slug,
80 )
81 # We need to check by ``for_admin_user`` here to allow members of the
82 # ``Admin`` team (which doesn't own the project) under the corporate site.
83 if version.project not in Project.objects.for_admin_user(user=request.user):
84 raise Http404('You must own this project to wipe it.')
85
86 if request.method == 'POST':
87 wipe_version_via_slugs(
88 version_slug=version_slug,
89 project_slug=project_slug,
90 )
91 return redirect('project_version_list', project_slug)
92 return render(
93 request,
94 'wipe_version.html',
95 {'version': version, 'project': version.project},
96 )
97
98
99 def server_error_500(request, template_name='500.html'):
100 """A simple 500 handler so we get media."""
101 r = render(request, template_name)
102 r.status_code = 500
103 return r
104
105
106 def server_error_404(request, exception=None, template_name='404.html'): # pylint: disable=unused-argument # noqa
107 """
108 A simple 404 handler so we get media.
109
110 .. note::
111
112 Marking exception as optional to make /404/ testing page to work.
113 """
114 response = get_redirect_response(request, full_path=request.get_full_path())
115
116 # Return a redirect response if there is one
117 if response:
118 if response.url == request.build_absolute_uri():
119 # check that we do have a response and avoid infinite redirect
120 log.warning(
121 'Infinite Redirect: FROM URL is the same than TO URL. url=%s',
122 response.url,
123 )
124 else:
125 return response
126
127 # Try to serve custom 404 pages if it's a subdomain/cname
128 if getattr(request, 'subdomain', False) or getattr(request, 'cname', False):
129 return server_error_404_subdomain(request, template_name)
130
131 # Return the default 404 page generated by Read the Docs
132 r = render(request, template_name)
133 r.status_code = 404
134 return r
135
136
137 def server_error_404_subdomain(request, template_name='404.html'):
138 """
139 Handler for 404 pages on subdomains.
140
141 Check if the project associated has a custom ``404.html`` and serve this
142 page. First search for a 404 page in the current version, then continues
143 with the default version and finally, if none of them are found, the Read
144 the Docs default page (Maze Found) is rendered by Django and served.
145 """
146
147 def resolve_404_path(project, version_slug=None, language=None, filename='404.html'):
148 """
149 Helper to resolve the path of ``404.html`` for project.
150
151 The resolution is based on ``project`` object, version slug and
152 language.
153
154 :returns: tuple containing the (basepath, filename)
155 :rtype: tuple
156 """
157 filename = resolve_path(
158 project,
159 version_slug=version_slug,
160 language=language,
161 filename=filename,
162 subdomain=True, # subdomain will make it a "full" path without a URL prefix
163 )
164
165 # This breaks path joining, by ignoring the root when given an "absolute" path
166 if filename[0] == '/':
167 filename = filename[1:]
168
169 version = None
170 if version_slug:
171 version_qs = project.versions.filter(slug=version_slug)
172 if version_qs.exists():
173 version = version_qs.first()
174
175 private = any([
176 version and version.privacy_level == PRIVATE,
177 not version and project.privacy_level == PRIVATE,
178 ])
179 if private:
180 symlink = PrivateSymlink(project)
181 else:
182 symlink = PublicSymlink(project)
183 basepath = symlink.project_root
184 fullpath = os.path.join(basepath, filename)
185 return (basepath, filename, fullpath)
186
187 project, full_path = project_and_path_from_request(request, request.get_full_path())
188
189 if project:
190 language = None
191 version_slug = None
192 schema, netloc, path, params, query, fragments = urlparse(full_path)
193 if not project.single_version:
194 language, version_slug, path = language_and_version_from_path(path)
195
196 # Firstly, attempt to serve the 404 of the current version (version_slug)
197 # Secondly, try to serve the 404 page for the default version
198 # (project.get_default_version())
199 for slug in (version_slug, project.get_default_version()):
200 for tryfile in ('404.html', '404/index.html'):
201 basepath, filename, fullpath = resolve_404_path(project, slug, language, tryfile)
202 if os.path.exists(fullpath):
203 log.debug(
204 'serving 404.html page current version: [project: %s] [version: %s]',
205 project.slug,
206 slug,
207 )
208 r = static_serve(request, filename, basepath)
209 r.status_code = 404
210 return r
211
212 # Finally, return the default 404 page generated by Read the Docs
213 r = render(request, template_name)
214 r.status_code = 404
215 return r
216
217
218 def do_not_track(request):
219 dnt_header = request.META.get('HTTP_DNT')
220
221 # https://w3c.github.io/dnt/drafts/tracking-dnt.html#status-representation
222 return JsonResponse( # pylint: disable=redundant-content-type-for-json-response
223 {
224 'policy': 'https://docs.readthedocs.io/en/latest/privacy-policy.html',
225 'same-party': [
226 'readthedocs.org',
227 'readthedocs.com',
228 'readthedocs.io', # .org Documentation Sites
229 'readthedocs-hosted.com', # .com Documentation Sites
230 ],
231 'tracking': 'N' if dnt_header == '1' else 'T',
232 }, content_type='application/tracking-status+json',
233 )
234
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/readthedocs/core/views/__init__.py b/readthedocs/core/views/__init__.py
--- a/readthedocs/core/views/__init__.py
+++ b/readthedocs/core/views/__init__.py
@@ -42,7 +42,7 @@
"""Add latest builds and featured projects."""
context = super().get_context_data(**kwargs)
context['featured_list'] = Project.objects.filter(featured=True)
- context['projects_count'] = Project.objects.count()
+ context['projects_count'] = Project.objects.exclude(users__profile__banned=True).count()
return context
|
{"golden_diff": "diff --git a/readthedocs/core/views/__init__.py b/readthedocs/core/views/__init__.py\n--- a/readthedocs/core/views/__init__.py\n+++ b/readthedocs/core/views/__init__.py\n@@ -42,7 +42,7 @@\n \"\"\"Add latest builds and featured projects.\"\"\"\n context = super().get_context_data(**kwargs)\n context['featured_list'] = Project.objects.filter(featured=True)\n- context['projects_count'] = Project.objects.count()\n+ context['projects_count'] = Project.objects.exclude(users__profile__banned=True).count()\n return context\n", "issue": "Fix the count of total projects shown on the homepage\nOn the hompage (https://readthedocs.org/), we show the total count of the projects.\r\n\r\n\r\n\r\nThis number also includes projects that are identified as spam.\r\nIt would be good if we exclude spam projects from it.\n", "before_files": [{"content": "\"\"\"\nCore views.\n\nIncluding the main homepage, documentation and header rendering,\nand server errors.\n\"\"\"\n\nimport os\nimport logging\nfrom urllib.parse import urlparse\n\nfrom django.conf import settings\nfrom django.http import HttpResponseRedirect, Http404, JsonResponse\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.views.generic import TemplateView\nfrom django.views.static import serve as static_serve\n\nfrom readthedocs.builds.models import Version\nfrom readthedocs.core.utils.general import wipe_version_via_slugs\nfrom readthedocs.core.resolver import resolve_path\nfrom readthedocs.core.symlink import PrivateSymlink, PublicSymlink\nfrom readthedocs.projects.constants import PRIVATE\nfrom readthedocs.projects.models import HTMLFile, Project\nfrom readthedocs.redirects.utils import (\n get_redirect_response,\n project_and_path_from_request,\n language_and_version_from_path\n)\n\nlog = logging.getLogger(__name__)\n\n\nclass NoProjectException(Exception):\n pass\n\n\nclass HomepageView(TemplateView):\n\n template_name = 'homepage.html'\n\n def get_context_data(self, **kwargs):\n \"\"\"Add latest builds and featured projects.\"\"\"\n context = super().get_context_data(**kwargs)\n context['featured_list'] = Project.objects.filter(featured=True)\n context['projects_count'] = Project.objects.count()\n return context\n\n\nclass SupportView(TemplateView):\n template_name = 'support.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n support_email = settings.SUPPORT_EMAIL\n if not support_email:\n support_email = 'support@{domain}'.format(\n domain=settings.PRODUCTION_DOMAIN\n )\n\n context['support_email'] = support_email\n return context\n\n\ndef random_page(request, project_slug=None): # pylint: disable=unused-argument\n html_file = HTMLFile.objects.internal().order_by('?')\n if project_slug:\n html_file = html_file.filter(project__slug=project_slug)\n html_file = html_file.first()\n if html_file is None:\n raise Http404\n url = html_file.get_absolute_url()\n return HttpResponseRedirect(url)\n\n\ndef wipe_version(request, project_slug, version_slug):\n version = get_object_or_404(\n Version.internal.all(),\n project__slug=project_slug,\n slug=version_slug,\n )\n # We need to check by ``for_admin_user`` here to allow members of the\n # ``Admin`` team (which doesn't own the project) under the corporate site.\n if version.project not in Project.objects.for_admin_user(user=request.user):\n raise Http404('You must own this project to wipe it.')\n\n if request.method == 'POST':\n wipe_version_via_slugs(\n version_slug=version_slug,\n project_slug=project_slug,\n )\n 
return redirect('project_version_list', project_slug)\n return render(\n request,\n 'wipe_version.html',\n {'version': version, 'project': version.project},\n )\n\n\ndef server_error_500(request, template_name='500.html'):\n \"\"\"A simple 500 handler so we get media.\"\"\"\n r = render(request, template_name)\n r.status_code = 500\n return r\n\n\ndef server_error_404(request, exception=None, template_name='404.html'): # pylint: disable=unused-argument # noqa\n \"\"\"\n A simple 404 handler so we get media.\n\n .. note::\n\n Marking exception as optional to make /404/ testing page to work.\n \"\"\"\n response = get_redirect_response(request, full_path=request.get_full_path())\n\n # Return a redirect response if there is one\n if response:\n if response.url == request.build_absolute_uri():\n # check that we do have a response and avoid infinite redirect\n log.warning(\n 'Infinite Redirect: FROM URL is the same than TO URL. url=%s',\n response.url,\n )\n else:\n return response\n\n # Try to serve custom 404 pages if it's a subdomain/cname\n if getattr(request, 'subdomain', False) or getattr(request, 'cname', False):\n return server_error_404_subdomain(request, template_name)\n\n # Return the default 404 page generated by Read the Docs\n r = render(request, template_name)\n r.status_code = 404\n return r\n\n\ndef server_error_404_subdomain(request, template_name='404.html'):\n \"\"\"\n Handler for 404 pages on subdomains.\n\n Check if the project associated has a custom ``404.html`` and serve this\n page. First search for a 404 page in the current version, then continues\n with the default version and finally, if none of them are found, the Read\n the Docs default page (Maze Found) is rendered by Django and served.\n \"\"\"\n\n def resolve_404_path(project, version_slug=None, language=None, filename='404.html'):\n \"\"\"\n Helper to resolve the path of ``404.html`` for project.\n\n The resolution is based on ``project`` object, version slug and\n language.\n\n :returns: tuple containing the (basepath, filename)\n :rtype: tuple\n \"\"\"\n filename = resolve_path(\n project,\n version_slug=version_slug,\n language=language,\n filename=filename,\n subdomain=True, # subdomain will make it a \"full\" path without a URL prefix\n )\n\n # This breaks path joining, by ignoring the root when given an \"absolute\" path\n if filename[0] == '/':\n filename = filename[1:]\n\n version = None\n if version_slug:\n version_qs = project.versions.filter(slug=version_slug)\n if version_qs.exists():\n version = version_qs.first()\n\n private = any([\n version and version.privacy_level == PRIVATE,\n not version and project.privacy_level == PRIVATE,\n ])\n if private:\n symlink = PrivateSymlink(project)\n else:\n symlink = PublicSymlink(project)\n basepath = symlink.project_root\n fullpath = os.path.join(basepath, filename)\n return (basepath, filename, fullpath)\n\n project, full_path = project_and_path_from_request(request, request.get_full_path())\n\n if project:\n language = None\n version_slug = None\n schema, netloc, path, params, query, fragments = urlparse(full_path)\n if not project.single_version:\n language, version_slug, path = language_and_version_from_path(path)\n\n # Firstly, attempt to serve the 404 of the current version (version_slug)\n # Secondly, try to serve the 404 page for the default version\n # (project.get_default_version())\n for slug in (version_slug, project.get_default_version()):\n for tryfile in ('404.html', '404/index.html'):\n basepath, filename, fullpath = resolve_404_path(project, 
slug, language, tryfile)\n if os.path.exists(fullpath):\n log.debug(\n 'serving 404.html page current version: [project: %s] [version: %s]',\n project.slug,\n slug,\n )\n r = static_serve(request, filename, basepath)\n r.status_code = 404\n return r\n\n # Finally, return the default 404 page generated by Read the Docs\n r = render(request, template_name)\n r.status_code = 404\n return r\n\n\ndef do_not_track(request):\n dnt_header = request.META.get('HTTP_DNT')\n\n # https://w3c.github.io/dnt/drafts/tracking-dnt.html#status-representation\n return JsonResponse( # pylint: disable=redundant-content-type-for-json-response\n {\n 'policy': 'https://docs.readthedocs.io/en/latest/privacy-policy.html',\n 'same-party': [\n 'readthedocs.org',\n 'readthedocs.com',\n 'readthedocs.io', # .org Documentation Sites\n 'readthedocs-hosted.com', # .com Documentation Sites\n ],\n 'tracking': 'N' if dnt_header == '1' else 'T',\n }, content_type='application/tracking-status+json',\n )\n", "path": "readthedocs/core/views/__init__.py"}], "after_files": [{"content": "\"\"\"\nCore views.\n\nIncluding the main homepage, documentation and header rendering,\nand server errors.\n\"\"\"\n\nimport os\nimport logging\nfrom urllib.parse import urlparse\n\nfrom django.conf import settings\nfrom django.http import HttpResponseRedirect, Http404, JsonResponse\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.views.generic import TemplateView\nfrom django.views.static import serve as static_serve\n\nfrom readthedocs.builds.models import Version\nfrom readthedocs.core.utils.general import wipe_version_via_slugs\nfrom readthedocs.core.resolver import resolve_path\nfrom readthedocs.core.symlink import PrivateSymlink, PublicSymlink\nfrom readthedocs.projects.constants import PRIVATE\nfrom readthedocs.projects.models import HTMLFile, Project\nfrom readthedocs.redirects.utils import (\n get_redirect_response,\n project_and_path_from_request,\n language_and_version_from_path\n)\n\nlog = logging.getLogger(__name__)\n\n\nclass NoProjectException(Exception):\n pass\n\n\nclass HomepageView(TemplateView):\n\n template_name = 'homepage.html'\n\n def get_context_data(self, **kwargs):\n \"\"\"Add latest builds and featured projects.\"\"\"\n context = super().get_context_data(**kwargs)\n context['featured_list'] = Project.objects.filter(featured=True)\n context['projects_count'] = Project.objects.exclude(users__profile__banned=True).count()\n return context\n\n\nclass SupportView(TemplateView):\n template_name = 'support.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n support_email = settings.SUPPORT_EMAIL\n if not support_email:\n support_email = 'support@{domain}'.format(\n domain=settings.PRODUCTION_DOMAIN\n )\n\n context['support_email'] = support_email\n return context\n\n\ndef random_page(request, project_slug=None): # pylint: disable=unused-argument\n html_file = HTMLFile.objects.internal().order_by('?')\n if project_slug:\n html_file = html_file.filter(project__slug=project_slug)\n html_file = html_file.first()\n if html_file is None:\n raise Http404\n url = html_file.get_absolute_url()\n return HttpResponseRedirect(url)\n\n\ndef wipe_version(request, project_slug, version_slug):\n version = get_object_or_404(\n Version.internal.all(),\n project__slug=project_slug,\n slug=version_slug,\n )\n # We need to check by ``for_admin_user`` here to allow members of the\n # ``Admin`` team (which doesn't own the project) under the corporate site.\n if version.project 
not in Project.objects.for_admin_user(user=request.user):\n raise Http404('You must own this project to wipe it.')\n\n if request.method == 'POST':\n wipe_version_via_slugs(\n version_slug=version_slug,\n project_slug=project_slug,\n )\n return redirect('project_version_list', project_slug)\n return render(\n request,\n 'wipe_version.html',\n {'version': version, 'project': version.project},\n )\n\n\ndef server_error_500(request, template_name='500.html'):\n \"\"\"A simple 500 handler so we get media.\"\"\"\n r = render(request, template_name)\n r.status_code = 500\n return r\n\n\ndef server_error_404(request, exception=None, template_name='404.html'): # pylint: disable=unused-argument # noqa\n \"\"\"\n A simple 404 handler so we get media.\n\n .. note::\n\n Marking exception as optional to make /404/ testing page to work.\n \"\"\"\n response = get_redirect_response(request, full_path=request.get_full_path())\n\n # Return a redirect response if there is one\n if response:\n if response.url == request.build_absolute_uri():\n # check that we do have a response and avoid infinite redirect\n log.warning(\n 'Infinite Redirect: FROM URL is the same than TO URL. url=%s',\n response.url,\n )\n else:\n return response\n\n # Try to serve custom 404 pages if it's a subdomain/cname\n if getattr(request, 'subdomain', False) or getattr(request, 'cname', False):\n return server_error_404_subdomain(request, template_name)\n\n # Return the default 404 page generated by Read the Docs\n r = render(request, template_name)\n r.status_code = 404\n return r\n\n\ndef server_error_404_subdomain(request, template_name='404.html'):\n \"\"\"\n Handler for 404 pages on subdomains.\n\n Check if the project associated has a custom ``404.html`` and serve this\n page. First search for a 404 page in the current version, then continues\n with the default version and finally, if none of them are found, the Read\n the Docs default page (Maze Found) is rendered by Django and served.\n \"\"\"\n\n def resolve_404_path(project, version_slug=None, language=None, filename='404.html'):\n \"\"\"\n Helper to resolve the path of ``404.html`` for project.\n\n The resolution is based on ``project`` object, version slug and\n language.\n\n :returns: tuple containing the (basepath, filename)\n :rtype: tuple\n \"\"\"\n filename = resolve_path(\n project,\n version_slug=version_slug,\n language=language,\n filename=filename,\n subdomain=True, # subdomain will make it a \"full\" path without a URL prefix\n )\n\n # This breaks path joining, by ignoring the root when given an \"absolute\" path\n if filename[0] == '/':\n filename = filename[1:]\n\n version = None\n if version_slug:\n version_qs = project.versions.filter(slug=version_slug)\n if version_qs.exists():\n version = version_qs.first()\n\n private = any([\n version and version.privacy_level == PRIVATE,\n not version and project.privacy_level == PRIVATE,\n ])\n if private:\n symlink = PrivateSymlink(project)\n else:\n symlink = PublicSymlink(project)\n basepath = symlink.project_root\n fullpath = os.path.join(basepath, filename)\n return (basepath, filename, fullpath)\n\n project, full_path = project_and_path_from_request(request, request.get_full_path())\n\n if project:\n language = None\n version_slug = None\n schema, netloc, path, params, query, fragments = urlparse(full_path)\n if not project.single_version:\n language, version_slug, path = language_and_version_from_path(path)\n\n # Firstly, attempt to serve the 404 of the current version (version_slug)\n # Secondly, try to serve the 
404 page for the default version\n # (project.get_default_version())\n for slug in (version_slug, project.get_default_version()):\n for tryfile in ('404.html', '404/index.html'):\n basepath, filename, fullpath = resolve_404_path(project, slug, language, tryfile)\n if os.path.exists(fullpath):\n log.debug(\n 'serving 404.html page current version: [project: %s] [version: %s]',\n project.slug,\n slug,\n )\n r = static_serve(request, filename, basepath)\n r.status_code = 404\n return r\n\n # Finally, return the default 404 page generated by Read the Docs\n r = render(request, template_name)\n r.status_code = 404\n return r\n\n\ndef do_not_track(request):\n dnt_header = request.META.get('HTTP_DNT')\n\n # https://w3c.github.io/dnt/drafts/tracking-dnt.html#status-representation\n return JsonResponse( # pylint: disable=redundant-content-type-for-json-response\n {\n 'policy': 'https://docs.readthedocs.io/en/latest/privacy-policy.html',\n 'same-party': [\n 'readthedocs.org',\n 'readthedocs.com',\n 'readthedocs.io', # .org Documentation Sites\n 'readthedocs-hosted.com', # .com Documentation Sites\n ],\n 'tracking': 'N' if dnt_header == '1' else 'T',\n }, content_type='application/tracking-status+json',\n )\n", "path": "readthedocs/core/views/__init__.py"}]}
| 2,833 | 131 |
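The fix in this record counts only projects whose owners are not flagged as banned, treating those as the spam projects mentioned in the issue. Below is a plain-Python sketch of the same exclusion semantics as `Project.objects.exclude(users__profile__banned=True).count()`; the project data is made up purely for illustration and does not reflect Read the Docs' actual models.

```python
# Plain-Python analogue of
# Project.objects.exclude(users__profile__banned=True).count();
# the data below is invented to illustrate the exclusion semantics.
projects = [
    {"slug": "docs-good", "owners": [{"banned": False}]},
    {"slug": "spam-site", "owners": [{"banned": True}]},
    {"slug": "shared", "owners": [{"banned": False}, {"banned": True}]},
]

def visible_project_count(projects):
    # exclude() drops a project as soon as any related profile has banned=True
    return sum(
        1
        for project in projects
        if not any(owner["banned"] for owner in project["owners"])
    )

print(visible_project_count(projects))  # 1 -- only "docs-good" is counted
```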
gh_patches_debug_35579
|
rasdani/github-patches
|
git_diff
|
rotki__rotki-2202
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add the COMBO token to the airdrops
## Abstract
Furucombo has launched a token and airdropped it to their users and donors on January 15th. 
These tokens can be claimed periodically over a vesting period of 8 weeks.
## Motivation
The airdrop is either 350 or 900 COMBO, which at the time of writing is worth about 560 or 1450 USD. It would be nice to have an easy way to track these tokens so the user can claim them.
<!-- Why do you think this feature should be addressed. What is the value added to the users of Rotki and why would they want to have it implemented? -->
## Specification
Add the COMBO token to the airdrop section and update the balance of the COMBO tokens in each vesting cycle.
Each vesting cycle falls on a Friday, over 8 weeks as far as I know. 
I don't know how much will be distributed each cycle, but those who had 350 were able to unlock 87.5 today. 
A list of eligible addresses can be found here:
https://docs.google.com/spreadsheets/d/113AiPrGJ-yp7g-Kdo_IofMWiftc7msEoBMmKNKxJ4fo/edit#gid=0
The announcement of the token airdrop can be found here:
https://medium.com/furucombo/first-furucombo-grant-7b1e48175c99
<!-- If the feature is technical in nature please write as detailed as possible a specification of what needs to be built. -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rotkehlchen/constants/assets.py`
Content:
```
1 from rotkehlchen.assets.asset import Asset, EthereumToken
2
3 A_USD = Asset('USD')
4 A_EUR = Asset('EUR')
5 A_GBP = Asset('GBP')
6 A_JPY = Asset('JPY')
7 A_CNY = Asset('CNY')
8 A_CAD = Asset('CAD')
9 A_KRW = Asset('KRW')
10 A_RUB = Asset('RUB')
11 A_CHF = Asset('CHF')
12 A_TRY = Asset('TRY')
13 A_ZAR = Asset('ZAR')
14 A_AUD = Asset('AUD')
15 A_NZD = Asset('NZD')
16 A_BRL = Asset('BRL')
17 FIAT_CURRENCIES = (
18 A_USD,
19 A_EUR,
20 A_GBP,
21 A_JPY,
22 A_CNY,
23 A_CAD,
24 A_KRW,
25 A_RUB,
26 A_CHF,
27 A_TRY,
28 A_ZAR,
29 A_AUD,
30 A_NZD,
31 A_BRL,
32 )
33
34 S_BTC = 'BTC'
35 S_ETH = 'ETH'
36 S_KSM = 'KSM'
37
38 A_BTC = Asset(S_BTC)
39 A_BCH = Asset('BCH')
40 A_BAL = Asset('BAL')
41 A_BSV = Asset('BSV')
42 A_ETH = Asset(S_ETH)
43 A_ETH2 = Asset('ETH2')
44 A_ETC = Asset('ETC')
45 A_KSM = Asset(S_KSM)
46 A_BAT = EthereumToken('BAT')
47 A_UNI = EthereumToken('UNI')
48 A_1INCH = EthereumToken('1INCH')
49 A_DAI = EthereumToken('DAI')
50 A_SAI = EthereumToken('SAI')
51 A_YFI = EthereumToken('YFI')
52 A_USDT = EthereumToken('USDT')
53 A_USDC = EthereumToken('USDC')
54 A_TUSD = EthereumToken('TUSD')
55 A_ALINK = EthereumToken('aLINK')
56 A_GUSD = EthereumToken('GUSD')
57 A_CRV = EthereumToken('CRV')
58 A_KNC = EthereumToken('KNC')
59 A_WBTC = EthereumToken('WBTC')
60 A_WETH = EthereumToken('WETH')
61 A_ZRX = EthereumToken('ZRX')
62 A_MANA = EthereumToken('MANA')
63 A_PAX = EthereumToken('PAX')
64 A_COMP = EthereumToken('COMP')
65 A_LRC = EthereumToken('LRC')
66 A_LINK = EthereumToken('LINK')
67 A_ADX = EthereumToken('ADX')
68 A_TORN = EthereumToken('TORN')
69 A_CORN = EthereumToken('CORN-2')
70 A_GRAIN = EthereumToken('GRAIN')
71
```
Path: `rotkehlchen/chain/ethereum/airdrops.py`
Content:
```
1 from rotkehlchen.typing import ChecksumEthAddress
2 from typing import List, Dict, TextIO, Iterator, Tuple
3 from rotkehlchen.constants.assets import A_UNI, A_1INCH, A_TORN, A_CORN, A_GRAIN
4 import csv
5 import requests
6 from pathlib import Path
7 from collections import defaultdict
8 from rotkehlchen.errors import RemoteError
9 from rotkehlchen.chain.ethereum.utils import token_normalized_value_decimals
10
11 AIRDROPS = {
12 'uniswap': (
13 # is checksummed
14 'https://gist.githubusercontent.com/LefterisJP/d883cb7187a7c4fcf98c7a62f45568e7/raw/3718c95d572a29b9c3906d7c64726d3bd7524bfd/uniswap.csv', # noqa: E501
15 A_UNI,
16 'https://app.uniswap.org/',
17 ),
18 '1inch': (
19 # is checksummed
20 'https://gist.githubusercontent.com/LefterisJP/8f41d1511bf354d7e56810188116a410/raw/87d967e86e1435aa3a9ddb97ce20531e4e52dbad/1inch.csv', # noqa: E501
21 A_1INCH,
22 'https://1inch.exchange/',
23 ),
24 'tornado': (
25 # is checksummed
26 'https://raw.githubusercontent.com/tornadocash/airdrop/master/airdrop.csv',
27 A_TORN, # Don't have TORN token yet?
28 'https://tornado.cash/',
29 ),
30 'cornichon': (
31 # is checksummed
32 'https://gist.githubusercontent.com/LefterisJP/5199d8bc6caa3253c343cd5084489088/raw/7e9ca4c4772fc50780bfe9997e1c43525e1b7445/cornichon_airdrop.csv', # noqa: E501
33 A_CORN,
34 'https://cornichon.ape.tax/',
35 ),
36 'grain': (
37 # is checksummed
38 'https://gist.githubusercontent.com/LefterisJP/08d7a5b28876741b300c944650c89280/raw/987ab4a92d5363fdbe262f639565732bd1fd3921/grain_iou.csv', # noqa: E501
39 A_GRAIN,
40 'https://claim.harvest.finance/',
41 ),
42 }
43
44
45 def get_airdrop_data(name: str, data_dir: Path) -> Tuple[Iterator, TextIO]:
46 airdrops_dir = data_dir / 'airdrops'
47 airdrops_dir.mkdir(parents=True, exist_ok=True)
48 filename = airdrops_dir / f'{name}.csv'
49 if not filename.is_file():
50 # if not cached, get it from the gist
51 try:
52 request = requests.get(AIRDROPS[name][0])
53 except requests.exceptions.RequestException as e:
54 raise RemoteError(f'Airdrops Gist request failed due to {str(e)}') from e
55
56 with open(filename, 'w') as f:
57 f.write(request.content.decode('utf-8'))
58
59 csvfile = open(filename, 'r')
60 iterator = csv.reader(csvfile)
61 next(iterator) # skip header
62 return iterator, csvfile
63
64
65 def check_airdrops(
66 addresses: List[ChecksumEthAddress],
67 data_dir: Path,
68 ) -> Dict[ChecksumEthAddress, Dict]:
69 """Checks airdrop data for the given list of ethereum addresses
70
71 May raise:
72 - RemoteError if the remote request fails
73 """
74 found_data: Dict[ChecksumEthAddress, Dict] = defaultdict(lambda: defaultdict(dict))
75 for protocol_name, airdrop_data in AIRDROPS.items():
76 data, csvfile = get_airdrop_data(protocol_name, data_dir)
77 for addr, amount, *_ in data:
78 # not doing to_checksum_address() here since the file addresses are checksummed
79 # and doing to_checksum_address() so many times hits performance
80 if protocol_name in ('cornichon', 'tornado', 'grain'):
81 amount = token_normalized_value_decimals(int(amount), 18)
82 if addr in addresses:
83 found_data[addr][protocol_name] = {
84 'amount': str(amount),
85 'asset': airdrop_data[1],
86 'link': airdrop_data[2],
87 }
88 csvfile.close()
89
90 return dict(found_data)
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/rotkehlchen/chain/ethereum/airdrops.py b/rotkehlchen/chain/ethereum/airdrops.py
--- a/rotkehlchen/chain/ethereum/airdrops.py
+++ b/rotkehlchen/chain/ethereum/airdrops.py
@@ -1,12 +1,14 @@
-from rotkehlchen.typing import ChecksumEthAddress
-from typing import List, Dict, TextIO, Iterator, Tuple
-from rotkehlchen.constants.assets import A_UNI, A_1INCH, A_TORN, A_CORN, A_GRAIN
import csv
-import requests
-from pathlib import Path
from collections import defaultdict
-from rotkehlchen.errors import RemoteError
+from pathlib import Path
+from typing import Dict, Iterator, List, TextIO, Tuple
+
+import requests
+
from rotkehlchen.chain.ethereum.utils import token_normalized_value_decimals
+from rotkehlchen.constants.assets import A_1INCH, A_COMBO, A_CORN, A_GRAIN, A_LDO, A_TORN, A_UNI
+from rotkehlchen.errors import RemoteError
+from rotkehlchen.typing import ChecksumEthAddress
AIRDROPS = {
'uniswap': (
@@ -39,6 +41,18 @@
A_GRAIN,
'https://claim.harvest.finance/',
),
+ 'furucombo': (
+ # is checksummed
+ 'https://gist.githubusercontent.com/LefterisJP/69612e155e8063fd6b3422d4efbf22a3/raw/b9023960ab1c478ee2620c456e208e5124115c19/furucombo_airdrop.csv', # noqa: E501
+ A_COMBO,
+ 'https://furucombo.app/',
+ ),
+ 'lido': (
+ # is checksummed
+ 'https://gist.githubusercontent.com/LefterisJP/57a8d65280a482fed6f3e2cc00c0e540/raw/e6ebac56c438cc8a882585c5f5bfba64eb57c424/lido_airdrop.csv', # noqa: E501
+ A_LDO,
+ 'https://lido.fi/',
+ ),
}
@@ -77,7 +91,7 @@
for addr, amount, *_ in data:
# not doing to_checksum_address() here since the file addresses are checksummed
# and doing to_checksum_address() so many times hits performance
- if protocol_name in ('cornichon', 'tornado', 'grain'):
+ if protocol_name in ('cornichon', 'tornado', 'grain', 'lido'):
amount = token_normalized_value_decimals(int(amount), 18)
if addr in addresses:
found_data[addr][protocol_name] = {
diff --git a/rotkehlchen/constants/assets.py b/rotkehlchen/constants/assets.py
--- a/rotkehlchen/constants/assets.py
+++ b/rotkehlchen/constants/assets.py
@@ -68,3 +68,5 @@
A_TORN = EthereumToken('TORN')
A_CORN = EthereumToken('CORN-2')
A_GRAIN = EthereumToken('GRAIN')
+A_COMBO = EthereumToken('COMBO')
+A_LDO = EthereumToken('LDO')
|
{"golden_diff": "diff --git a/rotkehlchen/chain/ethereum/airdrops.py b/rotkehlchen/chain/ethereum/airdrops.py\n--- a/rotkehlchen/chain/ethereum/airdrops.py\n+++ b/rotkehlchen/chain/ethereum/airdrops.py\n@@ -1,12 +1,14 @@\n-from rotkehlchen.typing import ChecksumEthAddress\n-from typing import List, Dict, TextIO, Iterator, Tuple\n-from rotkehlchen.constants.assets import A_UNI, A_1INCH, A_TORN, A_CORN, A_GRAIN\n import csv\n-import requests\n-from pathlib import Path\n from collections import defaultdict\n-from rotkehlchen.errors import RemoteError\n+from pathlib import Path\n+from typing import Dict, Iterator, List, TextIO, Tuple\n+\n+import requests\n+\n from rotkehlchen.chain.ethereum.utils import token_normalized_value_decimals\n+from rotkehlchen.constants.assets import A_1INCH, A_COMBO, A_CORN, A_GRAIN, A_LDO, A_TORN, A_UNI\n+from rotkehlchen.errors import RemoteError\n+from rotkehlchen.typing import ChecksumEthAddress\n \n AIRDROPS = {\n 'uniswap': (\n@@ -39,6 +41,18 @@\n A_GRAIN,\n 'https://claim.harvest.finance/',\n ),\n+ 'furucombo': (\n+ # is checksummed\n+ 'https://gist.githubusercontent.com/LefterisJP/69612e155e8063fd6b3422d4efbf22a3/raw/b9023960ab1c478ee2620c456e208e5124115c19/furucombo_airdrop.csv', # noqa: E501\n+ A_COMBO,\n+ 'https://furucombo.app/',\n+ ),\n+ 'lido': (\n+ # is checksummed\n+ 'https://gist.githubusercontent.com/LefterisJP/57a8d65280a482fed6f3e2cc00c0e540/raw/e6ebac56c438cc8a882585c5f5bfba64eb57c424/lido_airdrop.csv', # noqa: E501\n+ A_LDO,\n+ 'https://lido.fi/',\n+ ),\n }\n \n \n@@ -77,7 +91,7 @@\n for addr, amount, *_ in data:\n # not doing to_checksum_address() here since the file addresses are checksummed\n # and doing to_checksum_address() so many times hits performance\n- if protocol_name in ('cornichon', 'tornado', 'grain'):\n+ if protocol_name in ('cornichon', 'tornado', 'grain', 'lido'):\n amount = token_normalized_value_decimals(int(amount), 18)\n if addr in addresses:\n found_data[addr][protocol_name] = {\ndiff --git a/rotkehlchen/constants/assets.py b/rotkehlchen/constants/assets.py\n--- a/rotkehlchen/constants/assets.py\n+++ b/rotkehlchen/constants/assets.py\n@@ -68,3 +68,5 @@\n A_TORN = EthereumToken('TORN')\n A_CORN = EthereumToken('CORN-2')\n A_GRAIN = EthereumToken('GRAIN')\n+A_COMBO = EthereumToken('COMBO')\n+A_LDO = EthereumToken('LDO')\n", "issue": "Add the COMBO token to the airdrops\n## Abstract\r\n\r\nFurucombo has launched a token and airdropped to their users and donors on January 15th. \r\nThese tokens can be claimed periodically over a vesting period of 8 weeks.\r\n\r\n## Motivation\r\n\r\nThe airdrop ranges with either 350 or 900 COMBO which at the time of writing it worth 560 and 1450 usd. It would be nice have a easy way to track these tokes so the user can claim them.\r\n<!-- Why do you think this feature should be addressed. What is the value added to the users of Rotki and why would they want to have it implemented? -->\r\n\r\n## Specification\r\nAdd the COMBO token to the airdrop section and update the balance of the COMBO tokens in each vesting cycle.\r\nEach vesting cycle is at every friday for 8 weeks afaik. \r\nIdk how much will be distributed each cycle but those who had 350 where able to unlock 87.5 today. 
\r\n\r\nA list of eligible addresses can be found here:\r\nhttps://docs.google.com/spreadsheets/d/113AiPrGJ-yp7g-Kdo_IofMWiftc7msEoBMmKNKxJ4fo/edit#gid=0\r\nThe announcement of the token airdrop can be found here: \r\nhttps://medium.com/furucombo/first-furucombo-grant-7b1e48175c99\r\n<!-- If the feature is techical in nature please write as detailed as possible a specification of what needs to be built. -->\r\n\n", "before_files": [{"content": "from rotkehlchen.assets.asset import Asset, EthereumToken\n\nA_USD = Asset('USD')\nA_EUR = Asset('EUR')\nA_GBP = Asset('GBP')\nA_JPY = Asset('JPY')\nA_CNY = Asset('CNY')\nA_CAD = Asset('CAD')\nA_KRW = Asset('KRW')\nA_RUB = Asset('RUB')\nA_CHF = Asset('CHF')\nA_TRY = Asset('TRY')\nA_ZAR = Asset('ZAR')\nA_AUD = Asset('AUD')\nA_NZD = Asset('NZD')\nA_BRL = Asset('BRL')\nFIAT_CURRENCIES = (\n A_USD,\n A_EUR,\n A_GBP,\n A_JPY,\n A_CNY,\n A_CAD,\n A_KRW,\n A_RUB,\n A_CHF,\n A_TRY,\n A_ZAR,\n A_AUD,\n A_NZD,\n A_BRL,\n)\n\nS_BTC = 'BTC'\nS_ETH = 'ETH'\nS_KSM = 'KSM'\n\nA_BTC = Asset(S_BTC)\nA_BCH = Asset('BCH')\nA_BAL = Asset('BAL')\nA_BSV = Asset('BSV')\nA_ETH = Asset(S_ETH)\nA_ETH2 = Asset('ETH2')\nA_ETC = Asset('ETC')\nA_KSM = Asset(S_KSM)\nA_BAT = EthereumToken('BAT')\nA_UNI = EthereumToken('UNI')\nA_1INCH = EthereumToken('1INCH')\nA_DAI = EthereumToken('DAI')\nA_SAI = EthereumToken('SAI')\nA_YFI = EthereumToken('YFI')\nA_USDT = EthereumToken('USDT')\nA_USDC = EthereumToken('USDC')\nA_TUSD = EthereumToken('TUSD')\nA_ALINK = EthereumToken('aLINK')\nA_GUSD = EthereumToken('GUSD')\nA_CRV = EthereumToken('CRV')\nA_KNC = EthereumToken('KNC')\nA_WBTC = EthereumToken('WBTC')\nA_WETH = EthereumToken('WETH')\nA_ZRX = EthereumToken('ZRX')\nA_MANA = EthereumToken('MANA')\nA_PAX = EthereumToken('PAX')\nA_COMP = EthereumToken('COMP')\nA_LRC = EthereumToken('LRC')\nA_LINK = EthereumToken('LINK')\nA_ADX = EthereumToken('ADX')\nA_TORN = EthereumToken('TORN')\nA_CORN = EthereumToken('CORN-2')\nA_GRAIN = EthereumToken('GRAIN')\n", "path": "rotkehlchen/constants/assets.py"}, {"content": "from rotkehlchen.typing import ChecksumEthAddress\nfrom typing import List, Dict, TextIO, Iterator, Tuple\nfrom rotkehlchen.constants.assets import A_UNI, A_1INCH, A_TORN, A_CORN, A_GRAIN\nimport csv\nimport requests\nfrom pathlib import Path\nfrom collections import defaultdict\nfrom rotkehlchen.errors import RemoteError\nfrom rotkehlchen.chain.ethereum.utils import token_normalized_value_decimals\n\nAIRDROPS = {\n 'uniswap': (\n # is checksummed\n 'https://gist.githubusercontent.com/LefterisJP/d883cb7187a7c4fcf98c7a62f45568e7/raw/3718c95d572a29b9c3906d7c64726d3bd7524bfd/uniswap.csv', # noqa: E501\n A_UNI,\n 'https://app.uniswap.org/',\n ),\n '1inch': (\n # is checksummed\n 'https://gist.githubusercontent.com/LefterisJP/8f41d1511bf354d7e56810188116a410/raw/87d967e86e1435aa3a9ddb97ce20531e4e52dbad/1inch.csv', # noqa: E501\n A_1INCH,\n 'https://1inch.exchange/',\n ),\n 'tornado': (\n # is checksummed\n 'https://raw.githubusercontent.com/tornadocash/airdrop/master/airdrop.csv',\n A_TORN, # Don't have TORN token yet?\n 'https://tornado.cash/',\n ),\n 'cornichon': (\n # is checksummed\n 'https://gist.githubusercontent.com/LefterisJP/5199d8bc6caa3253c343cd5084489088/raw/7e9ca4c4772fc50780bfe9997e1c43525e1b7445/cornichon_airdrop.csv', # noqa: E501\n A_CORN,\n 'https://cornichon.ape.tax/',\n ),\n 'grain': (\n # is checksummed\n 'https://gist.githubusercontent.com/LefterisJP/08d7a5b28876741b300c944650c89280/raw/987ab4a92d5363fdbe262f639565732bd1fd3921/grain_iou.csv', # noqa: E501\n A_GRAIN,\n 
'https://claim.harvest.finance/',\n ),\n}\n\n\ndef get_airdrop_data(name: str, data_dir: Path) -> Tuple[Iterator, TextIO]:\n airdrops_dir = data_dir / 'airdrops'\n airdrops_dir.mkdir(parents=True, exist_ok=True)\n filename = airdrops_dir / f'{name}.csv'\n if not filename.is_file():\n # if not cached, get it from the gist\n try:\n request = requests.get(AIRDROPS[name][0])\n except requests.exceptions.RequestException as e:\n raise RemoteError(f'Airdrops Gist request failed due to {str(e)}') from e\n\n with open(filename, 'w') as f:\n f.write(request.content.decode('utf-8'))\n\n csvfile = open(filename, 'r')\n iterator = csv.reader(csvfile)\n next(iterator) # skip header\n return iterator, csvfile\n\n\ndef check_airdrops(\n addresses: List[ChecksumEthAddress],\n data_dir: Path,\n) -> Dict[ChecksumEthAddress, Dict]:\n \"\"\"Checks airdrop data for the given list of ethereum addresses\n\n May raise:\n - RemoteError if the remote request fails\n \"\"\"\n found_data: Dict[ChecksumEthAddress, Dict] = defaultdict(lambda: defaultdict(dict))\n for protocol_name, airdrop_data in AIRDROPS.items():\n data, csvfile = get_airdrop_data(protocol_name, data_dir)\n for addr, amount, *_ in data:\n # not doing to_checksum_address() here since the file addresses are checksummed\n # and doing to_checksum_address() so many times hits performance\n if protocol_name in ('cornichon', 'tornado', 'grain'):\n amount = token_normalized_value_decimals(int(amount), 18)\n if addr in addresses:\n found_data[addr][protocol_name] = {\n 'amount': str(amount),\n 'asset': airdrop_data[1],\n 'link': airdrop_data[2],\n }\n csvfile.close()\n\n return dict(found_data)\n", "path": "rotkehlchen/chain/ethereum/airdrops.py"}], "after_files": [{"content": "from rotkehlchen.assets.asset import Asset, EthereumToken\n\nA_USD = Asset('USD')\nA_EUR = Asset('EUR')\nA_GBP = Asset('GBP')\nA_JPY = Asset('JPY')\nA_CNY = Asset('CNY')\nA_CAD = Asset('CAD')\nA_KRW = Asset('KRW')\nA_RUB = Asset('RUB')\nA_CHF = Asset('CHF')\nA_TRY = Asset('TRY')\nA_ZAR = Asset('ZAR')\nA_AUD = Asset('AUD')\nA_NZD = Asset('NZD')\nA_BRL = Asset('BRL')\nFIAT_CURRENCIES = (\n A_USD,\n A_EUR,\n A_GBP,\n A_JPY,\n A_CNY,\n A_CAD,\n A_KRW,\n A_RUB,\n A_CHF,\n A_TRY,\n A_ZAR,\n A_AUD,\n A_NZD,\n A_BRL,\n)\n\nS_BTC = 'BTC'\nS_ETH = 'ETH'\nS_KSM = 'KSM'\n\nA_BTC = Asset(S_BTC)\nA_BCH = Asset('BCH')\nA_BAL = Asset('BAL')\nA_BSV = Asset('BSV')\nA_ETH = Asset(S_ETH)\nA_ETH2 = Asset('ETH2')\nA_ETC = Asset('ETC')\nA_KSM = Asset(S_KSM)\nA_BAT = EthereumToken('BAT')\nA_UNI = EthereumToken('UNI')\nA_1INCH = EthereumToken('1INCH')\nA_DAI = EthereumToken('DAI')\nA_SAI = EthereumToken('SAI')\nA_YFI = EthereumToken('YFI')\nA_USDT = EthereumToken('USDT')\nA_USDC = EthereumToken('USDC')\nA_TUSD = EthereumToken('TUSD')\nA_ALINK = EthereumToken('aLINK')\nA_GUSD = EthereumToken('GUSD')\nA_CRV = EthereumToken('CRV')\nA_KNC = EthereumToken('KNC')\nA_WBTC = EthereumToken('WBTC')\nA_WETH = EthereumToken('WETH')\nA_ZRX = EthereumToken('ZRX')\nA_MANA = EthereumToken('MANA')\nA_PAX = EthereumToken('PAX')\nA_COMP = EthereumToken('COMP')\nA_LRC = EthereumToken('LRC')\nA_LINK = EthereumToken('LINK')\nA_ADX = EthereumToken('ADX')\nA_TORN = EthereumToken('TORN')\nA_CORN = EthereumToken('CORN-2')\nA_GRAIN = EthereumToken('GRAIN')\nA_COMBO = EthereumToken('COMBO')\nA_LDO = EthereumToken('LDO')\n", "path": "rotkehlchen/constants/assets.py"}, {"content": "import csv\nfrom collections import defaultdict\nfrom pathlib import Path\nfrom typing import Dict, Iterator, List, TextIO, Tuple\n\nimport requests\n\nfrom 
rotkehlchen.chain.ethereum.utils import token_normalized_value_decimals\nfrom rotkehlchen.constants.assets import A_1INCH, A_COMBO, A_CORN, A_GRAIN, A_LDO, A_TORN, A_UNI\nfrom rotkehlchen.errors import RemoteError\nfrom rotkehlchen.typing import ChecksumEthAddress\n\nAIRDROPS = {\n 'uniswap': (\n # is checksummed\n 'https://gist.githubusercontent.com/LefterisJP/d883cb7187a7c4fcf98c7a62f45568e7/raw/3718c95d572a29b9c3906d7c64726d3bd7524bfd/uniswap.csv', # noqa: E501\n A_UNI,\n 'https://app.uniswap.org/',\n ),\n '1inch': (\n # is checksummed\n 'https://gist.githubusercontent.com/LefterisJP/8f41d1511bf354d7e56810188116a410/raw/87d967e86e1435aa3a9ddb97ce20531e4e52dbad/1inch.csv', # noqa: E501\n A_1INCH,\n 'https://1inch.exchange/',\n ),\n 'tornado': (\n # is checksummed\n 'https://raw.githubusercontent.com/tornadocash/airdrop/master/airdrop.csv',\n A_TORN, # Don't have TORN token yet?\n 'https://tornado.cash/',\n ),\n 'cornichon': (\n # is checksummed\n 'https://gist.githubusercontent.com/LefterisJP/5199d8bc6caa3253c343cd5084489088/raw/7e9ca4c4772fc50780bfe9997e1c43525e1b7445/cornichon_airdrop.csv', # noqa: E501\n A_CORN,\n 'https://cornichon.ape.tax/',\n ),\n 'grain': (\n # is checksummed\n 'https://gist.githubusercontent.com/LefterisJP/08d7a5b28876741b300c944650c89280/raw/987ab4a92d5363fdbe262f639565732bd1fd3921/grain_iou.csv', # noqa: E501\n A_GRAIN,\n 'https://claim.harvest.finance/',\n ),\n 'furucombo': (\n # is checksummed\n 'https://gist.githubusercontent.com/LefterisJP/69612e155e8063fd6b3422d4efbf22a3/raw/b9023960ab1c478ee2620c456e208e5124115c19/furucombo_airdrop.csv', # noqa: E501\n A_COMBO,\n 'https://furucombo.app/',\n ),\n 'lido': (\n # is checksummed\n 'https://gist.githubusercontent.com/LefterisJP/57a8d65280a482fed6f3e2cc00c0e540/raw/e6ebac56c438cc8a882585c5f5bfba64eb57c424/lido_airdrop.csv', # noqa: E501\n A_LDO,\n 'https://lido.fi/',\n ),\n}\n\n\ndef get_airdrop_data(name: str, data_dir: Path) -> Tuple[Iterator, TextIO]:\n airdrops_dir = data_dir / 'airdrops'\n airdrops_dir.mkdir(parents=True, exist_ok=True)\n filename = airdrops_dir / f'{name}.csv'\n if not filename.is_file():\n # if not cached, get it from the gist\n try:\n request = requests.get(AIRDROPS[name][0])\n except requests.exceptions.RequestException as e:\n raise RemoteError(f'Airdrops Gist request failed due to {str(e)}') from e\n\n with open(filename, 'w') as f:\n f.write(request.content.decode('utf-8'))\n\n csvfile = open(filename, 'r')\n iterator = csv.reader(csvfile)\n next(iterator) # skip header\n return iterator, csvfile\n\n\ndef check_airdrops(\n addresses: List[ChecksumEthAddress],\n data_dir: Path,\n) -> Dict[ChecksumEthAddress, Dict]:\n \"\"\"Checks airdrop data for the given list of ethereum addresses\n\n May raise:\n - RemoteError if the remote request fails\n \"\"\"\n found_data: Dict[ChecksumEthAddress, Dict] = defaultdict(lambda: defaultdict(dict))\n for protocol_name, airdrop_data in AIRDROPS.items():\n data, csvfile = get_airdrop_data(protocol_name, data_dir)\n for addr, amount, *_ in data:\n # not doing to_checksum_address() here since the file addresses are checksummed\n # and doing to_checksum_address() so many times hits performance\n if protocol_name in ('cornichon', 'tornado', 'grain', 'lido'):\n amount = token_normalized_value_decimals(int(amount), 18)\n if addr in addresses:\n found_data[addr][protocol_name] = {\n 'amount': str(amount),\n 'asset': airdrop_data[1],\n 'link': airdrop_data[2],\n }\n csvfile.close()\n\n return dict(found_data)\n", "path": 
"rotkehlchen/chain/ethereum/airdrops.py"}]}
| 2,594 | 800 |
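The patch in this record extends the `AIRDROPS` mapping with `furucombo` and `lido` entries and adds `'lido'` to the protocols whose CSV amounts are raw 18-decimal integers. A small self-contained sketch of that normalization step follows; the `token_normalized_value_decimals` function here is a stand-in for rotki's helper of the same name, and the sample amounts are invented for illustration.

```python
from decimal import Decimal

def token_normalized_value_decimals(amount: int, decimals: int) -> Decimal:
    # Stand-in for rotki's helper; the real one lives in
    # rotkehlchen.chain.ethereum.utils.
    return Decimal(amount) / (Decimal(10) ** decimals)

# Per the patch, 'lido' joins the protocols whose CSVs hold raw 18-decimal
# integers; 'furucombo' amounts are left as-is.
RAW_18_DECIMAL_PROTOCOLS = {"cornichon", "tornado", "grain", "lido"}

def normalize_airdrop_amount(protocol_name: str, amount: str) -> str:
    if protocol_name in RAW_18_DECIMAL_PROTOCOLS:
        return str(token_normalized_value_decimals(int(amount), 18))
    return amount

print(normalize_airdrop_amount("lido", "87500000000000000000"))  # 87.5
print(normalize_airdrop_amount("furucombo", "350"))              # 350
```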
gh_patches_debug_7927
|
rasdani/github-patches
|
git_diff
|
huggingface__transformers-4747
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Loading config file bug
Hi guys.
I have released a new (XLNet-based) transformer model for the low-resource language Tigrinya 
[(TigXLNet)](https://github.com/abryeemessi/Transferring-Monolingual-Model-to-Low-Resource-Language) and found a bug when loading a pre-trained config file:
My config file looks like:
https://s3.amazonaws.com/models.huggingface.co/bert/abryee/TigXLNet/config.json
config = AutoConfig.from_pretrained("abryee/TigXLNet")
print(config.d_head) #prints 48 even though d_head in the given config file is 64.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/transformers/configuration_xlnet.py`
Content:
```
1 # coding=utf-8
2 # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
3 # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16 """ XLNet configuration """
17
18
19 import logging
20
21 from .configuration_utils import PretrainedConfig
22
23
24 logger = logging.getLogger(__name__)
25
26 XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
27 "xlnet-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-base-cased-config.json",
28 "xlnet-large-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-large-cased-config.json",
29 }
30
31
32 class XLNetConfig(PretrainedConfig):
33 """
34 This is the configuration class to store the configuration of a :class:`~transformers.XLNetModel`.
35 It is used to instantiate an XLNet model according to the specified arguments, defining the model
36 architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of
37 the `xlnet-large-cased <https://huggingface.co/xlnet-large-cased>`__ architecture.
38
39 Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used
40 to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig`
41 for more information.
42
43 Args:
44 vocab_size (:obj:`int`, optional, defaults to 32000):
45 Vocabulary size of the XLNet model. Defines the different tokens that
46 can be represented by the `inputs_ids` passed to the forward method of :class:`~transformers.XLNetModel`.
47 d_model (:obj:`int`, optional, defaults to 1024):
48 Dimensionality of the encoder layers and the pooler layer.
49 n_layer (:obj:`int`, optional, defaults to 24):
50 Number of hidden layers in the Transformer encoder.
51 n_head (:obj:`int`, optional, defaults to 16):
52 Number of attention heads for each attention layer in the Transformer encoder.
53 d_inner (:obj:`int`, optional, defaults to 4096):
54 Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
55 ff_activation (:obj:`string`, optional, defaults to "gelu"):
56 The non-linear activation function (function or string) in the
57 encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
58 untie_r (:obj:`boolean`, optional, defaults to :obj:`True`):
59 Untie relative position biases
60 attn_type (:obj:`string`, optional, defaults to "bi"):
61 The attention type used by the model. Set 'bi' for XLNet, 'uni' for Transformer-XL.
62 initializer_range (:obj:`float`, optional, defaults to 0.02):
63 The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
64 layer_norm_eps (:obj:`float`, optional, defaults to 1e-12):
65 The epsilon used by the layer normalization layers.
66 dropout (:obj:`float`, optional, defaults to 0.1):
67 The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
68 mem_len (:obj:`int` or :obj:`None`, optional, defaults to :obj:`None`):
69 The number of tokens to cache. The key/value pairs that have already been pre-computed
70 in a previous forward pass won't be re-computed. See the
71 `quickstart <https://huggingface.co/transformers/quickstart.html#using-the-past>`__
72 for more information.
73 reuse_len (:obj:`int` or :obj:`None`, optional, defaults to :obj:`None`):
74 The number of tokens in the current batch to be cached and reused in the future.
75 bi_data (:obj:`boolean`, optional, defaults to :obj:`False`):
76 Whether to use bidirectional input pipeline. Usually set to `True` during
77 pretraining and `False` during finetuning.
78 clamp_len (:obj:`int`, optional, defaults to -1):
79 Clamp all relative distances larger than clamp_len.
80 Setting this attribute to -1 means no clamping.
81 same_length (:obj:`boolean`, optional, defaults to :obj:`False`):
82 Whether to use the same attention length for each token.
83 summary_type (:obj:`string`, optional, defaults to "last"):
84 Argument used when doing sequence summary. Used in for the multiple choice head in
85 :class:transformers.XLNetForSequenceClassification` and :class:`~transformers.XLNetForMultipleChoice`.
86 Is one of the following options:
87 - 'last' => take the last token hidden state (like XLNet)
88 - 'first' => take the first token hidden state (like Bert)
89 - 'mean' => take the mean of all tokens hidden states
90 - 'cls_index' => supply a Tensor of classification token position (GPT/GPT-2)
91 - 'attn' => Not implemented now, use multi-head attention
92 summary_use_proj (:obj:`boolean`, optional, defaults to :obj:`True`):
93 Argument used when doing sequence summary. Used in for the multiple choice head in
94 :class:`~transformers.XLNetForSequenceClassification` and :class:`~transformers.XLNetForMultipleChoice`.
95 Add a projection after the vector extraction
96 summary_activation (:obj:`string` or :obj:`None`, optional, defaults to :obj:`None`):
97 Argument used when doing sequence summary. Used in for the multiple choice head in
98 :class:`~transformers.XLNetForSequenceClassification` and :class:`~transformers.XLNetForMultipleChoice`.
99 'tanh' => add a tanh activation to the output, Other => no activation.
100 summary_proj_to_labels (:obj:`boolean`, optional, defaults to :obj:`True`):
101 Argument used when doing sequence summary. Used in for the multiple choice head in
102 :class:`~transformers.XLNetForSequenceClassification` and :class:`~transformers.XLNetForMultipleChoice`.
103 If True, the projection outputs to config.num_labels classes (otherwise to hidden_size). Default: False.
104 summary_last_dropout (:obj:`float`, optional, defaults to 0.1):
105 Argument used when doing sequence summary. Used in for the multiple choice head in
106 :class:`~transformers.XLNetForSequenceClassification` and :class:`~transformers.XLNetForMultipleChoice`.
107 Add a dropout after the projection and activation
108 start_n_top (:obj:`int`, optional, defaults to 5):
109 Used in the SQuAD evaluation script for XLM and XLNet.
110 end_n_top (:obj:`int`, optional, defaults to 5):
111 Used in the SQuAD evaluation script for XLM and XLNet.
112
113 Example::
114
115 from transformers import XLNetConfig, XLNetModel
116
117 # Initializing a XLNet configuration
118 configuration = XLNetConfig()
119
120 # Initializing a model from the configuration
121 model = XLNetModel(configuration)
122
123 # Accessing the model configuration
124 configuration = model.config
125 """
126
127 model_type = "xlnet"
128
129 def __init__(
130 self,
131 vocab_size=32000,
132 d_model=1024,
133 n_layer=24,
134 n_head=16,
135 d_inner=4096,
136 ff_activation="gelu",
137 untie_r=True,
138 attn_type="bi",
139 initializer_range=0.02,
140 layer_norm_eps=1e-12,
141 dropout=0.1,
142 mem_len=None,
143 reuse_len=None,
144 bi_data=False,
145 clamp_len=-1,
146 same_length=False,
147 summary_type="last",
148 summary_use_proj=True,
149 summary_activation="tanh",
150 summary_last_dropout=0.1,
151 start_n_top=5,
152 end_n_top=5,
153 pad_token_id=5,
154 bos_token_id=1,
155 eos_token_id=2,
156 **kwargs
157 ):
158 """Constructs XLNetConfig.
159 """
160 super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
161 self.vocab_size = vocab_size
162 self.d_model = d_model
163 self.n_layer = n_layer
164 self.n_head = n_head
165 assert d_model % n_head == 0
166 self.d_head = d_model // n_head
167 self.ff_activation = ff_activation
168 self.d_inner = d_inner
169 self.untie_r = untie_r
170 self.attn_type = attn_type
171
172 self.initializer_range = initializer_range
173 self.layer_norm_eps = layer_norm_eps
174
175 self.dropout = dropout
176 self.mem_len = mem_len
177 self.reuse_len = reuse_len
178 self.bi_data = bi_data
179 self.clamp_len = clamp_len
180 self.same_length = same_length
181
182 self.summary_type = summary_type
183 self.summary_use_proj = summary_use_proj
184 self.summary_activation = summary_activation
185 self.summary_last_dropout = summary_last_dropout
186 self.start_n_top = start_n_top
187 self.end_n_top = end_n_top
188
189 self.bos_token_id = bos_token_id
190 self.pad_token_id = pad_token_id
191 self.eos_token_id = eos_token_id
192
193 @property
194 def max_position_embeddings(self):
195 return -1
196
197 @property
198 def n_token(self): # Backward compatibility
199 return self.vocab_size
200
201 @n_token.setter
202 def n_token(self, value): # Backward compatibility
203 self.vocab_size = value
204
205 @property
206 def hidden_size(self):
207 return self.d_model
208
209 @property
210 def num_attention_heads(self):
211 return self.n_head
212
213 @property
214 def num_hidden_layers(self):
215 return self.n_layer
216
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/transformers/configuration_xlnet.py b/src/transformers/configuration_xlnet.py
--- a/src/transformers/configuration_xlnet.py
+++ b/src/transformers/configuration_xlnet.py
@@ -163,6 +163,10 @@
self.n_layer = n_layer
self.n_head = n_head
assert d_model % n_head == 0
+ if "d_head" in kwargs:
+ assert kwargs["d_head"] == d_model // n_head, (
+ f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
+ )
self.d_head = d_model // n_head
self.ff_activation = ff_activation
self.d_inner = d_inner
|
{"golden_diff": "diff --git a/src/transformers/configuration_xlnet.py b/src/transformers/configuration_xlnet.py\n--- a/src/transformers/configuration_xlnet.py\n+++ b/src/transformers/configuration_xlnet.py\n@@ -163,6 +163,10 @@\n self.n_layer = n_layer\n self.n_head = n_head\n assert d_model % n_head == 0\n+ if \"d_head\" in kwargs:\n+ assert kwargs[\"d_head\"] == d_model // n_head, (\n+ f\"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})\"\n+ )\n self.d_head = d_model // n_head\n self.ff_activation = ff_activation\n self.d_inner = d_inner\n", "issue": "Loading config file bug\nHi guys.\r\n\r\nI have released a new (XLNet based) transformer model for low-resource language Tigrinya \r\n [(TigXLNet)](https://github.com/abryeemessi/Transferring-Monolingual-Model-to-Low-Resource-Language) and found a bug when loading a pre-trained config file: \r\n\r\nMy config file looks like:\r\nhttps://s3.amazonaws.com/models.huggingface.co/bert/abryee/TigXLNet/config.json\r\n\r\n\r\n config = AutoConfig.from_pretrained(\"abryee/TigXLNet\")\r\n print(config.d_head) #prints 48 even though d_head in the given config file is 64.\r\n\r\n\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" XLNet configuration \"\"\"\n\n\nimport logging\n\nfrom .configuration_utils import PretrainedConfig\n\n\nlogger = logging.getLogger(__name__)\n\nXLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {\n \"xlnet-base-cased\": \"https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-base-cased-config.json\",\n \"xlnet-large-cased\": \"https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-large-cased-config.json\",\n}\n\n\nclass XLNetConfig(PretrainedConfig):\n \"\"\"\n This is the configuration class to store the configuration of a :class:`~transformers.XLNetModel`.\n It is used to instantiate an XLNet model according to the specified arguments, defining the model\n architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of\n the `xlnet-large-cased <https://huggingface.co/xlnet-large-cased>`__ architecture.\n\n Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used\n to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig`\n for more information.\n\n Args:\n vocab_size (:obj:`int`, optional, defaults to 32000):\n Vocabulary size of the XLNet model. 
Defines the different tokens that\n can be represented by the `inputs_ids` passed to the forward method of :class:`~transformers.XLNetModel`.\n d_model (:obj:`int`, optional, defaults to 1024):\n Dimensionality of the encoder layers and the pooler layer.\n n_layer (:obj:`int`, optional, defaults to 24):\n Number of hidden layers in the Transformer encoder.\n n_head (:obj:`int`, optional, defaults to 16):\n Number of attention heads for each attention layer in the Transformer encoder.\n d_inner (:obj:`int`, optional, defaults to 4096):\n Dimensionality of the \"intermediate\" (i.e., feed-forward) layer in the Transformer encoder.\n ff_activation (:obj:`string`, optional, defaults to \"gelu\"):\n The non-linear activation function (function or string) in the\n encoder and pooler. If string, \"gelu\", \"relu\" and \"swish\" are supported.\n untie_r (:obj:`boolean`, optional, defaults to :obj:`True`):\n Untie relative position biases\n attn_type (:obj:`string`, optional, defaults to \"bi\"):\n The attention type used by the model. Set 'bi' for XLNet, 'uni' for Transformer-XL.\n initializer_range (:obj:`float`, optional, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n layer_norm_eps (:obj:`float`, optional, defaults to 1e-12):\n The epsilon used by the layer normalization layers.\n dropout (:obj:`float`, optional, defaults to 0.1):\n The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.\n mem_len (:obj:`int` or :obj:`None`, optional, defaults to :obj:`None`):\n The number of tokens to cache. The key/value pairs that have already been pre-computed\n in a previous forward pass won't be re-computed. See the\n `quickstart <https://huggingface.co/transformers/quickstart.html#using-the-past>`__\n for more information.\n reuse_len (:obj:`int` or :obj:`None`, optional, defaults to :obj:`None`):\n The number of tokens in the current batch to be cached and reused in the future.\n bi_data (:obj:`boolean`, optional, defaults to :obj:`False`):\n Whether to use bidirectional input pipeline. Usually set to `True` during\n pretraining and `False` during finetuning.\n clamp_len (:obj:`int`, optional, defaults to -1):\n Clamp all relative distances larger than clamp_len.\n Setting this attribute to -1 means no clamping.\n same_length (:obj:`boolean`, optional, defaults to :obj:`False`):\n Whether to use the same attention length for each token.\n summary_type (:obj:`string`, optional, defaults to \"last\"):\n Argument used when doing sequence summary. Used in for the multiple choice head in\n :class:transformers.XLNetForSequenceClassification` and :class:`~transformers.XLNetForMultipleChoice`.\n Is one of the following options:\n - 'last' => take the last token hidden state (like XLNet)\n - 'first' => take the first token hidden state (like Bert)\n - 'mean' => take the mean of all tokens hidden states\n - 'cls_index' => supply a Tensor of classification token position (GPT/GPT-2)\n - 'attn' => Not implemented now, use multi-head attention\n summary_use_proj (:obj:`boolean`, optional, defaults to :obj:`True`):\n Argument used when doing sequence summary. Used in for the multiple choice head in\n :class:`~transformers.XLNetForSequenceClassification` and :class:`~transformers.XLNetForMultipleChoice`.\n Add a projection after the vector extraction\n summary_activation (:obj:`string` or :obj:`None`, optional, defaults to :obj:`None`):\n Argument used when doing sequence summary. 
Used in for the multiple choice head in\n :class:`~transformers.XLNetForSequenceClassification` and :class:`~transformers.XLNetForMultipleChoice`.\n 'tanh' => add a tanh activation to the output, Other => no activation.\n summary_proj_to_labels (:obj:`boolean`, optional, defaults to :obj:`True`):\n Argument used when doing sequence summary. Used in for the multiple choice head in\n :class:`~transformers.XLNetForSequenceClassification` and :class:`~transformers.XLNetForMultipleChoice`.\n If True, the projection outputs to config.num_labels classes (otherwise to hidden_size). Default: False.\n summary_last_dropout (:obj:`float`, optional, defaults to 0.1):\n Argument used when doing sequence summary. Used in for the multiple choice head in\n :class:`~transformers.XLNetForSequenceClassification` and :class:`~transformers.XLNetForMultipleChoice`.\n Add a dropout after the projection and activation\n start_n_top (:obj:`int`, optional, defaults to 5):\n Used in the SQuAD evaluation script for XLM and XLNet.\n end_n_top (:obj:`int`, optional, defaults to 5):\n Used in the SQuAD evaluation script for XLM and XLNet.\n\n Example::\n\n from transformers import XLNetConfig, XLNetModel\n\n # Initializing a XLNet configuration\n configuration = XLNetConfig()\n\n # Initializing a model from the configuration\n model = XLNetModel(configuration)\n\n # Accessing the model configuration\n configuration = model.config\n \"\"\"\n\n model_type = \"xlnet\"\n\n def __init__(\n self,\n vocab_size=32000,\n d_model=1024,\n n_layer=24,\n n_head=16,\n d_inner=4096,\n ff_activation=\"gelu\",\n untie_r=True,\n attn_type=\"bi\",\n initializer_range=0.02,\n layer_norm_eps=1e-12,\n dropout=0.1,\n mem_len=None,\n reuse_len=None,\n bi_data=False,\n clamp_len=-1,\n same_length=False,\n summary_type=\"last\",\n summary_use_proj=True,\n summary_activation=\"tanh\",\n summary_last_dropout=0.1,\n start_n_top=5,\n end_n_top=5,\n pad_token_id=5,\n bos_token_id=1,\n eos_token_id=2,\n **kwargs\n ):\n \"\"\"Constructs XLNetConfig.\n \"\"\"\n super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)\n self.vocab_size = vocab_size\n self.d_model = d_model\n self.n_layer = n_layer\n self.n_head = n_head\n assert d_model % n_head == 0\n self.d_head = d_model // n_head\n self.ff_activation = ff_activation\n self.d_inner = d_inner\n self.untie_r = untie_r\n self.attn_type = attn_type\n\n self.initializer_range = initializer_range\n self.layer_norm_eps = layer_norm_eps\n\n self.dropout = dropout\n self.mem_len = mem_len\n self.reuse_len = reuse_len\n self.bi_data = bi_data\n self.clamp_len = clamp_len\n self.same_length = same_length\n\n self.summary_type = summary_type\n self.summary_use_proj = summary_use_proj\n self.summary_activation = summary_activation\n self.summary_last_dropout = summary_last_dropout\n self.start_n_top = start_n_top\n self.end_n_top = end_n_top\n\n self.bos_token_id = bos_token_id\n self.pad_token_id = pad_token_id\n self.eos_token_id = eos_token_id\n\n @property\n def max_position_embeddings(self):\n return -1\n\n @property\n def n_token(self): # Backward compatibility\n return self.vocab_size\n\n @n_token.setter\n def n_token(self, value): # Backward compatibility\n self.vocab_size = value\n\n @property\n def hidden_size(self):\n return self.d_model\n\n @property\n def num_attention_heads(self):\n return self.n_head\n\n @property\n def num_hidden_layers(self):\n return self.n_layer\n", "path": "src/transformers/configuration_xlnet.py"}], "after_files": [{"content": 
"# coding=utf-8\n# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" XLNet configuration \"\"\"\n\n\nimport logging\n\nfrom .configuration_utils import PretrainedConfig\n\n\nlogger = logging.getLogger(__name__)\n\nXLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {\n \"xlnet-base-cased\": \"https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-base-cased-config.json\",\n \"xlnet-large-cased\": \"https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-large-cased-config.json\",\n}\n\n\nclass XLNetConfig(PretrainedConfig):\n \"\"\"\n This is the configuration class to store the configuration of a :class:`~transformers.XLNetModel`.\n It is used to instantiate an XLNet model according to the specified arguments, defining the model\n architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of\n the `xlnet-large-cased <https://huggingface.co/xlnet-large-cased>`__ architecture.\n\n Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used\n to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig`\n for more information.\n\n Args:\n vocab_size (:obj:`int`, optional, defaults to 32000):\n Vocabulary size of the XLNet model. Defines the different tokens that\n can be represented by the `inputs_ids` passed to the forward method of :class:`~transformers.XLNetModel`.\n d_model (:obj:`int`, optional, defaults to 1024):\n Dimensionality of the encoder layers and the pooler layer.\n n_layer (:obj:`int`, optional, defaults to 24):\n Number of hidden layers in the Transformer encoder.\n n_head (:obj:`int`, optional, defaults to 16):\n Number of attention heads for each attention layer in the Transformer encoder.\n d_inner (:obj:`int`, optional, defaults to 4096):\n Dimensionality of the \"intermediate\" (i.e., feed-forward) layer in the Transformer encoder.\n ff_activation (:obj:`string`, optional, defaults to \"gelu\"):\n The non-linear activation function (function or string) in the\n encoder and pooler. If string, \"gelu\", \"relu\" and \"swish\" are supported.\n untie_r (:obj:`boolean`, optional, defaults to :obj:`True`):\n Untie relative position biases\n attn_type (:obj:`string`, optional, defaults to \"bi\"):\n The attention type used by the model. 
Set 'bi' for XLNet, 'uni' for Transformer-XL.\n initializer_range (:obj:`float`, optional, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n layer_norm_eps (:obj:`float`, optional, defaults to 1e-12):\n The epsilon used by the layer normalization layers.\n dropout (:obj:`float`, optional, defaults to 0.1):\n The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.\n mem_len (:obj:`int` or :obj:`None`, optional, defaults to :obj:`None`):\n The number of tokens to cache. The key/value pairs that have already been pre-computed\n in a previous forward pass won't be re-computed. See the\n `quickstart <https://huggingface.co/transformers/quickstart.html#using-the-past>`__\n for more information.\n reuse_len (:obj:`int` or :obj:`None`, optional, defaults to :obj:`None`):\n The number of tokens in the current batch to be cached and reused in the future.\n bi_data (:obj:`boolean`, optional, defaults to :obj:`False`):\n Whether to use bidirectional input pipeline. Usually set to `True` during\n pretraining and `False` during finetuning.\n clamp_len (:obj:`int`, optional, defaults to -1):\n Clamp all relative distances larger than clamp_len.\n Setting this attribute to -1 means no clamping.\n same_length (:obj:`boolean`, optional, defaults to :obj:`False`):\n Whether to use the same attention length for each token.\n summary_type (:obj:`string`, optional, defaults to \"last\"):\n Argument used when doing sequence summary. Used in for the multiple choice head in\n :class:transformers.XLNetForSequenceClassification` and :class:`~transformers.XLNetForMultipleChoice`.\n Is one of the following options:\n - 'last' => take the last token hidden state (like XLNet)\n - 'first' => take the first token hidden state (like Bert)\n - 'mean' => take the mean of all tokens hidden states\n - 'cls_index' => supply a Tensor of classification token position (GPT/GPT-2)\n - 'attn' => Not implemented now, use multi-head attention\n summary_use_proj (:obj:`boolean`, optional, defaults to :obj:`True`):\n Argument used when doing sequence summary. Used in for the multiple choice head in\n :class:`~transformers.XLNetForSequenceClassification` and :class:`~transformers.XLNetForMultipleChoice`.\n Add a projection after the vector extraction\n summary_activation (:obj:`string` or :obj:`None`, optional, defaults to :obj:`None`):\n Argument used when doing sequence summary. Used in for the multiple choice head in\n :class:`~transformers.XLNetForSequenceClassification` and :class:`~transformers.XLNetForMultipleChoice`.\n 'tanh' => add a tanh activation to the output, Other => no activation.\n summary_proj_to_labels (:obj:`boolean`, optional, defaults to :obj:`True`):\n Argument used when doing sequence summary. Used in for the multiple choice head in\n :class:`~transformers.XLNetForSequenceClassification` and :class:`~transformers.XLNetForMultipleChoice`.\n If True, the projection outputs to config.num_labels classes (otherwise to hidden_size). Default: False.\n summary_last_dropout (:obj:`float`, optional, defaults to 0.1):\n Argument used when doing sequence summary. 
Used in for the multiple choice head in\n :class:`~transformers.XLNetForSequenceClassification` and :class:`~transformers.XLNetForMultipleChoice`.\n Add a dropout after the projection and activation\n start_n_top (:obj:`int`, optional, defaults to 5):\n Used in the SQuAD evaluation script for XLM and XLNet.\n end_n_top (:obj:`int`, optional, defaults to 5):\n Used in the SQuAD evaluation script for XLM and XLNet.\n\n Example::\n\n from transformers import XLNetConfig, XLNetModel\n\n # Initializing a XLNet configuration\n configuration = XLNetConfig()\n\n # Initializing a model from the configuration\n model = XLNetModel(configuration)\n\n # Accessing the model configuration\n configuration = model.config\n \"\"\"\n\n model_type = \"xlnet\"\n\n def __init__(\n self,\n vocab_size=32000,\n d_model=1024,\n n_layer=24,\n n_head=16,\n d_inner=4096,\n ff_activation=\"gelu\",\n untie_r=True,\n attn_type=\"bi\",\n initializer_range=0.02,\n layer_norm_eps=1e-12,\n dropout=0.1,\n mem_len=None,\n reuse_len=None,\n bi_data=False,\n clamp_len=-1,\n same_length=False,\n summary_type=\"last\",\n summary_use_proj=True,\n summary_activation=\"tanh\",\n summary_last_dropout=0.1,\n start_n_top=5,\n end_n_top=5,\n pad_token_id=5,\n bos_token_id=1,\n eos_token_id=2,\n **kwargs\n ):\n \"\"\"Constructs XLNetConfig.\n \"\"\"\n super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)\n self.vocab_size = vocab_size\n self.d_model = d_model\n self.n_layer = n_layer\n self.n_head = n_head\n assert d_model % n_head == 0\n if \"d_head\" in kwargs:\n assert kwargs[\"d_head\"] == d_model // n_head, (\n f\"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})\"\n )\n self.d_head = d_model // n_head\n self.ff_activation = ff_activation\n self.d_inner = d_inner\n self.untie_r = untie_r\n self.attn_type = attn_type\n\n self.initializer_range = initializer_range\n self.layer_norm_eps = layer_norm_eps\n\n self.dropout = dropout\n self.mem_len = mem_len\n self.reuse_len = reuse_len\n self.bi_data = bi_data\n self.clamp_len = clamp_len\n self.same_length = same_length\n\n self.summary_type = summary_type\n self.summary_use_proj = summary_use_proj\n self.summary_activation = summary_activation\n self.summary_last_dropout = summary_last_dropout\n self.start_n_top = start_n_top\n self.end_n_top = end_n_top\n\n self.bos_token_id = bos_token_id\n self.pad_token_id = pad_token_id\n self.eos_token_id = eos_token_id\n\n @property\n def max_position_embeddings(self):\n return -1\n\n @property\n def n_token(self): # Backward compatibility\n return self.vocab_size\n\n @n_token.setter\n def n_token(self, value): # Backward compatibility\n self.vocab_size = value\n\n @property\n def hidden_size(self):\n return self.d_model\n\n @property\n def num_attention_heads(self):\n return self.n_head\n\n @property\n def num_hidden_layers(self):\n return self.n_layer\n", "path": "src/transformers/configuration_xlnet.py"}]}
| 3,244 | 178 |
gh_patches_debug_64390
|
rasdani/github-patches
|
git_diff
|
alltheplaces__alltheplaces-2150
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider tmobile_us is broken
During the global build at 2021-06-30-14-42-26, spider **tmobile_us** failed with **7563 features** and **2 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/logs/tmobile_us.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/output/tmobile_us.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/output/tmobile_us.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/tmobile_us.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import json
3 from urllib.parse import urlencode
4
5 import scrapy
6
7 from locations.items import GeojsonPointItem
8 from locations.hours import OpeningHours
9
10 DAY_MAPPING = {'Monday': 'Mo',
11 'Tuesday': 'Tu',
12 'Wednesday': 'We',
13 'Thursday': 'Th',
14 'Friday': 'Fr',
15 'Saturday': 'Sa',
16 'Sunday': 'Su'}
17
18 BASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/getStoresByCoordinates?'
19
20
21 class TMobileUSSpider(scrapy.Spider):
22 name = "tmobile_us"
23 item_attributes = { 'brand': "T-Mobile" }
24 allowed_domains = ["www.t-mobile.com"]
25 download_delay = 0.2
26
27 def parse_hours(self, store_hours):
28 opening_hours = OpeningHours()
29 if store_hours is None:
30 return
31
32 for store_day in store_hours:
33 day = DAY_MAPPING[store_day.get("day")]
34 open_time = store_day.get("opens")
35 close_time = store_day.get("closes")
36 if open_time is None and close_time is None:
37 continue
38 opening_hours.add_range(day=day,
39 open_time=open_time,
40 close_time=close_time,
41 time_format='%H:%M'
42 )
43
44 return opening_hours.as_opening_hours()
45
46 def start_requests(self):
47 url = BASE_URL
48
49 with open('./locations/searchable_points/us_centroids_25mile_radius.csv') as points:
50
51 next(points) # Ignore the header
52 for point in points:
53 _, lat, lon = point.strip().split(',')
54
55 params = {
56 'latitude': '{}'.format(lat),
57 'longitude': '{}'.format(lon),
58 'count': '1000',
59 'radius': '25',
60 'ignoreLoadingBar': 'false'
61 }
62
63 yield scrapy.http.Request(url + urlencode(params), callback=self.parse)
64
65 def parse(self, response):
66 data = json.loads(response.body_as_unicode())
67
68 for store in data:
69 properties = {
70 'name': store["name"],
71 'ref': store["id"],
72 'addr_full': store["location"]["address"]["streetAddress"],
73 'city': store["location"]["address"]["addressLocality"],
74 'state': store["location"]["address"]["addressRegion"],
75 'postcode': store["location"]["address"]["postalCode"],
76 'phone': store.get("telephone"),
77 'website': store.get("url") or response.url,
78 'lat': float(store["location"]["latitude"]),
79 'lon': float(store["location"]["longitude"]),
80 }
81
82 hours = self.parse_hours(store.get("hours", []))
83 if hours:
84 properties["opening_hours"] = hours
85
86 yield GeojsonPointItem(**properties)
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/locations/spiders/tmobile_us.py b/locations/spiders/tmobile_us.py
--- a/locations/spiders/tmobile_us.py
+++ b/locations/spiders/tmobile_us.py
@@ -15,7 +15,7 @@
'Saturday': 'Sa',
'Sunday': 'Su'}
-BASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/getStoresByCoordinates?'
+BASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/v2.1/getStoresByCoordinates?'
class TMobileUSSpider(scrapy.Spider):
|
{"golden_diff": "diff --git a/locations/spiders/tmobile_us.py b/locations/spiders/tmobile_us.py\n--- a/locations/spiders/tmobile_us.py\n+++ b/locations/spiders/tmobile_us.py\n@@ -15,7 +15,7 @@\n 'Saturday': 'Sa',\n 'Sunday': 'Su'}\n \n-BASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/getStoresByCoordinates?'\n+BASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/v2.1/getStoresByCoordinates?'\n \n \n class TMobileUSSpider(scrapy.Spider):\n", "issue": "Spider tmobile_us is broken\nDuring the global build at 2021-06-30-14-42-26, spider **tmobile_us** failed with **7563 features** and **2 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/logs/tmobile_us.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/output/tmobile_us.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/output/tmobile_us.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nfrom urllib.parse import urlencode\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\nDAY_MAPPING = {'Monday': 'Mo',\n 'Tuesday': 'Tu',\n 'Wednesday': 'We',\n 'Thursday': 'Th',\n 'Friday': 'Fr',\n 'Saturday': 'Sa',\n 'Sunday': 'Su'}\n\nBASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/getStoresByCoordinates?'\n\n\nclass TMobileUSSpider(scrapy.Spider):\n name = \"tmobile_us\"\n item_attributes = { 'brand': \"T-Mobile\" }\n allowed_domains = [\"www.t-mobile.com\"]\n download_delay = 0.2\n\n def parse_hours(self, store_hours):\n opening_hours = OpeningHours()\n if store_hours is None:\n return\n\n for store_day in store_hours:\n day = DAY_MAPPING[store_day.get(\"day\")]\n open_time = store_day.get(\"opens\")\n close_time = store_day.get(\"closes\")\n if open_time is None and close_time is None:\n continue\n opening_hours.add_range(day=day,\n open_time=open_time,\n close_time=close_time,\n time_format='%H:%M'\n )\n\n return opening_hours.as_opening_hours()\n\n def start_requests(self):\n url = BASE_URL\n\n with open('./locations/searchable_points/us_centroids_25mile_radius.csv') as points:\n\n next(points) # Ignore the header\n for point in points:\n _, lat, lon = point.strip().split(',')\n\n params = {\n 'latitude': '{}'.format(lat),\n 'longitude': '{}'.format(lon),\n 'count': '1000',\n 'radius': '25',\n 'ignoreLoadingBar': 'false'\n }\n\n yield scrapy.http.Request(url + urlencode(params), callback=self.parse)\n\n def parse(self, response):\n data = json.loads(response.body_as_unicode())\n\n for store in data:\n properties = {\n 'name': store[\"name\"],\n 'ref': store[\"id\"],\n 'addr_full': store[\"location\"][\"address\"][\"streetAddress\"],\n 'city': store[\"location\"][\"address\"][\"addressLocality\"],\n 'state': store[\"location\"][\"address\"][\"addressRegion\"],\n 'postcode': store[\"location\"][\"address\"][\"postalCode\"],\n 'phone': store.get(\"telephone\"),\n 'website': store.get(\"url\") or response.url,\n 'lat': float(store[\"location\"][\"latitude\"]),\n 'lon': float(store[\"location\"][\"longitude\"]),\n }\n\n hours = self.parse_hours(store.get(\"hours\", []))\n if hours:\n properties[\"opening_hours\"] = hours\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/tmobile_us.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nfrom urllib.parse import urlencode\n\nimport scrapy\n\nfrom locations.items import 
GeojsonPointItem\nfrom locations.hours import OpeningHours\n\nDAY_MAPPING = {'Monday': 'Mo',\n 'Tuesday': 'Tu',\n 'Wednesday': 'We',\n 'Thursday': 'Th',\n 'Friday': 'Fr',\n 'Saturday': 'Sa',\n 'Sunday': 'Su'}\n\nBASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/v2.1/getStoresByCoordinates?'\n\n\nclass TMobileUSSpider(scrapy.Spider):\n name = \"tmobile_us\"\n item_attributes = { 'brand': \"T-Mobile\" }\n allowed_domains = [\"www.t-mobile.com\"]\n download_delay = 0.2\n\n def parse_hours(self, store_hours):\n opening_hours = OpeningHours()\n if store_hours is None:\n return\n\n for store_day in store_hours:\n day = DAY_MAPPING[store_day.get(\"day\")]\n open_time = store_day.get(\"opens\")\n close_time = store_day.get(\"closes\")\n if open_time is None and close_time is None:\n continue\n opening_hours.add_range(day=day,\n open_time=open_time,\n close_time=close_time,\n time_format='%H:%M'\n )\n\n return opening_hours.as_opening_hours()\n\n def start_requests(self):\n url = BASE_URL\n\n with open('./locations/searchable_points/us_centroids_25mile_radius.csv') as points:\n\n next(points) # Ignore the header\n for point in points:\n _, lat, lon = point.strip().split(',')\n\n params = {\n 'latitude': '{}'.format(lat),\n 'longitude': '{}'.format(lon),\n 'count': '1000',\n 'radius': '25',\n 'ignoreLoadingBar': 'false'\n }\n\n yield scrapy.http.Request(url + urlencode(params), callback=self.parse)\n\n def parse(self, response):\n data = json.loads(response.body_as_unicode())\n\n for store in data:\n properties = {\n 'name': store[\"name\"],\n 'ref': store[\"id\"],\n 'addr_full': store[\"location\"][\"address\"][\"streetAddress\"],\n 'city': store[\"location\"][\"address\"][\"addressLocality\"],\n 'state': store[\"location\"][\"address\"][\"addressRegion\"],\n 'postcode': store[\"location\"][\"address\"][\"postalCode\"],\n 'phone': store.get(\"telephone\"),\n 'website': store.get(\"url\") or response.url,\n 'lat': float(store[\"location\"][\"latitude\"]),\n 'lon': float(store[\"location\"][\"longitude\"]),\n }\n\n hours = self.parse_hours(store.get(\"hours\", []))\n if hours:\n properties[\"opening_hours\"] = hours\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/tmobile_us.py"}]}
| 1,227 | 150 |
gh_patches_debug_25740
|
rasdani/github-patches
|
git_diff
|
HypothesisWorks__hypothesis-2605
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`hypothesis --version` does not work
When I use `pip install hypothesis` in a clear env and run `hypothesis --version`, this happens:
```python
» hypothesis --version
Traceback (most recent call last):
File "/Users/sobolev/Documents/github/returns/.venv/bin/hypothesis", line 6, in <module>
from hypothesis.extra.cli import main
File "/Users/sobolev/Documents/github/returns/.venv/lib/python3.7/site-packages/hypothesis/extra/cli.py", line 35, in <module>
from hypothesis.extra import ghostwriter
File "/Users/sobolev/Documents/github/returns/.venv/lib/python3.7/site-packages/hypothesis/extra/ghostwriter.py", line 57, in <module>
import black
ModuleNotFoundError: No module named 'black'
```
Looks like `black` is treated as a required dependency right now.
After installing `black`:
```
» hypothesis --version
hypothesis, version 5.33.0
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hypothesis-python/src/hypothesis/extra/cli.py`
Content:
```
1 # This file is part of Hypothesis, which may be found at
2 # https://github.com/HypothesisWorks/hypothesis/
3 #
4 # Most of this work is copyright (C) 2013-2020 David R. MacIver
5 # ([email protected]), but it contains contributions by others. See
6 # CONTRIBUTING.rst for a full list of people who may hold copyright, and
7 # consult the git log if you need to determine who owns an individual
8 # contribution.
9 #
10 # This Source Code Form is subject to the terms of the Mozilla Public License,
11 # v. 2.0. If a copy of the MPL was not distributed with this file, You can
12 # obtain one at https://mozilla.org/MPL/2.0/.
13 #
14 # END HEADER
15
16 """
17 .. _hypothesis-cli:
18
19 ----------------
20 hypothesis[cli]
21 ----------------
22
23 This module provides Hypothesis' command-line interface, for e.g.
24 :doc:`'ghostwriting' tests <ghostwriter>` via the terminal.
25 It requires the :pypi:`click` package.
26
27 Run :command:`hypothesis --help` in your terminal for details.
28 """
29
30 import builtins
31 import importlib
32 import sys
33 from difflib import get_close_matches
34
35 from hypothesis.extra import ghostwriter
36
37 try:
38 import click
39 except ImportError:
40
41 def main():
42 """If `click` is not installed, tell the user to install it then exit."""
43 sys.stderr.write(
44 """
45 The Hypothesis command-line interface requires the `click` package,
46 which you do not have installed. Run:
47
48 python -m pip install --upgrade hypothesis[cli]
49
50 and try again.
51 """
52 )
53 sys.exit(1)
54
55
56 else:
57 # Ensure that Python scripts in the current working directory are importable,
58 # on the principle that Ghostwriter should 'just work' for novice users. Note
59 # that we append rather than prepend to the module search path, so this will
60 # never shadow the stdlib or installed packages.
61 sys.path.append(".")
62
63 @click.group(context_settings={"help_option_names": ("-h", "--help")})
64 @click.version_option()
65 def main():
66 pass
67
68 def obj_name(s: str) -> object:
69 """This "type" imports whatever object is named by a dotted string."""
70 s = s.strip()
71 try:
72 return importlib.import_module(s)
73 except ImportError:
74 pass
75 if "." not in s:
76 modulename, module, funcname = "builtins", builtins, s
77 else:
78 modulename, funcname = s.rsplit(".", 1)
79 try:
80 module = importlib.import_module(modulename)
81 except ImportError:
82 raise click.UsageError(
83 f"Failed to import the {modulename} module for introspection. "
84 "Check spelling and your Python import path, or use the Python API?"
85 )
86 try:
87 return getattr(module, funcname)
88 except AttributeError:
89 public_names = [name for name in vars(module) if not name.startswith("_")]
90 matches = get_close_matches(funcname, public_names)
91 raise click.UsageError(
92 f"Found the {modulename!r} module, but it doesn't have a "
93 f"{funcname!r} attribute."
94 + (f" Closest matches: {matches!r}" if matches else "")
95 )
96
97 @main.command() # type: ignore # Click adds the .command attribute
98 @click.argument("func", type=obj_name, required=True, nargs=-1)
99 @click.option("--idempotent", "writer", flag_value="idempotent")
100 @click.option("--binary-op", "writer", flag_value="binary_operation")
101 @click.option("--equivalent", "writer", flag_value="equivalent")
102 @click.option("--roundtrip", "writer", flag_value="roundtrip")
103 # Note: we deliberately omit a --ufunc flag, because the magic()
104 # detection of ufuncs is both precise and complete.
105 @click.option(
106 "--style",
107 type=click.Choice(["pytest", "unittest"]),
108 default="pytest",
109 help="pytest-style function, or unittest-style method?",
110 )
111 @click.option(
112 "-e",
113 "--except",
114 "except_",
115 type=obj_name,
116 multiple=True,
117 help="dotted name of exception(s) to ignore",
118 )
119 def write(func, writer, except_, style): # noqa: D301 # \b disables autowrap
120 """`hypothesis write` writes property-based tests for you!
121
122 Type annotations are helpful but not required for our advanced introspection
123 and templating logic. Try running the examples below to see how it works:
124
125 \b
126 hypothesis write gzip
127 hypothesis write re.compile --except re.error
128 hypothesis write --style=unittest --idempotent sorted
129 hypothesis write --binary-op operator.add
130 hypothesis write --equivalent ast.literal_eval eval
131 hypothesis write --roundtrip json.dumps json.loads
132 """
133 # NOTE: if you want to call this function from Python, look instead at the
134 # ``hypothesis.extra.ghostwriter`` module. Click-decorated functions have
135 # a different calling convention, and raise SystemExit instead of returning.
136 if writer is None:
137 writer = "magic"
138 elif writer == "idempotent" and len(func) > 1:
139 raise click.UsageError("Test functions for idempotence one at a time.")
140 elif writer == "roundtrip" and len(func) == 1:
141 writer = "idempotent"
142 elif writer == "equivalent" and len(func) == 1:
143 writer = "fuzz"
144
145 print(getattr(ghostwriter, writer)(*func, except_=except_ or (), style=style))
146
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/hypothesis-python/src/hypothesis/extra/cli.py b/hypothesis-python/src/hypothesis/extra/cli.py
--- a/hypothesis-python/src/hypothesis/extra/cli.py
+++ b/hypothesis-python/src/hypothesis/extra/cli.py
@@ -32,7 +32,14 @@
import sys
from difflib import get_close_matches
-from hypothesis.extra import ghostwriter
+MESSAGE = """
+The Hypothesis command-line interface requires the `{}` package,
+which you do not have installed. Run:
+
+ python -m pip install --upgrade hypothesis[cli]
+
+and try again.
+"""
try:
import click
@@ -40,16 +47,7 @@
def main():
"""If `click` is not installed, tell the user to install it then exit."""
- sys.stderr.write(
- """
-The Hypothesis command-line interface requires the `click` package,
-which you do not have installed. Run:
-
- python -m pip install --upgrade hypothesis[cli]
-
-and try again.
-"""
- )
+ sys.stderr.write(MESSAGE.format("click"))
sys.exit(1)
@@ -142,4 +140,10 @@
elif writer == "equivalent" and len(func) == 1:
writer = "fuzz"
+ try:
+ from hypothesis.extra import ghostwriter
+ except ImportError:
+ sys.stderr.write(MESSAGE.format("black"))
+ sys.exit(1)
+
print(getattr(ghostwriter, writer)(*func, except_=except_ or (), style=style))
|
{"golden_diff": "diff --git a/hypothesis-python/src/hypothesis/extra/cli.py b/hypothesis-python/src/hypothesis/extra/cli.py\n--- a/hypothesis-python/src/hypothesis/extra/cli.py\n+++ b/hypothesis-python/src/hypothesis/extra/cli.py\n@@ -32,7 +32,14 @@\n import sys\n from difflib import get_close_matches\n \n-from hypothesis.extra import ghostwriter\n+MESSAGE = \"\"\"\n+The Hypothesis command-line interface requires the `{}` package,\n+which you do not have installed. Run:\n+\n+ python -m pip install --upgrade hypothesis[cli]\n+\n+and try again.\n+\"\"\"\n \n try:\n import click\n@@ -40,16 +47,7 @@\n \n def main():\n \"\"\"If `click` is not installed, tell the user to install it then exit.\"\"\"\n- sys.stderr.write(\n- \"\"\"\n-The Hypothesis command-line interface requires the `click` package,\n-which you do not have installed. Run:\n-\n- python -m pip install --upgrade hypothesis[cli]\n-\n-and try again.\n-\"\"\"\n- )\n+ sys.stderr.write(MESSAGE.format(\"click\"))\n sys.exit(1)\n \n \n@@ -142,4 +140,10 @@\n elif writer == \"equivalent\" and len(func) == 1:\n writer = \"fuzz\"\n \n+ try:\n+ from hypothesis.extra import ghostwriter\n+ except ImportError:\n+ sys.stderr.write(MESSAGE.format(\"black\"))\n+ sys.exit(1)\n+\n print(getattr(ghostwriter, writer)(*func, except_=except_ or (), style=style))\n", "issue": "`hypothesis --version` does not work\nWhen I use `pip install hypothesis` in a clear env and run `hypothesis --version`, this happens:\r\n\r\n```python\r\n\u00bb hypothesis --version\r\nTraceback (most recent call last):\r\n File \"/Users/sobolev/Documents/github/returns/.venv/bin/hypothesis\", line 6, in <module>\r\n from hypothesis.extra.cli import main\r\n File \"/Users/sobolev/Documents/github/returns/.venv/lib/python3.7/site-packages/hypothesis/extra/cli.py\", line 35, in <module>\r\n from hypothesis.extra import ghostwriter\r\n File \"/Users/sobolev/Documents/github/returns/.venv/lib/python3.7/site-packages/hypothesis/extra/ghostwriter.py\", line 57, in <module>\r\n import black\r\nModuleNotFoundError: No module named 'black'\r\n```\r\n\r\nLooks like `black` is treated as a required dependency right now.\r\n\r\nAfter installing `black`:\r\n\r\n```\r\n\u00bb hypothesis --version\r\nhypothesis, version 5.33.0\r\n```\n", "before_files": [{"content": "# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis/\n#\n# Most of this work is copyright (C) 2013-2020 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. If a copy of the MPL was not distributed with this file, You can\n# obtain one at https://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\n\"\"\"\n.. 
_hypothesis-cli:\n\n----------------\nhypothesis[cli]\n----------------\n\nThis module provides Hypothesis' command-line interface, for e.g.\n:doc:`'ghostwriting' tests <ghostwriter>` via the terminal.\nIt requires the :pypi:`click` package.\n\nRun :command:`hypothesis --help` in your terminal for details.\n\"\"\"\n\nimport builtins\nimport importlib\nimport sys\nfrom difflib import get_close_matches\n\nfrom hypothesis.extra import ghostwriter\n\ntry:\n import click\nexcept ImportError:\n\n def main():\n \"\"\"If `click` is not installed, tell the user to install it then exit.\"\"\"\n sys.stderr.write(\n \"\"\"\nThe Hypothesis command-line interface requires the `click` package,\nwhich you do not have installed. Run:\n\n python -m pip install --upgrade hypothesis[cli]\n\nand try again.\n\"\"\"\n )\n sys.exit(1)\n\n\nelse:\n # Ensure that Python scripts in the current working directory are importable,\n # on the principle that Ghostwriter should 'just work' for novice users. Note\n # that we append rather than prepend to the module search path, so this will\n # never shadow the stdlib or installed packages.\n sys.path.append(\".\")\n\n @click.group(context_settings={\"help_option_names\": (\"-h\", \"--help\")})\n @click.version_option()\n def main():\n pass\n\n def obj_name(s: str) -> object:\n \"\"\"This \"type\" imports whatever object is named by a dotted string.\"\"\"\n s = s.strip()\n try:\n return importlib.import_module(s)\n except ImportError:\n pass\n if \".\" not in s:\n modulename, module, funcname = \"builtins\", builtins, s\n else:\n modulename, funcname = s.rsplit(\".\", 1)\n try:\n module = importlib.import_module(modulename)\n except ImportError:\n raise click.UsageError(\n f\"Failed to import the {modulename} module for introspection. \"\n \"Check spelling and your Python import path, or use the Python API?\"\n )\n try:\n return getattr(module, funcname)\n except AttributeError:\n public_names = [name for name in vars(module) if not name.startswith(\"_\")]\n matches = get_close_matches(funcname, public_names)\n raise click.UsageError(\n f\"Found the {modulename!r} module, but it doesn't have a \"\n f\"{funcname!r} attribute.\"\n + (f\" Closest matches: {matches!r}\" if matches else \"\")\n )\n\n @main.command() # type: ignore # Click adds the .command attribute\n @click.argument(\"func\", type=obj_name, required=True, nargs=-1)\n @click.option(\"--idempotent\", \"writer\", flag_value=\"idempotent\")\n @click.option(\"--binary-op\", \"writer\", flag_value=\"binary_operation\")\n @click.option(\"--equivalent\", \"writer\", flag_value=\"equivalent\")\n @click.option(\"--roundtrip\", \"writer\", flag_value=\"roundtrip\")\n # Note: we deliberately omit a --ufunc flag, because the magic()\n # detection of ufuncs is both precise and complete.\n @click.option(\n \"--style\",\n type=click.Choice([\"pytest\", \"unittest\"]),\n default=\"pytest\",\n help=\"pytest-style function, or unittest-style method?\",\n )\n @click.option(\n \"-e\",\n \"--except\",\n \"except_\",\n type=obj_name,\n multiple=True,\n help=\"dotted name of exception(s) to ignore\",\n )\n def write(func, writer, except_, style): # noqa: D301 # \\b disables autowrap\n \"\"\"`hypothesis write` writes property-based tests for you!\n\n Type annotations are helpful but not required for our advanced introspection\n and templating logic. 
Try running the examples below to see how it works:\n\n \\b\n hypothesis write gzip\n hypothesis write re.compile --except re.error\n hypothesis write --style=unittest --idempotent sorted\n hypothesis write --binary-op operator.add\n hypothesis write --equivalent ast.literal_eval eval\n hypothesis write --roundtrip json.dumps json.loads\n \"\"\"\n # NOTE: if you want to call this function from Python, look instead at the\n # ``hypothesis.extra.ghostwriter`` module. Click-decorated functions have\n # a different calling convention, and raise SystemExit instead of returning.\n if writer is None:\n writer = \"magic\"\n elif writer == \"idempotent\" and len(func) > 1:\n raise click.UsageError(\"Test functions for idempotence one at a time.\")\n elif writer == \"roundtrip\" and len(func) == 1:\n writer = \"idempotent\"\n elif writer == \"equivalent\" and len(func) == 1:\n writer = \"fuzz\"\n\n print(getattr(ghostwriter, writer)(*func, except_=except_ or (), style=style))\n", "path": "hypothesis-python/src/hypothesis/extra/cli.py"}], "after_files": [{"content": "# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis/\n#\n# Most of this work is copyright (C) 2013-2020 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. If a copy of the MPL was not distributed with this file, You can\n# obtain one at https://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\n\"\"\"\n.. _hypothesis-cli:\n\n----------------\nhypothesis[cli]\n----------------\n\nThis module provides Hypothesis' command-line interface, for e.g.\n:doc:`'ghostwriting' tests <ghostwriter>` via the terminal.\nIt requires the :pypi:`click` package.\n\nRun :command:`hypothesis --help` in your terminal for details.\n\"\"\"\n\nimport builtins\nimport importlib\nimport sys\nfrom difflib import get_close_matches\n\nMESSAGE = \"\"\"\nThe Hypothesis command-line interface requires the `{}` package,\nwhich you do not have installed. Run:\n\n python -m pip install --upgrade hypothesis[cli]\n\nand try again.\n\"\"\"\n\ntry:\n import click\nexcept ImportError:\n\n def main():\n \"\"\"If `click` is not installed, tell the user to install it then exit.\"\"\"\n sys.stderr.write(MESSAGE.format(\"click\"))\n sys.exit(1)\n\n\nelse:\n # Ensure that Python scripts in the current working directory are importable,\n # on the principle that Ghostwriter should 'just work' for novice users. Note\n # that we append rather than prepend to the module search path, so this will\n # never shadow the stdlib or installed packages.\n sys.path.append(\".\")\n\n @click.group(context_settings={\"help_option_names\": (\"-h\", \"--help\")})\n @click.version_option()\n def main():\n pass\n\n def obj_name(s: str) -> object:\n \"\"\"This \"type\" imports whatever object is named by a dotted string.\"\"\"\n s = s.strip()\n try:\n return importlib.import_module(s)\n except ImportError:\n pass\n if \".\" not in s:\n modulename, module, funcname = \"builtins\", builtins, s\n else:\n modulename, funcname = s.rsplit(\".\", 1)\n try:\n module = importlib.import_module(modulename)\n except ImportError:\n raise click.UsageError(\n f\"Failed to import the {modulename} module for introspection. 
\"\n \"Check spelling and your Python import path, or use the Python API?\"\n )\n try:\n return getattr(module, funcname)\n except AttributeError:\n public_names = [name for name in vars(module) if not name.startswith(\"_\")]\n matches = get_close_matches(funcname, public_names)\n raise click.UsageError(\n f\"Found the {modulename!r} module, but it doesn't have a \"\n f\"{funcname!r} attribute.\"\n + (f\" Closest matches: {matches!r}\" if matches else \"\")\n )\n\n @main.command() # type: ignore # Click adds the .command attribute\n @click.argument(\"func\", type=obj_name, required=True, nargs=-1)\n @click.option(\"--idempotent\", \"writer\", flag_value=\"idempotent\")\n @click.option(\"--binary-op\", \"writer\", flag_value=\"binary_operation\")\n @click.option(\"--equivalent\", \"writer\", flag_value=\"equivalent\")\n @click.option(\"--roundtrip\", \"writer\", flag_value=\"roundtrip\")\n # Note: we deliberately omit a --ufunc flag, because the magic()\n # detection of ufuncs is both precise and complete.\n @click.option(\n \"--style\",\n type=click.Choice([\"pytest\", \"unittest\"]),\n default=\"pytest\",\n help=\"pytest-style function, or unittest-style method?\",\n )\n @click.option(\n \"-e\",\n \"--except\",\n \"except_\",\n type=obj_name,\n multiple=True,\n help=\"dotted name of exception(s) to ignore\",\n )\n def write(func, writer, except_, style): # noqa: D301 # \\b disables autowrap\n \"\"\"`hypothesis write` writes property-based tests for you!\n\n Type annotations are helpful but not required for our advanced introspection\n and templating logic. Try running the examples below to see how it works:\n\n \\b\n hypothesis write gzip\n hypothesis write re.compile --except re.error\n hypothesis write --style=unittest --idempotent sorted\n hypothesis write --binary-op operator.add\n hypothesis write --equivalent ast.literal_eval eval\n hypothesis write --roundtrip json.dumps json.loads\n \"\"\"\n # NOTE: if you want to call this function from Python, look instead at the\n # ``hypothesis.extra.ghostwriter`` module. Click-decorated functions have\n # a different calling convention, and raise SystemExit instead of returning.\n if writer is None:\n writer = \"magic\"\n elif writer == \"idempotent\" and len(func) > 1:\n raise click.UsageError(\"Test functions for idempotence one at a time.\")\n elif writer == \"roundtrip\" and len(func) == 1:\n writer = \"idempotent\"\n elif writer == \"equivalent\" and len(func) == 1:\n writer = \"fuzz\"\n\n try:\n from hypothesis.extra import ghostwriter\n except ImportError:\n sys.stderr.write(MESSAGE.format(\"black\"))\n sys.exit(1)\n\n print(getattr(ghostwriter, writer)(*func, except_=except_ or (), style=style))\n", "path": "hypothesis-python/src/hypothesis/extra/cli.py"}]}
| 2,098 | 361 |
gh_patches_debug_1059
|
rasdani/github-patches
|
git_diff
|
internetarchive__openlibrary-5645
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Image uploader does not recognise uploaded file
<!-- What problem are we solving? What does the experience look like today? What are the symptoms? -->
As of today (8-09-2021) the image uploader does not recognise that an image has been selected and uploaded. Instead, it displays "Please provide an image URL" after hitting submit.
### Evidence / Screenshot (if possible)
### Relevant url?
<!-- `https://openlibrary.org/...` -->
### Steps to Reproduce
<!-- What steps caused you to find the bug? -->
1. Go to ...any edition
2. Do ...upload an image as a cover and submit.
<!-- What actually happened after these steps? What did you expect to happen? -->
* Actual: "Please provide an image URL"
* Expected: Image should be added as cover.
### Details
- **Logged in (Y/N)?** y
- **Browser type/version?** Chrome Version 92.0.4515.159 (Official Build) (x86_64)
- **Operating system?** MacOS
- **Environment (prod/dev/local)?** prod
<!-- If not sure, put prod -->
### Proposal & Constraints
<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->
### Related files
<!-- Files related to this issue; this is super useful for new contributors who might want to help! If you're not sure, leave this blank; a maintainer will add them. -->
### Stakeholders
<!-- @ tag stakeholders of this bug -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openlibrary/plugins/upstream/covers.py`
Content:
```
1 """Handle book cover/author photo upload.
2 """
3 from logging import getLogger
4
5 import requests
6 import six
7 import web
8 from six import BytesIO
9
10 from infogami.utils import delegate
11 from infogami.utils.view import safeint
12 from openlibrary import accounts
13 from openlibrary.plugins.upstream.models import Image
14 from openlibrary.plugins.upstream.utils import get_coverstore_url, render_template
15
16 logger = getLogger("openlibrary.plugins.upstream.covers")
17 def setup():
18 pass
19
20 class add_cover(delegate.page):
21 path = "(/books/OL\d+M)/add-cover"
22 cover_category = "b"
23
24 def GET(self, key):
25 book = web.ctx.site.get(key)
26 return render_template('covers/add', book)
27
28 def POST(self, key):
29 book = web.ctx.site.get(key)
30 if not book:
31 raise web.notfound("")
32
33 i = web.input(file={}, url="")
34
35 # remove references to field storage objects
36 web.ctx.pop("_fieldstorage", None)
37
38 data = self.upload(key, i)
39 coverid = data.get('id')
40
41 if coverid:
42 self.save(book, coverid, url=i.url)
43 cover = Image(web.ctx.site, "b", coverid)
44 return render_template("covers/saved", cover)
45 else:
46 return render_template("covers/add", book, {'url': i.url}, data)
47
48 def upload(self, key, i):
49 """Uploads a cover to coverstore and returns the response."""
50 olid = key.split("/")[-1]
51
52 if i.file is not None and hasattr(i.file, 'value'):
53 data = i.file.value
54 else:
55 data = None
56
57 if i.url and i.url.strip() == "http://":
58 i.url = ""
59
60 user = accounts.get_current_user()
61 params = {
62 "author": user and user.key,
63 "source_url": i.url,
64 "olid": olid,
65 "ip": web.ctx.ip
66 }
67
68 upload_url = '%s/%s/upload2' % (
69 get_coverstore_url(), self.cover_category)
70
71 if upload_url.startswith("//"):
72 upload_url = "http:" + upload_url
73
74 try:
75 files = {'data': BytesIO(data)}
76 response = requests.post(upload_url, data=params, files=files)
77 return web.storage(response.json())
78 except requests.HTTPError as e:
79 logger.exception("Covers upload failed")
80 return web.storage({'error': str(e)})
81
82 def save(self, book, coverid, url=None):
83 book.covers = [coverid] + [cover.id for cover in book.get_covers()]
84 book._save("Added new cover", action="add-cover", data={"url": url})
85
86 class add_work_cover(add_cover):
87 path = "(/works/OL\d+W)/add-cover"
88 cover_category = "w"
89
90 def upload(self, key, i):
91 if "coverid" in i and safeint(i.coverid):
92 return web.storage(id=int(i.coverid))
93 else:
94 return add_cover.upload(self, key, i)
95
96 class add_photo(add_cover):
97 path = "(/authors/OL\d+A)/add-photo"
98 cover_category = "a"
99
100 def save(self, author, photoid, url=None):
101 author.photos = [photoid] + [photo.id for photo in author.get_photos()]
102 author._save("Added new photo", action="add-photo", data={"url": url})
103
104 class manage_covers(delegate.page):
105 path = "(/books/OL\d+M)/manage-covers"
106 def GET(self, key):
107 book = web.ctx.site.get(key)
108 if not book:
109 raise web.notfound()
110 return render_template("covers/manage", key, self.get_images(book))
111
112 def get_images(self, book):
113 return book.get_covers()
114
115 def get_image(self, book):
116 return book.get_cover()
117
118 def save_images(self, book, covers):
119 book.covers = covers
120 book._save('Update covers')
121
122 def POST(self, key):
123 book = web.ctx.site.get(key)
124 if not book:
125 raise web.notfound()
126
127 images = web.input(image=[]).image
128 if '-' in images:
129 images = [int(id) for id in images[:images.index('-')]]
130 self.save_images(book, images)
131 return render_template("covers/saved", self.get_image(book), showinfo=False)
132 else:
133 # ERROR
134 pass
135
136 class manage_work_covers(manage_covers):
137 path = "(/works/OL\d+W)/manage-covers"
138
139
140 class manage_photos(manage_covers):
141 path = "(/authors/OL\d+A)/manage-photos"
142
143 def get_images(self, author):
144 return author.get_photos()
145
146 def get_image(self, author):
147 return author.get_photo()
148
149 def save_images(self, author, photos):
150 author.photos = photos
151 author._save('Update photos')
152
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
```diff
diff --git a/openlibrary/plugins/upstream/covers.py b/openlibrary/plugins/upstream/covers.py
--- a/openlibrary/plugins/upstream/covers.py
+++ b/openlibrary/plugins/upstream/covers.py
@@ -54,7 +54,7 @@
         else:
             data = None
 
-        if i.url and i.url.strip() == "http://":
+        if i.url and i.url.strip() == "https://":
             i.url = ""
 
         user = accounts.get_current_user()
```
|
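The sentinel comparison in `upload()` looks like a guard against the form's pre-filled placeholder URL; the patch updates it to the `https://` placeholder the form apparently submits today, so a file-only upload no longer carries a bogus `source_url`. A minimal sketch of that normalization step, written as a stand-alone helper rather than Open Library's actual code (the set of placeholder strings is an assumption), could look like this:

```python
# Illustrative sketch only -- not Open Library code. It captures the intent of the
# patched check: a URL field that still holds its pre-filled placeholder should be
# treated as "no URL given", so that a file upload on its own is accepted.

PLACEHOLDER_URLS = {"http://", "https://"}  # assumed placeholder values


def normalize_source_url(url):
    """Return '' when the form submitted only its pre-filled placeholder."""
    if url and url.strip() in PLACEHOLDER_URLS:
        return ""
    return url


if __name__ == "__main__":
    assert normalize_source_url("https://") == ""   # placeholder after the UI change
    assert normalize_source_url("http://") == ""    # legacy placeholder
    assert normalize_source_url("") == ""           # nothing entered
    assert normalize_source_url("https://example.org/cover.jpg") == "https://example.org/cover.jpg"
```

Accepting both placeholder spellings, as this sketch does, is one way to avoid a repeat regression if the form's default ever changes again; the actual patch simply swaps the single literal.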
{"golden_diff": "diff --git a/openlibrary/plugins/upstream/covers.py b/openlibrary/plugins/upstream/covers.py\n--- a/openlibrary/plugins/upstream/covers.py\n+++ b/openlibrary/plugins/upstream/covers.py\n@@ -54,7 +54,7 @@\n else:\n data = None\n \n- if i.url and i.url.strip() == \"http://\":\n+ if i.url and i.url.strip() == \"https://\":\n i.url = \"\"\n \n user = accounts.get_current_user()\n", "issue": "Image uploader does not recognise uploaded file\n<!-- What problem are we solving? What does the experience look like today? What are the symptoms? -->\r\nAs of today (8-09-2021) the image uploader does not recognise that an image has been selected and uploaded. Instead, it displays \"Please provide an image URL\" after hitting submit.\r\n\r\n### Evidence / Screenshot (if possible)\r\n\r\n### Relevant url?\r\n<!-- `https://openlibrary.org/...` -->\r\n\r\n### Steps to Reproduce\r\n<!-- What steps caused you to find the bug? -->\r\n1. Go to ...any edition\r\n2. Do ...upload an image as a cover and submit.\r\n\r\n<!-- What actually happened after these steps? What did you expect to happen? -->\r\n* Actual: \"Please provide an image URL\"\r\n* Expected: Image should be added as cover.\r\n\r\n### Details\r\n\r\n- **Logged in (Y/N)?** y\r\n- **Browser type/version?** Chrome Version 92.0.4515.159 (Official Build) (x86_64)\r\n- **Operating system?** MacOS\r\n- **Environment (prod/dev/local)?** prod\r\n<!-- If not sure, put prod -->\r\n\r\n### Proposal & Constraints\r\n<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->\r\n\r\n### Related files\r\n<!-- Files related to this issue; this is super useful for new contributors who might want to help! If you're not sure, leave this blank; a maintainer will add them. 
-->\r\n\r\n### Stakeholders\r\n<!-- @ tag stakeholders of this bug -->\r\n\n", "before_files": [{"content": "\"\"\"Handle book cover/author photo upload.\n\"\"\"\nfrom logging import getLogger\n\nimport requests\nimport six\nimport web\nfrom six import BytesIO\n\nfrom infogami.utils import delegate\nfrom infogami.utils.view import safeint\nfrom openlibrary import accounts\nfrom openlibrary.plugins.upstream.models import Image\nfrom openlibrary.plugins.upstream.utils import get_coverstore_url, render_template\n\nlogger = getLogger(\"openlibrary.plugins.upstream.covers\")\ndef setup():\n pass\n\nclass add_cover(delegate.page):\n path = \"(/books/OL\\d+M)/add-cover\"\n cover_category = \"b\"\n\n def GET(self, key):\n book = web.ctx.site.get(key)\n return render_template('covers/add', book)\n\n def POST(self, key):\n book = web.ctx.site.get(key)\n if not book:\n raise web.notfound(\"\")\n\n i = web.input(file={}, url=\"\")\n\n # remove references to field storage objects\n web.ctx.pop(\"_fieldstorage\", None)\n\n data = self.upload(key, i)\n coverid = data.get('id')\n\n if coverid:\n self.save(book, coverid, url=i.url)\n cover = Image(web.ctx.site, \"b\", coverid)\n return render_template(\"covers/saved\", cover)\n else:\n return render_template(\"covers/add\", book, {'url': i.url}, data)\n\n def upload(self, key, i):\n \"\"\"Uploads a cover to coverstore and returns the response.\"\"\"\n olid = key.split(\"/\")[-1]\n\n if i.file is not None and hasattr(i.file, 'value'):\n data = i.file.value\n else:\n data = None\n\n if i.url and i.url.strip() == \"http://\":\n i.url = \"\"\n\n user = accounts.get_current_user()\n params = {\n \"author\": user and user.key,\n \"source_url\": i.url,\n \"olid\": olid,\n \"ip\": web.ctx.ip\n }\n\n upload_url = '%s/%s/upload2' % (\n get_coverstore_url(), self.cover_category)\n\n if upload_url.startswith(\"//\"):\n upload_url = \"http:\" + upload_url\n\n try:\n files = {'data': BytesIO(data)}\n response = requests.post(upload_url, data=params, files=files)\n return web.storage(response.json())\n except requests.HTTPError as e:\n logger.exception(\"Covers upload failed\")\n return web.storage({'error': str(e)})\n\n def save(self, book, coverid, url=None):\n book.covers = [coverid] + [cover.id for cover in book.get_covers()]\n book._save(\"Added new cover\", action=\"add-cover\", data={\"url\": url})\n\nclass add_work_cover(add_cover):\n path = \"(/works/OL\\d+W)/add-cover\"\n cover_category = \"w\"\n\n def upload(self, key, i):\n if \"coverid\" in i and safeint(i.coverid):\n return web.storage(id=int(i.coverid))\n else:\n return add_cover.upload(self, key, i)\n\nclass add_photo(add_cover):\n path = \"(/authors/OL\\d+A)/add-photo\"\n cover_category = \"a\"\n\n def save(self, author, photoid, url=None):\n author.photos = [photoid] + [photo.id for photo in author.get_photos()]\n author._save(\"Added new photo\", action=\"add-photo\", data={\"url\": url})\n\nclass manage_covers(delegate.page):\n path = \"(/books/OL\\d+M)/manage-covers\"\n def GET(self, key):\n book = web.ctx.site.get(key)\n if not book:\n raise web.notfound()\n return render_template(\"covers/manage\", key, self.get_images(book))\n\n def get_images(self, book):\n return book.get_covers()\n\n def get_image(self, book):\n return book.get_cover()\n\n def save_images(self, book, covers):\n book.covers = covers\n book._save('Update covers')\n\n def POST(self, key):\n book = web.ctx.site.get(key)\n if not book:\n raise web.notfound()\n\n images = web.input(image=[]).image\n if '-' in images:\n images = 
[int(id) for id in images[:images.index('-')]]\n self.save_images(book, images)\n return render_template(\"covers/saved\", self.get_image(book), showinfo=False)\n else:\n # ERROR\n pass\n\nclass manage_work_covers(manage_covers):\n path = \"(/works/OL\\d+W)/manage-covers\"\n\n\nclass manage_photos(manage_covers):\n path = \"(/authors/OL\\d+A)/manage-photos\"\n\n def get_images(self, author):\n return author.get_photos()\n\n def get_image(self, author):\n return author.get_photo()\n\n def save_images(self, author, photos):\n author.photos = photos\n author._save('Update photos')\n", "path": "openlibrary/plugins/upstream/covers.py"}], "after_files": [{"content": "\"\"\"Handle book cover/author photo upload.\n\"\"\"\nfrom logging import getLogger\n\nimport requests\nimport six\nimport web\nfrom six import BytesIO\n\nfrom infogami.utils import delegate\nfrom infogami.utils.view import safeint\nfrom openlibrary import accounts\nfrom openlibrary.plugins.upstream.models import Image\nfrom openlibrary.plugins.upstream.utils import get_coverstore_url, render_template\n\nlogger = getLogger(\"openlibrary.plugins.upstream.covers\")\ndef setup():\n pass\n\nclass add_cover(delegate.page):\n path = \"(/books/OL\\d+M)/add-cover\"\n cover_category = \"b\"\n\n def GET(self, key):\n book = web.ctx.site.get(key)\n return render_template('covers/add', book)\n\n def POST(self, key):\n book = web.ctx.site.get(key)\n if not book:\n raise web.notfound(\"\")\n\n i = web.input(file={}, url=\"\")\n\n # remove references to field storage objects\n web.ctx.pop(\"_fieldstorage\", None)\n\n data = self.upload(key, i)\n coverid = data.get('id')\n\n if coverid:\n self.save(book, coverid, url=i.url)\n cover = Image(web.ctx.site, \"b\", coverid)\n return render_template(\"covers/saved\", cover)\n else:\n return render_template(\"covers/add\", book, {'url': i.url}, data)\n\n def upload(self, key, i):\n \"\"\"Uploads a cover to coverstore and returns the response.\"\"\"\n olid = key.split(\"/\")[-1]\n\n if i.file is not None and hasattr(i.file, 'value'):\n data = i.file.value\n else:\n data = None\n\n if i.url and i.url.strip() == \"https://\":\n i.url = \"\"\n\n user = accounts.get_current_user()\n params = {\n \"author\": user and user.key,\n \"source_url\": i.url,\n \"olid\": olid,\n \"ip\": web.ctx.ip\n }\n\n upload_url = '%s/%s/upload2' % (\n get_coverstore_url(), self.cover_category)\n\n if upload_url.startswith(\"//\"):\n upload_url = \"http:\" + upload_url\n\n try:\n files = {'data': BytesIO(data)}\n response = requests.post(upload_url, data=params, files=files)\n return web.storage(response.json())\n except requests.HTTPError as e:\n logger.exception(\"Covers upload failed\")\n return web.storage({'error': str(e)})\n\n def save(self, book, coverid, url=None):\n book.covers = [coverid] + [cover.id for cover in book.get_covers()]\n book._save(\"Added new cover\", action=\"add-cover\", data={\"url\": url})\n\nclass add_work_cover(add_cover):\n path = \"(/works/OL\\d+W)/add-cover\"\n cover_category = \"w\"\n\n def upload(self, key, i):\n if \"coverid\" in i and safeint(i.coverid):\n return web.storage(id=int(i.coverid))\n else:\n return add_cover.upload(self, key, i)\n\nclass add_photo(add_cover):\n path = \"(/authors/OL\\d+A)/add-photo\"\n cover_category = \"a\"\n\n def save(self, author, photoid, url=None):\n author.photos = [photoid] + [photo.id for photo in author.get_photos()]\n author._save(\"Added new photo\", action=\"add-photo\", data={\"url\": url})\n\nclass manage_covers(delegate.page):\n path = 
\"(/books/OL\\d+M)/manage-covers\"\n def GET(self, key):\n book = web.ctx.site.get(key)\n if not book:\n raise web.notfound()\n return render_template(\"covers/manage\", key, self.get_images(book))\n\n def get_images(self, book):\n return book.get_covers()\n\n def get_image(self, book):\n return book.get_cover()\n\n def save_images(self, book, covers):\n book.covers = covers\n book._save('Update covers')\n\n def POST(self, key):\n book = web.ctx.site.get(key)\n if not book:\n raise web.notfound()\n\n images = web.input(image=[]).image\n if '-' in images:\n images = [int(id) for id in images[:images.index('-')]]\n self.save_images(book, images)\n return render_template(\"covers/saved\", self.get_image(book), showinfo=False)\n else:\n # ERROR\n pass\n\nclass manage_work_covers(manage_covers):\n path = \"(/works/OL\\d+W)/manage-covers\"\n\n\nclass manage_photos(manage_covers):\n path = \"(/authors/OL\\d+A)/manage-photos\"\n\n def get_images(self, author):\n return author.get_photos()\n\n def get_image(self, author):\n return author.get_photo()\n\n def save_images(self, author, photos):\n author.photos = photos\n author._save('Update photos')\n", "path": "openlibrary/plugins/upstream/covers.py"}]}
| 2,015 | 108 |
gh_patches_debug_36368
|
rasdani/github-patches
|
git_diff
|
secdev__scapy-4017
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
scapy.layers.tls.crypto.hkdf.TLS13_HKDF crashes when cryptography module is missing
### Brief description
scapy.layers.tls.crypto.hkdf.TLS13_HKDF crashes in multiple environments
### Scapy version
2.5.0
### Python version
3.11.2
### Operating system
macOS Ventura 13.3.1 (with M1 chip)
### Additional environment information
_No response_
### How to reproduce
```
$ python --version
Python 3.11.2
$ pip install scapy
$ pip show scapy
Name: scapy
Version: 2.5.0
...
$ python
Python 3.11.2 (main, Feb 16 2023, 02:55:59) [Clang 14.0.0 (clang-1400.0.29.202)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>> from scapy.layers.tls.crypto.hkdf import TLS13_HKDF
>>> TLS13_HKDF("sha256")
```
We can also reproduce from the default python docker image:
```
$ docker run -it --entrypoint bash python:latest
# pip install scapy
# python
>>> from scapy.layers.tls.crypto.hkdf import TLS13_HKDF
>>> TLS13_HKDF("sha256")
```
### Actual result
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/opt/homebrew/lib/python3.11/site-packages/scapy/layers/tls/crypto/hkdf.py", line 23, in __init__
self.hash = _get_hash(hash_name)
^^^^^^^^^^^^^^^^^^^^
TypeError: 'NoneType' object is not callable
### Expected result
<scapy.layers.tls.crypto.hkdf.TLS13_HKDF object at 0x...>
### Related resources
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scapy/layers/tls/crypto/hkdf.py`
Content:
```
1 # SPDX-License-Identifier: GPL-2.0-only
2 # This file is part of Scapy
3 # See https://scapy.net/ for more information
4 # Copyright (C) 2017 Maxence Tury
5
6 """
7 Stateless HKDF for TLS 1.3.
8 """
9
10 import struct
11
12 from scapy.config import conf
13 from scapy.layers.tls.crypto.pkcs1 import _get_hash
14
15 if conf.crypto_valid:
16 from cryptography.hazmat.backends import default_backend
17 from cryptography.hazmat.primitives.kdf.hkdf import HKDF, HKDFExpand
18 from cryptography.hazmat.primitives.hashes import Hash
19 from cryptography.hazmat.primitives.hmac import HMAC
20
21
22 class TLS13_HKDF(object):
23 def __init__(self, hash_name="sha256"):
24 self.hash = _get_hash(hash_name)
25
26 def extract(self, salt, ikm):
27 h = self.hash
28 hkdf = HKDF(h, h.digest_size, salt, None, default_backend())
29 if ikm is None:
30 ikm = b"\x00" * h.digest_size
31 return hkdf._extract(ikm)
32
33 def expand(self, prk, info, L):
34 h = self.hash
35 hkdf = HKDFExpand(h, L, info, default_backend())
36 return hkdf.derive(prk)
37
38 def expand_label(self, secret, label, hash_value, length):
39 hkdf_label = struct.pack("!H", length)
40 hkdf_label += struct.pack("B", 6 + len(label))
41 hkdf_label += b"tls13 "
42 hkdf_label += label
43 hkdf_label += struct.pack("B", len(hash_value))
44 hkdf_label += hash_value
45 return self.expand(secret, hkdf_label, length)
46
47 def derive_secret(self, secret, label, messages):
48 h = Hash(self.hash, backend=default_backend())
49 h.update(messages)
50 hash_messages = h.finalize()
51 hash_len = self.hash.digest_size
52 return self.expand_label(secret, label, hash_messages, hash_len)
53
54 def compute_verify_data(self, basekey, handshake_context):
55 hash_len = self.hash.digest_size
56 finished_key = self.expand_label(basekey, b"finished", b"", hash_len)
57
58 h = Hash(self.hash, backend=default_backend())
59 h.update(handshake_context)
60 hash_value = h.finalize()
61
62 hm = HMAC(finished_key, self.hash, default_backend())
63 hm.update(hash_value)
64 return hm.finalize()
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
```diff
diff --git a/scapy/layers/tls/crypto/hkdf.py b/scapy/layers/tls/crypto/hkdf.py
--- a/scapy/layers/tls/crypto/hkdf.py
+++ b/scapy/layers/tls/crypto/hkdf.py
@@ -9,7 +9,7 @@
 
 import struct
 
-from scapy.config import conf
+from scapy.config import conf, crypto_validator
 from scapy.layers.tls.crypto.pkcs1 import _get_hash
 
 if conf.crypto_valid:
@@ -20,9 +20,11 @@
 
 
 class TLS13_HKDF(object):
+    @crypto_validator
     def __init__(self, hash_name="sha256"):
         self.hash = _get_hash(hash_name)
 
+    @crypto_validator
     def extract(self, salt, ikm):
         h = self.hash
         hkdf = HKDF(h, h.digest_size, salt, None, default_backend())
@@ -30,11 +32,13 @@
             ikm = b"\x00" * h.digest_size
         return hkdf._extract(ikm)
 
+    @crypto_validator
     def expand(self, prk, info, L):
         h = self.hash
         hkdf = HKDFExpand(h, L, info, default_backend())
         return hkdf.derive(prk)
 
+    @crypto_validator
     def expand_label(self, secret, label, hash_value, length):
         hkdf_label = struct.pack("!H", length)
         hkdf_label += struct.pack("B", 6 + len(label))
@@ -44,6 +48,7 @@
         hkdf_label += hash_value
         return self.expand(secret, hkdf_label, length)
 
+    @crypto_validator
     def derive_secret(self, secret, label, messages):
         h = Hash(self.hash, backend=default_backend())
         h.update(messages)
@@ -51,6 +56,7 @@
         hash_len = self.hash.digest_size
         return self.expand_label(secret, label, hash_messages, hash_len)
 
+    @crypto_validator
     def compute_verify_data(self, basekey, handshake_context):
         hash_len = self.hash.digest_size
         finished_key = self.expand_label(basekey, b"finished", b"", hash_len)
```
|
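The patch routes every cryptography-dependent method of `TLS13_HKDF` through `crypto_validator` from `scapy.config`, so a missing `cryptography` package is reported as a clear error at call time instead of the opaque `TypeError: 'NoneType' object is not callable` raised when `_get_hash` returns `None`. The sketch below approximates the general shape of such a guard decorator; it is a stand-alone illustration, not Scapy's implementation, and the flag name and error message are assumptions.

```python
# Generic sketch of an "optional dependency" guard decorator, approximating the role
# scapy.config.crypto_validator plays in the patch. Flag name and message are assumptions.
import functools

try:
    import cryptography  # noqa: F401
    CRYPTO_AVAILABLE = True
except ImportError:
    CRYPTO_AVAILABLE = False


def crypto_validator(func):
    """Raise a descriptive ImportError when the optional 'cryptography' package is absent."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if not CRYPTO_AVAILABLE:
            raise ImportError(
                "Cannot call %s(): this feature needs the 'cryptography' package" % func.__name__
            )
        return func(*args, **kwargs)
    return wrapper


class HKDFExample:
    @crypto_validator
    def __init__(self, hash_name="sha256"):
        # The real class resolves a hash object here; the guard makes any failure explicit.
        self.hash_name = hash_name
```

With this shape, `HKDFExample("sha256")` either succeeds or fails with an actionable message instead of an unrelated `TypeError`.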
{"golden_diff": "diff --git a/scapy/layers/tls/crypto/hkdf.py b/scapy/layers/tls/crypto/hkdf.py\n--- a/scapy/layers/tls/crypto/hkdf.py\n+++ b/scapy/layers/tls/crypto/hkdf.py\n@@ -9,7 +9,7 @@\n \n import struct\n \n-from scapy.config import conf\n+from scapy.config import conf, crypto_validator\n from scapy.layers.tls.crypto.pkcs1 import _get_hash\n \n if conf.crypto_valid:\n@@ -20,9 +20,11 @@\n \n \n class TLS13_HKDF(object):\n+ @crypto_validator\n def __init__(self, hash_name=\"sha256\"):\n self.hash = _get_hash(hash_name)\n \n+ @crypto_validator\n def extract(self, salt, ikm):\n h = self.hash\n hkdf = HKDF(h, h.digest_size, salt, None, default_backend())\n@@ -30,11 +32,13 @@\n ikm = b\"\\x00\" * h.digest_size\n return hkdf._extract(ikm)\n \n+ @crypto_validator\n def expand(self, prk, info, L):\n h = self.hash\n hkdf = HKDFExpand(h, L, info, default_backend())\n return hkdf.derive(prk)\n \n+ @crypto_validator\n def expand_label(self, secret, label, hash_value, length):\n hkdf_label = struct.pack(\"!H\", length)\n hkdf_label += struct.pack(\"B\", 6 + len(label))\n@@ -44,6 +48,7 @@\n hkdf_label += hash_value\n return self.expand(secret, hkdf_label, length)\n \n+ @crypto_validator\n def derive_secret(self, secret, label, messages):\n h = Hash(self.hash, backend=default_backend())\n h.update(messages)\n@@ -51,6 +56,7 @@\n hash_len = self.hash.digest_size\n return self.expand_label(secret, label, hash_messages, hash_len)\n \n+ @crypto_validator\n def compute_verify_data(self, basekey, handshake_context):\n hash_len = self.hash.digest_size\n finished_key = self.expand_label(basekey, b\"finished\", b\"\", hash_len)\n", "issue": "scapy.layers.tls.crypto.hkdf.TLS13_HKDF crashes when cryptography module is missing\n### Brief description\n\nscapy.layers.tls.crypto.hkdf.TLS13_HKDF crashes in multiple environments\n\n### Scapy version\n\n2.5.0\n\n### Python version\n\n3.11.2\n\n### Operating system\n\nmacOS Ventura 13.3.1 (with M1 chip)\n\n### Additional environment information\n\n_No response_\n\n### How to reproduce\n\n```\r\n$ python --version\r\nPython 3.11.2\r\n$ pip install scapy\r\n$ pip show scapy\r\nName: scapy\r\nVersion: 2.5.0\r\n...\r\n$ python\r\nPython 3.11.2 (main, Feb 16 2023, 02:55:59) [Clang 14.0.0 (clang-1400.0.29.202)] on darwin\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> from scapy.layers.tls.crypto.hkdf import TLS13_HKDF\r\n>>> TLS13_HKDF(\"sha256\")\r\n```\r\n\r\nWe can also reproduce from the default python docker image:\r\n```\r\n$ docker run -it --entrypoint bash python:latest\r\n# pip install scapy\r\n# python\r\n>>> from scapy.layers.tls.crypto.hkdf import TLS13_HKDF\r\n>>> TLS13_HKDF(\"sha256\")\r\n```\n\n### Actual result\n\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/opt/homebrew/lib/python3.11/site-packages/scapy/layers/tls/crypto/hkdf.py\", line 23, in __init__\r\n self.hash = _get_hash(hash_name)\r\n ^^^^^^^^^^^^^^^^^^^^\r\nTypeError: 'NoneType' object is not callable\n\n### Expected result\n\n<scapy.layers.tls.crypto.hkdf.TLS13_HKDF object at 0x...>\n\n### Related resources\n\n_No response_\n", "before_files": [{"content": "# SPDX-License-Identifier: GPL-2.0-only\n# This file is part of Scapy\n# See https://scapy.net/ for more information\n# Copyright (C) 2017 Maxence Tury\n\n\"\"\"\nStateless HKDF for TLS 1.3.\n\"\"\"\n\nimport struct\n\nfrom scapy.config import conf\nfrom scapy.layers.tls.crypto.pkcs1 import _get_hash\n\nif conf.crypto_valid:\n from 
cryptography.hazmat.backends import default_backend\n from cryptography.hazmat.primitives.kdf.hkdf import HKDF, HKDFExpand\n from cryptography.hazmat.primitives.hashes import Hash\n from cryptography.hazmat.primitives.hmac import HMAC\n\n\nclass TLS13_HKDF(object):\n def __init__(self, hash_name=\"sha256\"):\n self.hash = _get_hash(hash_name)\n\n def extract(self, salt, ikm):\n h = self.hash\n hkdf = HKDF(h, h.digest_size, salt, None, default_backend())\n if ikm is None:\n ikm = b\"\\x00\" * h.digest_size\n return hkdf._extract(ikm)\n\n def expand(self, prk, info, L):\n h = self.hash\n hkdf = HKDFExpand(h, L, info, default_backend())\n return hkdf.derive(prk)\n\n def expand_label(self, secret, label, hash_value, length):\n hkdf_label = struct.pack(\"!H\", length)\n hkdf_label += struct.pack(\"B\", 6 + len(label))\n hkdf_label += b\"tls13 \"\n hkdf_label += label\n hkdf_label += struct.pack(\"B\", len(hash_value))\n hkdf_label += hash_value\n return self.expand(secret, hkdf_label, length)\n\n def derive_secret(self, secret, label, messages):\n h = Hash(self.hash, backend=default_backend())\n h.update(messages)\n hash_messages = h.finalize()\n hash_len = self.hash.digest_size\n return self.expand_label(secret, label, hash_messages, hash_len)\n\n def compute_verify_data(self, basekey, handshake_context):\n hash_len = self.hash.digest_size\n finished_key = self.expand_label(basekey, b\"finished\", b\"\", hash_len)\n\n h = Hash(self.hash, backend=default_backend())\n h.update(handshake_context)\n hash_value = h.finalize()\n\n hm = HMAC(finished_key, self.hash, default_backend())\n hm.update(hash_value)\n return hm.finalize()\n", "path": "scapy/layers/tls/crypto/hkdf.py"}], "after_files": [{"content": "# SPDX-License-Identifier: GPL-2.0-only\n# This file is part of Scapy\n# See https://scapy.net/ for more information\n# Copyright (C) 2017 Maxence Tury\n\n\"\"\"\nStateless HKDF for TLS 1.3.\n\"\"\"\n\nimport struct\n\nfrom scapy.config import conf, crypto_validator\nfrom scapy.layers.tls.crypto.pkcs1 import _get_hash\n\nif conf.crypto_valid:\n from cryptography.hazmat.backends import default_backend\n from cryptography.hazmat.primitives.kdf.hkdf import HKDF, HKDFExpand\n from cryptography.hazmat.primitives.hashes import Hash\n from cryptography.hazmat.primitives.hmac import HMAC\n\n\nclass TLS13_HKDF(object):\n @crypto_validator\n def __init__(self, hash_name=\"sha256\"):\n self.hash = _get_hash(hash_name)\n\n @crypto_validator\n def extract(self, salt, ikm):\n h = self.hash\n hkdf = HKDF(h, h.digest_size, salt, None, default_backend())\n if ikm is None:\n ikm = b\"\\x00\" * h.digest_size\n return hkdf._extract(ikm)\n\n @crypto_validator\n def expand(self, prk, info, L):\n h = self.hash\n hkdf = HKDFExpand(h, L, info, default_backend())\n return hkdf.derive(prk)\n\n @crypto_validator\n def expand_label(self, secret, label, hash_value, length):\n hkdf_label = struct.pack(\"!H\", length)\n hkdf_label += struct.pack(\"B\", 6 + len(label))\n hkdf_label += b\"tls13 \"\n hkdf_label += label\n hkdf_label += struct.pack(\"B\", len(hash_value))\n hkdf_label += hash_value\n return self.expand(secret, hkdf_label, length)\n\n @crypto_validator\n def derive_secret(self, secret, label, messages):\n h = Hash(self.hash, backend=default_backend())\n h.update(messages)\n hash_messages = h.finalize()\n hash_len = self.hash.digest_size\n return self.expand_label(secret, label, hash_messages, hash_len)\n\n @crypto_validator\n def compute_verify_data(self, basekey, handshake_context):\n hash_len = 
self.hash.digest_size\n finished_key = self.expand_label(basekey, b\"finished\", b\"\", hash_len)\n\n h = Hash(self.hash, backend=default_backend())\n h.update(handshake_context)\n hash_value = h.finalize()\n\n hm = HMAC(finished_key, self.hash, default_backend())\n hm.update(hash_value)\n return hm.finalize()\n", "path": "scapy/layers/tls/crypto/hkdf.py"}]}
| 1,396 | 494 |
gh_patches_debug_33826
|
rasdani/github-patches
|
git_diff
|
pyro-ppl__pyro-3063
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Some pyro.contrib.funsor tests are failing in torch==1.11.0
This issue records tests of `pyro.contrib.funsor` that started failing in #3045. In that PR I've xfailed the following tests so as to unblock that PR.
- [ ] `python examples/contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=5 --funsor`
- [ ] `pytest -x tests/contrib/funsor/test_enum_funsor.py::test_elbo_enumerate_plate_1`
- [ ] `pytest -x tests/contrib/funsor/test_enum_funsor.py::test_elbo_enumerate_plate_4`
- [ ] `pytest -x tests/contrib/funsor/test_enum_funsor.py::test_elbo_enumerate_plate_6`
- [ ] `pytest -x tests/contrib/funsor/test_enum_funsor.py::test_elbo_enumerate_plate_7`
- [ ] `pytest -x tests/contrib/funsor/test_enum_funsor.py::test_elbo_enumerate_plates_2`
- [ ] `pytest -x tests/contrib/funsor/test_enum_funsor.py::test_elbo_enumerate_plates_3`
- [ ] `pytest -x tests/contrib/funsor/test_enum_funsor.py::test_elbo_enumerate_plates_4`
- [ ] `pytest -x tests/contrib/funsor/test_enum_funsor.py::test_elbo_enumerate_plates_5`
- [ ] `pytest -x tests/contrib/funsor/test_enum_funsor.py::test_elbo_enumerate_plates_6`
- [ ] `pytest -x tests/contrib/funsor/test_enum_funsor.py::test_elbo_enumerate_plates_7`
- [ ] `pytest -x tests/contrib/funsor/test_enum_funsor.py::test_elbo_enumerate_plates_8`
cc @eb8680 @ordabayevy
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyro/contrib/funsor/infer/traceenum_elbo.py`
Content:
```
1 # Copyright Contributors to the Pyro project.
2 # SPDX-License-Identifier: Apache-2.0
3
4 import contextlib
5
6 import funsor
7 from funsor.adjoint import AdjointTape
8
9 from pyro.contrib.funsor import to_data, to_funsor
10 from pyro.contrib.funsor.handlers import enum, plate, replay, trace
11 from pyro.contrib.funsor.infer.elbo import ELBO, Jit_ELBO
12 from pyro.distributions.util import copy_docs_from
13 from pyro.infer import TraceEnum_ELBO as _OrigTraceEnum_ELBO
14
15
16 # Work around a bug in unfold_contraction_generic_tuple interacting with
17 # Approximate introduced in https://github.com/pyro-ppl/funsor/pull/488 .
18 # Once fixed, this can be replaced by funsor.optimizer.apply_optimizer().
19 def apply_optimizer(x):
20 with funsor.interpretations.normalize:
21 expr = funsor.interpreter.reinterpret(x)
22
23 with funsor.optimizer.optimize_base:
24 return funsor.interpreter.reinterpret(expr)
25
26
27 def terms_from_trace(tr):
28 """Helper function to extract elbo components from execution traces."""
29 # data structure containing densities, measures, scales, and identification
30 # of free variables as either product (plate) variables or sum (measure) variables
31 terms = {
32 "log_factors": [],
33 "log_measures": [],
34 "scale": to_funsor(1.0),
35 "plate_vars": frozenset(),
36 "measure_vars": frozenset(),
37 "plate_to_step": dict(),
38 }
39 for name, node in tr.nodes.items():
40 # add markov dimensions to the plate_to_step dictionary
41 if node["type"] == "markov_chain":
42 terms["plate_to_step"][node["name"]] = node["value"]
43 # ensure previous step variables are added to measure_vars
44 for step in node["value"]:
45 terms["measure_vars"] |= frozenset(
46 {
47 var
48 for var in step[1:-1]
49 if tr.nodes[var]["funsor"].get("log_measure", None) is not None
50 }
51 )
52 if (
53 node["type"] != "sample"
54 or type(node["fn"]).__name__ == "_Subsample"
55 or node["infer"].get("_do_not_score", False)
56 ):
57 continue
58 # grab plate dimensions from the cond_indep_stack
59 terms["plate_vars"] |= frozenset(
60 f.name for f in node["cond_indep_stack"] if f.vectorized
61 )
62 # grab the log-measure, found only at sites that are not replayed or observed
63 if node["funsor"].get("log_measure", None) is not None:
64 terms["log_measures"].append(node["funsor"]["log_measure"])
65 # sum (measure) variables: the fresh non-plate variables at a site
66 terms["measure_vars"] |= (
67 frozenset(node["funsor"]["value"].inputs) | {name}
68 ) - terms["plate_vars"]
69 # grab the scale, assuming a common subsampling scale
70 if (
71 node.get("replay_active", False)
72 and set(node["funsor"]["log_prob"].inputs) & terms["measure_vars"]
73 and float(to_data(node["funsor"]["scale"])) != 1.0
74 ):
75 # model site that depends on enumerated variable: common scale
76 terms["scale"] = node["funsor"]["scale"]
77 else: # otherwise: default scale behavior
78 node["funsor"]["log_prob"] = (
79 node["funsor"]["log_prob"] * node["funsor"]["scale"]
80 )
81 # grab the log-density, found at all sites except those that are not replayed
82 if node["is_observed"] or not node.get("replay_skipped", False):
83 terms["log_factors"].append(node["funsor"]["log_prob"])
84 # add plate dimensions to the plate_to_step dictionary
85 terms["plate_to_step"].update(
86 {plate: terms["plate_to_step"].get(plate, {}) for plate in terms["plate_vars"]}
87 )
88 return terms
89
90
91 @copy_docs_from(_OrigTraceEnum_ELBO)
92 class TraceMarkovEnum_ELBO(ELBO):
93 def differentiable_loss(self, model, guide, *args, **kwargs):
94
95 # get batched, enumerated, to_funsor-ed traces from the guide and model
96 with plate(
97 size=self.num_particles
98 ) if self.num_particles > 1 else contextlib.ExitStack(), enum(
99 first_available_dim=(-self.max_plate_nesting - 1)
100 if self.max_plate_nesting
101 else None
102 ):
103 guide_tr = trace(guide).get_trace(*args, **kwargs)
104 model_tr = trace(replay(model, trace=guide_tr)).get_trace(*args, **kwargs)
105
106 # extract from traces all metadata that we will need to compute the elbo
107 guide_terms = terms_from_trace(guide_tr)
108 model_terms = terms_from_trace(model_tr)
109
110 # guide side enumeration is not supported
111 if any(guide_terms["plate_to_step"].values()):
112 raise NotImplementedError(
113 "TraceMarkovEnum_ELBO does not yet support guide side Markov enumeration"
114 )
115
116 # build up a lazy expression for the elbo
117 with funsor.terms.lazy:
118 # identify and contract out auxiliary variables in the model with partial_sum_product
119 contracted_factors, uncontracted_factors = [], []
120 for f in model_terms["log_factors"]:
121 if model_terms["measure_vars"].intersection(f.inputs):
122 contracted_factors.append(f)
123 else:
124 uncontracted_factors.append(f)
125 # incorporate the effects of subsampling and handlers.scale through a common scale factor
126 markov_dims = frozenset(
127 {plate for plate, step in model_terms["plate_to_step"].items() if step}
128 )
129 contracted_costs = [
130 model_terms["scale"] * f
131 for f in funsor.sum_product.dynamic_partial_sum_product(
132 funsor.ops.logaddexp,
133 funsor.ops.add,
134 model_terms["log_measures"] + contracted_factors,
135 plate_to_step=model_terms["plate_to_step"],
136 eliminate=model_terms["measure_vars"] | markov_dims,
137 )
138 ]
139
140 costs = contracted_costs + uncontracted_factors # model costs: logp
141 costs += [-f for f in guide_terms["log_factors"]] # guide costs: -logq
142
143 # finally, integrate out guide variables in the elbo and all plates
144 plate_vars = guide_terms["plate_vars"] | model_terms["plate_vars"]
145 elbo = to_funsor(0, output=funsor.Real)
146 for cost in costs:
147 # compute the marginal logq in the guide corresponding to this cost term
148 log_prob = funsor.sum_product.sum_product(
149 funsor.ops.logaddexp,
150 funsor.ops.add,
151 guide_terms["log_measures"],
152 plates=plate_vars,
153 eliminate=(plate_vars | guide_terms["measure_vars"])
154 - frozenset(cost.inputs),
155 )
156 # compute the expected cost term E_q[logp] or E_q[-logq] using the marginal logq for q
157 elbo_term = funsor.Integrate(
158 log_prob, cost, guide_terms["measure_vars"] & frozenset(cost.inputs)
159 )
160 elbo += elbo_term.reduce(
161 funsor.ops.add, plate_vars & frozenset(cost.inputs)
162 )
163
164 # evaluate the elbo, using memoize to share tensor computation where possible
165 with funsor.interpretations.memoize():
166 return -to_data(apply_optimizer(elbo))
167
168
169 @copy_docs_from(_OrigTraceEnum_ELBO)
170 class TraceEnum_ELBO(ELBO):
171 def differentiable_loss(self, model, guide, *args, **kwargs):
172
173 # get batched, enumerated, to_funsor-ed traces from the guide and model
174 with plate(
175 size=self.num_particles
176 ) if self.num_particles > 1 else contextlib.ExitStack(), enum(
177 first_available_dim=(-self.max_plate_nesting - 1)
178 if self.max_plate_nesting
179 else None
180 ):
181 guide_tr = trace(guide).get_trace(*args, **kwargs)
182 model_tr = trace(replay(model, trace=guide_tr)).get_trace(*args, **kwargs)
183
184 # extract from traces all metadata that we will need to compute the elbo
185 guide_terms = terms_from_trace(guide_tr)
186 model_terms = terms_from_trace(model_tr)
187
188 # build up a lazy expression for the elbo
189 with funsor.terms.lazy:
190 # identify and contract out auxiliary variables in the model with partial_sum_product
191 contracted_factors, uncontracted_factors = [], []
192 for f in model_terms["log_factors"]:
193 if model_terms["measure_vars"].intersection(f.inputs):
194 contracted_factors.append(f)
195 else:
196 uncontracted_factors.append(f)
197 # incorporate the effects of subsampling and handlers.scale through a common scale factor
198 contracted_costs = [
199 model_terms["scale"] * f
200 for f in funsor.sum_product.partial_sum_product(
201 funsor.ops.logaddexp,
202 funsor.ops.add,
203 model_terms["log_measures"] + contracted_factors,
204 plates=model_terms["plate_vars"],
205 eliminate=model_terms["measure_vars"],
206 )
207 ]
208
209 # accumulate costs from model (logp) and guide (-logq)
210 costs = contracted_costs + uncontracted_factors # model costs: logp
211 costs += [-f for f in guide_terms["log_factors"]] # guide costs: -logq
212
213 # compute expected cost
214 # Cf. pyro.infer.util.Dice.compute_expectation()
215 # https://github.com/pyro-ppl/pyro/blob/0.3.0/pyro/infer/util.py#L212
216 # TODO Replace this with funsor.Expectation
217 plate_vars = guide_terms["plate_vars"] | model_terms["plate_vars"]
218 # compute the marginal logq in the guide corresponding to each cost term
219 targets = dict()
220 for cost in costs:
221 input_vars = frozenset(cost.inputs)
222 if input_vars not in targets:
223 targets[input_vars] = funsor.Tensor(
224 funsor.ops.new_zeros(
225 funsor.tensor.get_default_prototype(),
226 tuple(v.size for v in cost.inputs.values()),
227 ),
228 cost.inputs,
229 cost.dtype,
230 )
231 with AdjointTape() as tape:
232 logzq = funsor.sum_product.sum_product(
233 funsor.ops.logaddexp,
234 funsor.ops.add,
235 guide_terms["log_measures"] + list(targets.values()),
236 plates=plate_vars,
237 eliminate=(plate_vars | guide_terms["measure_vars"]),
238 )
239 marginals = tape.adjoint(
240 funsor.ops.logaddexp, funsor.ops.add, logzq, tuple(targets.values())
241 )
242 # finally, integrate out guide variables in the elbo and all plates
243 elbo = to_funsor(0, output=funsor.Real)
244 for cost in costs:
245 target = targets[frozenset(cost.inputs)]
246 logzq_local = marginals[target].reduce(
247 funsor.ops.logaddexp, frozenset(cost.inputs) - plate_vars
248 )
249 log_prob = marginals[target] - logzq_local
250 elbo_term = funsor.Integrate(
251 log_prob,
252 cost,
253 guide_terms["measure_vars"] & frozenset(log_prob.inputs),
254 )
255 elbo += elbo_term.reduce(
256 funsor.ops.add, plate_vars & frozenset(cost.inputs)
257 )
258
259 # evaluate the elbo, using memoize to share tensor computation where possible
260 with funsor.interpretations.memoize():
261 return -to_data(apply_optimizer(elbo))
262
263
264 class JitTraceEnum_ELBO(Jit_ELBO, TraceEnum_ELBO):
265 pass
266
267
268 class JitTraceMarkovEnum_ELBO(Jit_ELBO, TraceMarkovEnum_ELBO):
269 pass
270
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
```diff
diff --git a/pyro/contrib/funsor/infer/traceenum_elbo.py b/pyro/contrib/funsor/infer/traceenum_elbo.py
--- a/pyro/contrib/funsor/infer/traceenum_elbo.py
+++ b/pyro/contrib/funsor/infer/traceenum_elbo.py
@@ -5,6 +5,7 @@
 
 import funsor
 from funsor.adjoint import AdjointTape
+from funsor.sum_product import _partition
 
 from pyro.contrib.funsor import to_data, to_funsor
 from pyro.contrib.funsor.handlers import enum, plate, replay, trace
@@ -194,17 +195,28 @@
                     contracted_factors.append(f)
                 else:
                     uncontracted_factors.append(f)
+            contracted_costs = []
             # incorporate the effects of subsampling and handlers.scale through a common scale factor
-            contracted_costs = [
-                model_terms["scale"] * f
+            for group_factors, group_vars in _partition(
+                model_terms["log_measures"] + contracted_factors,
+                model_terms["measure_vars"],
+            ):
+                group_factor_vars = frozenset().union(
+                    *[f.inputs for f in group_factors]
+                )
+                group_plates = model_terms["plate_vars"] & group_factor_vars
+                outermost_plates = frozenset.intersection(
+                    *(frozenset(f.inputs) & group_plates for f in group_factors)
+                )
+                elim_plates = group_plates - outermost_plates
                 for f in funsor.sum_product.partial_sum_product(
                     funsor.ops.logaddexp,
                     funsor.ops.add,
-                    model_terms["log_measures"] + contracted_factors,
-                    plates=model_terms["plate_vars"],
-                    eliminate=model_terms["measure_vars"],
-                )
-            ]
+                    group_factors,
+                    plates=group_plates,
+                    eliminate=group_vars | elim_plates,
+                ):
+                    contracted_costs.append(model_terms["scale"] * f)
 
             # accumulate costs from model (logp) and guide (-logq)
             costs = contracted_costs + uncontracted_factors  # model costs: logp
```
|
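The heart of this patch is that `TraceEnum_ELBO` no longer runs one big `partial_sum_product` over all log-measures and contracted factors; it first splits them into groups connected through shared enumerated (measure) variables and, within each group, eliminates only the plates that are not common to every factor. The snippet below illustrates just that grouping step on plain sets of variable names; it is a hedged approximation of what funsor's `_partition` helper provides, not the funsor API itself, and all variable names in the example are made up.

```python
# Hedged illustration of grouping factors that are linked through shared sum
# (measure) variables -- the role funsor.sum_product._partition plays in the patch.
# Factors are represented only by frozensets of their input variable names.


def partition(factor_vars, sum_vars):
    """Group factor indices that are connected (transitively) by shared sum variables."""
    groups = []  # list of (factor_indices, group_sum_vars)
    for i, fvars in enumerate(factor_vars):
        svars = fvars & sum_vars
        touching = [g for g in groups if g[1] & svars]
        merged_idx = [i] + [j for g in touching for j in g[0]]
        merged_vars = frozenset(svars).union(*(g[1] for g in touching))
        groups = [g for g in groups if g not in touching]
        groups.append((merged_idx, merged_vars))
    return groups


if __name__ == "__main__":
    # Three hypothetical factors: two share the sum variable "x", one is separate.
    factors = [frozenset({"x", "i"}), frozenset({"x", "y", "i"}), frozenset({"z", "j"})]
    sum_vars = frozenset({"x", "y", "z"})
    plate_vars = frozenset({"i", "j"})
    for idxs, gvars in partition(factors, sum_vars):
        group_plates = plate_vars & frozenset().union(*(factors[k] for k in idxs))
        # Mirrors the patch: only plates missing from some factor in the group are
        # eliminated inside the group's contraction.
        outermost = frozenset.intersection(*(factors[k] & group_plates for k in idxs))
        elim_plates = group_plates - outermost
        print(sorted(idxs), sorted(gvars), sorted(elim_plates))
```

This per-group treatment keeps plates that every factor in a group shares outside the contraction, which appears to be the behaviour the failing `test_elbo_enumerate_plate*` cases exercise.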
{"golden_diff": "diff --git a/pyro/contrib/funsor/infer/traceenum_elbo.py b/pyro/contrib/funsor/infer/traceenum_elbo.py\n--- a/pyro/contrib/funsor/infer/traceenum_elbo.py\n+++ b/pyro/contrib/funsor/infer/traceenum_elbo.py\n@@ -5,6 +5,7 @@\n \n import funsor\n from funsor.adjoint import AdjointTape\n+from funsor.sum_product import _partition\n \n from pyro.contrib.funsor import to_data, to_funsor\n from pyro.contrib.funsor.handlers import enum, plate, replay, trace\n@@ -194,17 +195,28 @@\n contracted_factors.append(f)\n else:\n uncontracted_factors.append(f)\n+ contracted_costs = []\n # incorporate the effects of subsampling and handlers.scale through a common scale factor\n- contracted_costs = [\n- model_terms[\"scale\"] * f\n+ for group_factors, group_vars in _partition(\n+ model_terms[\"log_measures\"] + contracted_factors,\n+ model_terms[\"measure_vars\"],\n+ ):\n+ group_factor_vars = frozenset().union(\n+ *[f.inputs for f in group_factors]\n+ )\n+ group_plates = model_terms[\"plate_vars\"] & group_factor_vars\n+ outermost_plates = frozenset.intersection(\n+ *(frozenset(f.inputs) & group_plates for f in group_factors)\n+ )\n+ elim_plates = group_plates - outermost_plates\n for f in funsor.sum_product.partial_sum_product(\n funsor.ops.logaddexp,\n funsor.ops.add,\n- model_terms[\"log_measures\"] + contracted_factors,\n- plates=model_terms[\"plate_vars\"],\n- eliminate=model_terms[\"measure_vars\"],\n- )\n- ]\n+ group_factors,\n+ plates=group_plates,\n+ eliminate=group_vars | elim_plates,\n+ ):\n+ contracted_costs.append(model_terms[\"scale\"] * f)\n \n # accumulate costs from model (logp) and guide (-logq)\n costs = contracted_costs + uncontracted_factors # model costs: logp\n", "issue": "Some pyro.contrib.funsor tests are failing in torch==1.11.0\nThis issue records tests of `pyro.contrib.funsor` that started failing in #3045. 
In that PR I've xfailed the following tests so as to unblock that PR.\r\n- [ ] `python examples/contrib/funsor/hmm.py --num-steps=1 --truncate=10 --model=5 --funsor`\r\n- [ ] `pytest -x tests/contrib/funsor/test_enum_funsor.py::test_elbo_enumerate_plate_1`\r\n- [ ] `pytest -x tests/contrib/funsor/test_enum_funsor.py::test_elbo_enumerate_plate_4`\r\n- [ ] `pytest -x tests/contrib/funsor/test_enum_funsor.py::test_elbo_enumerate_plate_6`\r\n- [ ] `pytest -x tests/contrib/funsor/test_enum_funsor.py::test_elbo_enumerate_plate_7`\r\n- [ ] `pytest -x tests/contrib/funsor/test_enum_funsor.py::test_elbo_enumerate_plates_2`\r\n- [ ] `pytest -x tests/contrib/funsor/test_enum_funsor.py::test_elbo_enumerate_plates_3`\r\n- [ ] `pytest -x tests/contrib/funsor/test_enum_funsor.py::test_elbo_enumerate_plates_4`\r\n- [ ] `pytest -x tests/contrib/funsor/test_enum_funsor.py::test_elbo_enumerate_plates_5`\r\n- [ ] `pytest -x tests/contrib/funsor/test_enum_funsor.py::test_elbo_enumerate_plates_6`\r\n- [ ] `pytest -x tests/contrib/funsor/test_enum_funsor.py::test_elbo_enumerate_plates_7`\r\n- [ ] `pytest -x tests/contrib/funsor/test_enum_funsor.py::test_elbo_enumerate_plates_8`\r\n\r\ncc @eb8680 @ordabayevy \n", "before_files": [{"content": "# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\nimport contextlib\n\nimport funsor\nfrom funsor.adjoint import AdjointTape\n\nfrom pyro.contrib.funsor import to_data, to_funsor\nfrom pyro.contrib.funsor.handlers import enum, plate, replay, trace\nfrom pyro.contrib.funsor.infer.elbo import ELBO, Jit_ELBO\nfrom pyro.distributions.util import copy_docs_from\nfrom pyro.infer import TraceEnum_ELBO as _OrigTraceEnum_ELBO\n\n\n# Work around a bug in unfold_contraction_generic_tuple interacting with\n# Approximate introduced in https://github.com/pyro-ppl/funsor/pull/488 .\n# Once fixed, this can be replaced by funsor.optimizer.apply_optimizer().\ndef apply_optimizer(x):\n with funsor.interpretations.normalize:\n expr = funsor.interpreter.reinterpret(x)\n\n with funsor.optimizer.optimize_base:\n return funsor.interpreter.reinterpret(expr)\n\n\ndef terms_from_trace(tr):\n \"\"\"Helper function to extract elbo components from execution traces.\"\"\"\n # data structure containing densities, measures, scales, and identification\n # of free variables as either product (plate) variables or sum (measure) variables\n terms = {\n \"log_factors\": [],\n \"log_measures\": [],\n \"scale\": to_funsor(1.0),\n \"plate_vars\": frozenset(),\n \"measure_vars\": frozenset(),\n \"plate_to_step\": dict(),\n }\n for name, node in tr.nodes.items():\n # add markov dimensions to the plate_to_step dictionary\n if node[\"type\"] == \"markov_chain\":\n terms[\"plate_to_step\"][node[\"name\"]] = node[\"value\"]\n # ensure previous step variables are added to measure_vars\n for step in node[\"value\"]:\n terms[\"measure_vars\"] |= frozenset(\n {\n var\n for var in step[1:-1]\n if tr.nodes[var][\"funsor\"].get(\"log_measure\", None) is not None\n }\n )\n if (\n node[\"type\"] != \"sample\"\n or type(node[\"fn\"]).__name__ == \"_Subsample\"\n or node[\"infer\"].get(\"_do_not_score\", False)\n ):\n continue\n # grab plate dimensions from the cond_indep_stack\n terms[\"plate_vars\"] |= frozenset(\n f.name for f in node[\"cond_indep_stack\"] if f.vectorized\n )\n # grab the log-measure, found only at sites that are not replayed or observed\n if node[\"funsor\"].get(\"log_measure\", None) is not None:\n terms[\"log_measures\"].append(node[\"funsor\"][\"log_measure\"])\n # sum 
(measure) variables: the fresh non-plate variables at a site\n terms[\"measure_vars\"] |= (\n frozenset(node[\"funsor\"][\"value\"].inputs) | {name}\n ) - terms[\"plate_vars\"]\n # grab the scale, assuming a common subsampling scale\n if (\n node.get(\"replay_active\", False)\n and set(node[\"funsor\"][\"log_prob\"].inputs) & terms[\"measure_vars\"]\n and float(to_data(node[\"funsor\"][\"scale\"])) != 1.0\n ):\n # model site that depends on enumerated variable: common scale\n terms[\"scale\"] = node[\"funsor\"][\"scale\"]\n else: # otherwise: default scale behavior\n node[\"funsor\"][\"log_prob\"] = (\n node[\"funsor\"][\"log_prob\"] * node[\"funsor\"][\"scale\"]\n )\n # grab the log-density, found at all sites except those that are not replayed\n if node[\"is_observed\"] or not node.get(\"replay_skipped\", False):\n terms[\"log_factors\"].append(node[\"funsor\"][\"log_prob\"])\n # add plate dimensions to the plate_to_step dictionary\n terms[\"plate_to_step\"].update(\n {plate: terms[\"plate_to_step\"].get(plate, {}) for plate in terms[\"plate_vars\"]}\n )\n return terms\n\n\n@copy_docs_from(_OrigTraceEnum_ELBO)\nclass TraceMarkovEnum_ELBO(ELBO):\n def differentiable_loss(self, model, guide, *args, **kwargs):\n\n # get batched, enumerated, to_funsor-ed traces from the guide and model\n with plate(\n size=self.num_particles\n ) if self.num_particles > 1 else contextlib.ExitStack(), enum(\n first_available_dim=(-self.max_plate_nesting - 1)\n if self.max_plate_nesting\n else None\n ):\n guide_tr = trace(guide).get_trace(*args, **kwargs)\n model_tr = trace(replay(model, trace=guide_tr)).get_trace(*args, **kwargs)\n\n # extract from traces all metadata that we will need to compute the elbo\n guide_terms = terms_from_trace(guide_tr)\n model_terms = terms_from_trace(model_tr)\n\n # guide side enumeration is not supported\n if any(guide_terms[\"plate_to_step\"].values()):\n raise NotImplementedError(\n \"TraceMarkovEnum_ELBO does not yet support guide side Markov enumeration\"\n )\n\n # build up a lazy expression for the elbo\n with funsor.terms.lazy:\n # identify and contract out auxiliary variables in the model with partial_sum_product\n contracted_factors, uncontracted_factors = [], []\n for f in model_terms[\"log_factors\"]:\n if model_terms[\"measure_vars\"].intersection(f.inputs):\n contracted_factors.append(f)\n else:\n uncontracted_factors.append(f)\n # incorporate the effects of subsampling and handlers.scale through a common scale factor\n markov_dims = frozenset(\n {plate for plate, step in model_terms[\"plate_to_step\"].items() if step}\n )\n contracted_costs = [\n model_terms[\"scale\"] * f\n for f in funsor.sum_product.dynamic_partial_sum_product(\n funsor.ops.logaddexp,\n funsor.ops.add,\n model_terms[\"log_measures\"] + contracted_factors,\n plate_to_step=model_terms[\"plate_to_step\"],\n eliminate=model_terms[\"measure_vars\"] | markov_dims,\n )\n ]\n\n costs = contracted_costs + uncontracted_factors # model costs: logp\n costs += [-f for f in guide_terms[\"log_factors\"]] # guide costs: -logq\n\n # finally, integrate out guide variables in the elbo and all plates\n plate_vars = guide_terms[\"plate_vars\"] | model_terms[\"plate_vars\"]\n elbo = to_funsor(0, output=funsor.Real)\n for cost in costs:\n # compute the marginal logq in the guide corresponding to this cost term\n log_prob = funsor.sum_product.sum_product(\n funsor.ops.logaddexp,\n funsor.ops.add,\n guide_terms[\"log_measures\"],\n plates=plate_vars,\n eliminate=(plate_vars | guide_terms[\"measure_vars\"])\n - 
frozenset(cost.inputs),\n )\n # compute the expected cost term E_q[logp] or E_q[-logq] using the marginal logq for q\n elbo_term = funsor.Integrate(\n log_prob, cost, guide_terms[\"measure_vars\"] & frozenset(cost.inputs)\n )\n elbo += elbo_term.reduce(\n funsor.ops.add, plate_vars & frozenset(cost.inputs)\n )\n\n # evaluate the elbo, using memoize to share tensor computation where possible\n with funsor.interpretations.memoize():\n return -to_data(apply_optimizer(elbo))\n\n\n@copy_docs_from(_OrigTraceEnum_ELBO)\nclass TraceEnum_ELBO(ELBO):\n def differentiable_loss(self, model, guide, *args, **kwargs):\n\n # get batched, enumerated, to_funsor-ed traces from the guide and model\n with plate(\n size=self.num_particles\n ) if self.num_particles > 1 else contextlib.ExitStack(), enum(\n first_available_dim=(-self.max_plate_nesting - 1)\n if self.max_plate_nesting\n else None\n ):\n guide_tr = trace(guide).get_trace(*args, **kwargs)\n model_tr = trace(replay(model, trace=guide_tr)).get_trace(*args, **kwargs)\n\n # extract from traces all metadata that we will need to compute the elbo\n guide_terms = terms_from_trace(guide_tr)\n model_terms = terms_from_trace(model_tr)\n\n # build up a lazy expression for the elbo\n with funsor.terms.lazy:\n # identify and contract out auxiliary variables in the model with partial_sum_product\n contracted_factors, uncontracted_factors = [], []\n for f in model_terms[\"log_factors\"]:\n if model_terms[\"measure_vars\"].intersection(f.inputs):\n contracted_factors.append(f)\n else:\n uncontracted_factors.append(f)\n # incorporate the effects of subsampling and handlers.scale through a common scale factor\n contracted_costs = [\n model_terms[\"scale\"] * f\n for f in funsor.sum_product.partial_sum_product(\n funsor.ops.logaddexp,\n funsor.ops.add,\n model_terms[\"log_measures\"] + contracted_factors,\n plates=model_terms[\"plate_vars\"],\n eliminate=model_terms[\"measure_vars\"],\n )\n ]\n\n # accumulate costs from model (logp) and guide (-logq)\n costs = contracted_costs + uncontracted_factors # model costs: logp\n costs += [-f for f in guide_terms[\"log_factors\"]] # guide costs: -logq\n\n # compute expected cost\n # Cf. 
pyro.infer.util.Dice.compute_expectation()\n # https://github.com/pyro-ppl/pyro/blob/0.3.0/pyro/infer/util.py#L212\n # TODO Replace this with funsor.Expectation\n plate_vars = guide_terms[\"plate_vars\"] | model_terms[\"plate_vars\"]\n # compute the marginal logq in the guide corresponding to each cost term\n targets = dict()\n for cost in costs:\n input_vars = frozenset(cost.inputs)\n if input_vars not in targets:\n targets[input_vars] = funsor.Tensor(\n funsor.ops.new_zeros(\n funsor.tensor.get_default_prototype(),\n tuple(v.size for v in cost.inputs.values()),\n ),\n cost.inputs,\n cost.dtype,\n )\n with AdjointTape() as tape:\n logzq = funsor.sum_product.sum_product(\n funsor.ops.logaddexp,\n funsor.ops.add,\n guide_terms[\"log_measures\"] + list(targets.values()),\n plates=plate_vars,\n eliminate=(plate_vars | guide_terms[\"measure_vars\"]),\n )\n marginals = tape.adjoint(\n funsor.ops.logaddexp, funsor.ops.add, logzq, tuple(targets.values())\n )\n # finally, integrate out guide variables in the elbo and all plates\n elbo = to_funsor(0, output=funsor.Real)\n for cost in costs:\n target = targets[frozenset(cost.inputs)]\n logzq_local = marginals[target].reduce(\n funsor.ops.logaddexp, frozenset(cost.inputs) - plate_vars\n )\n log_prob = marginals[target] - logzq_local\n elbo_term = funsor.Integrate(\n log_prob,\n cost,\n guide_terms[\"measure_vars\"] & frozenset(log_prob.inputs),\n )\n elbo += elbo_term.reduce(\n funsor.ops.add, plate_vars & frozenset(cost.inputs)\n )\n\n # evaluate the elbo, using memoize to share tensor computation where possible\n with funsor.interpretations.memoize():\n return -to_data(apply_optimizer(elbo))\n\n\nclass JitTraceEnum_ELBO(Jit_ELBO, TraceEnum_ELBO):\n pass\n\n\nclass JitTraceMarkovEnum_ELBO(Jit_ELBO, TraceMarkovEnum_ELBO):\n pass\n", "path": "pyro/contrib/funsor/infer/traceenum_elbo.py"}], "after_files": [{"content": "# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\nimport contextlib\n\nimport funsor\nfrom funsor.adjoint import AdjointTape\nfrom funsor.sum_product import _partition\n\nfrom pyro.contrib.funsor import to_data, to_funsor\nfrom pyro.contrib.funsor.handlers import enum, plate, replay, trace\nfrom pyro.contrib.funsor.infer.elbo import ELBO, Jit_ELBO\nfrom pyro.distributions.util import copy_docs_from\nfrom pyro.infer import TraceEnum_ELBO as _OrigTraceEnum_ELBO\n\n\n# Work around a bug in unfold_contraction_generic_tuple interacting with\n# Approximate introduced in https://github.com/pyro-ppl/funsor/pull/488 .\n# Once fixed, this can be replaced by funsor.optimizer.apply_optimizer().\ndef apply_optimizer(x):\n with funsor.interpretations.normalize:\n expr = funsor.interpreter.reinterpret(x)\n\n with funsor.optimizer.optimize_base:\n return funsor.interpreter.reinterpret(expr)\n\n\ndef terms_from_trace(tr):\n \"\"\"Helper function to extract elbo components from execution traces.\"\"\"\n # data structure containing densities, measures, scales, and identification\n # of free variables as either product (plate) variables or sum (measure) variables\n terms = {\n \"log_factors\": [],\n \"log_measures\": [],\n \"scale\": to_funsor(1.0),\n \"plate_vars\": frozenset(),\n \"measure_vars\": frozenset(),\n \"plate_to_step\": dict(),\n }\n for name, node in tr.nodes.items():\n # add markov dimensions to the plate_to_step dictionary\n if node[\"type\"] == \"markov_chain\":\n terms[\"plate_to_step\"][node[\"name\"]] = node[\"value\"]\n # ensure previous step variables are added to measure_vars\n for step in 
node[\"value\"]:\n terms[\"measure_vars\"] |= frozenset(\n {\n var\n for var in step[1:-1]\n if tr.nodes[var][\"funsor\"].get(\"log_measure\", None) is not None\n }\n )\n if (\n node[\"type\"] != \"sample\"\n or type(node[\"fn\"]).__name__ == \"_Subsample\"\n or node[\"infer\"].get(\"_do_not_score\", False)\n ):\n continue\n # grab plate dimensions from the cond_indep_stack\n terms[\"plate_vars\"] |= frozenset(\n f.name for f in node[\"cond_indep_stack\"] if f.vectorized\n )\n # grab the log-measure, found only at sites that are not replayed or observed\n if node[\"funsor\"].get(\"log_measure\", None) is not None:\n terms[\"log_measures\"].append(node[\"funsor\"][\"log_measure\"])\n # sum (measure) variables: the fresh non-plate variables at a site\n terms[\"measure_vars\"] |= (\n frozenset(node[\"funsor\"][\"value\"].inputs) | {name}\n ) - terms[\"plate_vars\"]\n # grab the scale, assuming a common subsampling scale\n if (\n node.get(\"replay_active\", False)\n and set(node[\"funsor\"][\"log_prob\"].inputs) & terms[\"measure_vars\"]\n and float(to_data(node[\"funsor\"][\"scale\"])) != 1.0\n ):\n # model site that depends on enumerated variable: common scale\n terms[\"scale\"] = node[\"funsor\"][\"scale\"]\n else: # otherwise: default scale behavior\n node[\"funsor\"][\"log_prob\"] = (\n node[\"funsor\"][\"log_prob\"] * node[\"funsor\"][\"scale\"]\n )\n # grab the log-density, found at all sites except those that are not replayed\n if node[\"is_observed\"] or not node.get(\"replay_skipped\", False):\n terms[\"log_factors\"].append(node[\"funsor\"][\"log_prob\"])\n # add plate dimensions to the plate_to_step dictionary\n terms[\"plate_to_step\"].update(\n {plate: terms[\"plate_to_step\"].get(plate, {}) for plate in terms[\"plate_vars\"]}\n )\n return terms\n\n\n@copy_docs_from(_OrigTraceEnum_ELBO)\nclass TraceMarkovEnum_ELBO(ELBO):\n def differentiable_loss(self, model, guide, *args, **kwargs):\n\n # get batched, enumerated, to_funsor-ed traces from the guide and model\n with plate(\n size=self.num_particles\n ) if self.num_particles > 1 else contextlib.ExitStack(), enum(\n first_available_dim=(-self.max_plate_nesting - 1)\n if self.max_plate_nesting\n else None\n ):\n guide_tr = trace(guide).get_trace(*args, **kwargs)\n model_tr = trace(replay(model, trace=guide_tr)).get_trace(*args, **kwargs)\n\n # extract from traces all metadata that we will need to compute the elbo\n guide_terms = terms_from_trace(guide_tr)\n model_terms = terms_from_trace(model_tr)\n\n # guide side enumeration is not supported\n if any(guide_terms[\"plate_to_step\"].values()):\n raise NotImplementedError(\n \"TraceMarkovEnum_ELBO does not yet support guide side Markov enumeration\"\n )\n\n # build up a lazy expression for the elbo\n with funsor.terms.lazy:\n # identify and contract out auxiliary variables in the model with partial_sum_product\n contracted_factors, uncontracted_factors = [], []\n for f in model_terms[\"log_factors\"]:\n if model_terms[\"measure_vars\"].intersection(f.inputs):\n contracted_factors.append(f)\n else:\n uncontracted_factors.append(f)\n # incorporate the effects of subsampling and handlers.scale through a common scale factor\n markov_dims = frozenset(\n {plate for plate, step in model_terms[\"plate_to_step\"].items() if step}\n )\n contracted_costs = [\n model_terms[\"scale\"] * f\n for f in funsor.sum_product.dynamic_partial_sum_product(\n funsor.ops.logaddexp,\n funsor.ops.add,\n model_terms[\"log_measures\"] + contracted_factors,\n plate_to_step=model_terms[\"plate_to_step\"],\n 
eliminate=model_terms[\"measure_vars\"] | markov_dims,\n )\n ]\n\n costs = contracted_costs + uncontracted_factors # model costs: logp\n costs += [-f for f in guide_terms[\"log_factors\"]] # guide costs: -logq\n\n # finally, integrate out guide variables in the elbo and all plates\n plate_vars = guide_terms[\"plate_vars\"] | model_terms[\"plate_vars\"]\n elbo = to_funsor(0, output=funsor.Real)\n for cost in costs:\n # compute the marginal logq in the guide corresponding to this cost term\n log_prob = funsor.sum_product.sum_product(\n funsor.ops.logaddexp,\n funsor.ops.add,\n guide_terms[\"log_measures\"],\n plates=plate_vars,\n eliminate=(plate_vars | guide_terms[\"measure_vars\"])\n - frozenset(cost.inputs),\n )\n # compute the expected cost term E_q[logp] or E_q[-logq] using the marginal logq for q\n elbo_term = funsor.Integrate(\n log_prob, cost, guide_terms[\"measure_vars\"] & frozenset(cost.inputs)\n )\n elbo += elbo_term.reduce(\n funsor.ops.add, plate_vars & frozenset(cost.inputs)\n )\n\n # evaluate the elbo, using memoize to share tensor computation where possible\n with funsor.interpretations.memoize():\n return -to_data(apply_optimizer(elbo))\n\n\n@copy_docs_from(_OrigTraceEnum_ELBO)\nclass TraceEnum_ELBO(ELBO):\n def differentiable_loss(self, model, guide, *args, **kwargs):\n\n # get batched, enumerated, to_funsor-ed traces from the guide and model\n with plate(\n size=self.num_particles\n ) if self.num_particles > 1 else contextlib.ExitStack(), enum(\n first_available_dim=(-self.max_plate_nesting - 1)\n if self.max_plate_nesting\n else None\n ):\n guide_tr = trace(guide).get_trace(*args, **kwargs)\n model_tr = trace(replay(model, trace=guide_tr)).get_trace(*args, **kwargs)\n\n # extract from traces all metadata that we will need to compute the elbo\n guide_terms = terms_from_trace(guide_tr)\n model_terms = terms_from_trace(model_tr)\n\n # build up a lazy expression for the elbo\n with funsor.terms.lazy:\n # identify and contract out auxiliary variables in the model with partial_sum_product\n contracted_factors, uncontracted_factors = [], []\n for f in model_terms[\"log_factors\"]:\n if model_terms[\"measure_vars\"].intersection(f.inputs):\n contracted_factors.append(f)\n else:\n uncontracted_factors.append(f)\n contracted_costs = []\n # incorporate the effects of subsampling and handlers.scale through a common scale factor\n for group_factors, group_vars in _partition(\n model_terms[\"log_measures\"] + contracted_factors,\n model_terms[\"measure_vars\"],\n ):\n group_factor_vars = frozenset().union(\n *[f.inputs for f in group_factors]\n )\n group_plates = model_terms[\"plate_vars\"] & group_factor_vars\n outermost_plates = frozenset.intersection(\n *(frozenset(f.inputs) & group_plates for f in group_factors)\n )\n elim_plates = group_plates - outermost_plates\n for f in funsor.sum_product.partial_sum_product(\n funsor.ops.logaddexp,\n funsor.ops.add,\n group_factors,\n plates=group_plates,\n eliminate=group_vars | elim_plates,\n ):\n contracted_costs.append(model_terms[\"scale\"] * f)\n\n # accumulate costs from model (logp) and guide (-logq)\n costs = contracted_costs + uncontracted_factors # model costs: logp\n costs += [-f for f in guide_terms[\"log_factors\"]] # guide costs: -logq\n\n # compute expected cost\n # Cf. 
pyro.infer.util.Dice.compute_expectation()\n # https://github.com/pyro-ppl/pyro/blob/0.3.0/pyro/infer/util.py#L212\n # TODO Replace this with funsor.Expectation\n plate_vars = guide_terms[\"plate_vars\"] | model_terms[\"plate_vars\"]\n # compute the marginal logq in the guide corresponding to each cost term\n targets = dict()\n for cost in costs:\n input_vars = frozenset(cost.inputs)\n if input_vars not in targets:\n targets[input_vars] = funsor.Tensor(\n funsor.ops.new_zeros(\n funsor.tensor.get_default_prototype(),\n tuple(v.size for v in cost.inputs.values()),\n ),\n cost.inputs,\n cost.dtype,\n )\n with AdjointTape() as tape:\n logzq = funsor.sum_product.sum_product(\n funsor.ops.logaddexp,\n funsor.ops.add,\n guide_terms[\"log_measures\"] + list(targets.values()),\n plates=plate_vars,\n eliminate=(plate_vars | guide_terms[\"measure_vars\"]),\n )\n marginals = tape.adjoint(\n funsor.ops.logaddexp, funsor.ops.add, logzq, tuple(targets.values())\n )\n # finally, integrate out guide variables in the elbo and all plates\n elbo = to_funsor(0, output=funsor.Real)\n for cost in costs:\n target = targets[frozenset(cost.inputs)]\n logzq_local = marginals[target].reduce(\n funsor.ops.logaddexp, frozenset(cost.inputs) - plate_vars\n )\n log_prob = marginals[target] - logzq_local\n elbo_term = funsor.Integrate(\n log_prob,\n cost,\n guide_terms[\"measure_vars\"] & frozenset(log_prob.inputs),\n )\n elbo += elbo_term.reduce(\n funsor.ops.add, plate_vars & frozenset(cost.inputs)\n )\n\n # evaluate the elbo, using memoize to share tensor computation where possible\n with funsor.interpretations.memoize():\n return -to_data(apply_optimizer(elbo))\n\n\nclass JitTraceEnum_ELBO(Jit_ELBO, TraceEnum_ELBO):\n pass\n\n\nclass JitTraceMarkovEnum_ELBO(Jit_ELBO, TraceMarkovEnum_ELBO):\n pass\n", "path": "pyro/contrib/funsor/infer/traceenum_elbo.py"}]}
| 4,048 | 486 |
gh_patches_debug_38118
|
rasdani/github-patches
|
git_diff
|
ethereum__web3.py-171
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support rlp==0.4.7
* Version: 3.7.2
* Python: 2.7
* OS: osx
### What was wrong?
https://github.com/ethereum/pydevp2p requires `rlp==0.4.7` ([here](https://github.com/ethereum/pydevp2p/blob/develop/requirements.txt#L11)), but `web3.py` requires `rlp>=0.4.6,<0.4.7`. This causes headaches when a project requires both packages as well as `rlp` independently.
I've created a repo with updated requirements here: https://github.com/karlfloersch/web3.py, but I haven't verified that everything is working properly.
#### Cute Animal Picture

Thank you!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 import os
4 import sys
5
6 from setuptools import (
7 setup,
8 find_packages,
9 )
10
11
12 DIR = os.path.dirname(os.path.abspath(__file__))
13
14
15 readme = open(os.path.join(DIR, 'README.md')).read()
16
17 install_requires=[
18 "ethereum-abi-utils>=0.4.0",
19 "ethereum-utils>=0.2.0",
20 "pylru>=1.0.9",
21 "pysha3>=0.3",
22 "requests>=2.12.4",
23 "rlp>=0.4.6,<0.4.7",
24 ]
25
26 if sys.platform == 'win32':
27 install_requires.append('pypiwin32')
28
29 setup(
30 name='web3',
31 version='3.7.2',
32 description="""Web3.py""",
33 long_description=readme,
34 author='Piper Merriam',
35 author_email='[email protected]',
36 url='https://github.com/pipermerriam/web3.py',
37 include_package_data=True,
38 install_requires=install_requires,
39 extras_require={
40 'Tester': ["eth-testrpc>=1.1.0"],
41 'tester': ["eth-testrpc>=1.1.0"],
42 'gevent': [
43 "gevent>=1.1.1,<1.2.0",
44 "geventhttpclient>=1.3.1",
45 ],
46 },
47 py_modules=['web3'],
48 license="MIT",
49 zip_safe=False,
50 keywords='ethereum',
51 packages=find_packages(exclude=["tests", "tests.*"]),
52 classifiers=[
53 'Development Status :: 2 - Pre-Alpha',
54 'Intended Audience :: Developers',
55 'License :: OSI Approved :: MIT License',
56 'Natural Language :: English',
57 'Programming Language :: Python :: 2',
58 'Programming Language :: Python :: 2.7',
59 'Programming Language :: Python :: 3',
60 'Programming Language :: Python :: 3.4',
61 'Programming Language :: Python :: 3.5',
62 ],
63 )
64
```
Path: `web3/providers/manager.py`
Content:
```
1 import uuid
2 import json
3 import collections
4
5 import rlp
6
7 from eth_utils import (
8 force_text,
9 to_normalized_address,
10 is_string,
11 is_dict,
12 encode_hex,
13 decode_hex,
14 keccak,
15 )
16
17 from web3.utils.encoding import (
18 to_decimal,
19 )
20 from web3.utils.transactions import (
21 is_bitcoin_available,
22 Transaction,
23 serialize_transaction,
24 add_signature_to_transaction,
25 )
26 from web3.utils.compat import (
27 spawn,
28 )
29
30
31 class RequestManager(object):
32 def __init__(self, provider):
33 self.pending_requests = {}
34 self.provider = provider
35
36 def setProvider(self, provider):
37 self.provider = provider
38
39 def request_blocking(self, method, params):
40 """
41 Make a synchronous request using the provider
42 """
43 response_raw = self.provider.make_request(method, params)
44
45 if is_string(response_raw):
46 response = json.loads(force_text(response_raw))
47 elif is_dict(response_raw):
48 response = response_raw
49
50 if "error" in response:
51 raise ValueError(response["error"])
52
53 return response['result']
54
55 def request_async(self, method, params):
56 request_id = uuid.uuid4()
57 self.pending_requests[request_id] = spawn(
58 self.request_blocking,
59 method,
60 params,
61 )
62 return request_id
63
64 def receive_blocking(self, request_id, timeout=None):
65 try:
66 request = self.pending_requests.pop(request_id)
67 except KeyError:
68 raise KeyError("Request for id:{0} not found".format(request_id))
69 else:
70 response_raw = request.get(timeout=timeout)
71
72 response = json.loads(response_raw)
73
74 if "error" in response:
75 raise ValueError(response["error"])
76
77 return response['result']
78
79 def receive_async(self, request_id, *args, **kwargs):
80 raise NotImplementedError("Callback pattern not implemented")
81
82
83 class ManagerWrapper(object):
84 def __init__(self, wrapped_manager):
85 self.wrapped_manager = wrapped_manager
86
87 @property
88 def provider(self):
89 return self.wrapped_manager.provider
90
91 @property
92 def pending_requests(self):
93 return self.wrapped_manager.pending_requests
94
95 def setProvider(self, provider):
96 self.wrapped_manager.provider = provider
97
98 def request_blocking(self, *args, **kwargs):
99 return self.wrapped_manager.request_blocking(*args, **kwargs)
100
101 def request_async(self, *args, **kwargs):
102 return self.wrapped_manager.request_async(*args, **kwargs)
103
104 def receive_blocking(self, *args, **kwargs):
105 return self.wrapped_manager.receive_blocking(*args, **kwargs)
106
107 def receive_async(self, *args, **kwargs):
108 return self.wrapped_manager.receive_async(*args, **kwargs)
109
110
111 class BaseSendRawTransactionMixin(ManagerWrapper):
112 _known_transactions = None
113 _known_nonces = None
114
115 def __init__(self, *args, **kwargs):
116 self._known_transactions = collections.defaultdict(set)
117 self._known_nonces = collections.defaultdict(set)
118 super(BaseSendRawTransactionMixin, self).__init__(*args, **kwargs)
119
120 def _get_nonces_and_cleanup(self, addr, chain_nonce):
121 all_txns = {
122 txn_hash: self.request_blocking(
123 'eth_getTransactionByHash',
124 [txn_hash],
125 ) for txn_hash in self._known_transactions[addr]
126 }
127 for txn_hash, txn in all_txns.items():
128 if txn is None:
129 continue
130 txn_nonce = to_decimal(txn['nonce'])
131 if txn_nonce < chain_nonce:
132 self._known_transactions[addr].discard(txn_hash)
133 else:
134 yield txn_nonce
135
136 all_known_nonces = tuple(self._known_nonces[addr])
137 for nonce in all_known_nonces:
138 if nonce < chain_nonce:
139 self._known_nonces[addr].discard(nonce)
140 else:
141 yield nonce
142
143 def get_chain_nonce(self, addr):
144 chain_nonce = to_decimal(self.request_blocking(
145 'eth_getTransactionCount',
146 [addr, 'pending']
147 ))
148 return chain_nonce
149
150 def get_nonce(self, addr):
151 chain_nonce = self.get_chain_nonce(addr)
152 tracked_txn_nonces = tuple(self._get_nonces_and_cleanup(addr, chain_nonce))
153 nonce = max(0, chain_nonce, *tracked_txn_nonces)
154 if nonce == 0 and not tracked_txn_nonces:
155 return -1
156 else:
157 return nonce
158
159 def get_transaction_signature(self, serialized_txn):
160 raise NotImplementedError("Must be implemented by subclasses")
161
162 def sign_and_serialize_transaction(self, transaction):
163 serialized_txn = serialize_transaction(transaction)
164 signature = self.get_transaction_signature(transaction)
165 signed_transaction = add_signature_to_transaction(
166 serialized_txn,
167 signature,
168 )
169 signed_and_serialized_txn = rlp.encode(signed_transaction, Transaction)
170 return signed_and_serialized_txn
171
172 def construct_full_transaction(self, base_transaction):
173 txn_from = base_transaction['from']
174 full_txn = dict(**base_transaction)
175 full_txn.setdefault('nonce', self.get_nonce(txn_from) + 1)
176 full_txn.setdefault('gasPrice', self.request_blocking(
177 'eth_gasPrice', []
178 ))
179 full_txn.setdefault('gas', hex(90000))
180 full_txn.setdefault('value', '0x0')
181 full_txn.setdefault('to', '')
182 full_txn.setdefault('data', '')
183 return full_txn
184
185 TXN_SENDING_METHODS = {
186 'eth_sendTransaction',
187 'eth_sendRawTransaction',
188 'personal_signAndSendTransaction',
189 'personal_sendTransaction',
190 }
191
192 def request_blocking(self, method, params):
193 if method == 'eth_sendTransaction':
194 base_transaction = params[0]
195 # create a fully signed transaction and send through the
196 # `eth_sendRawTransaction` endpoint instead.
197 full_transaction = self.construct_full_transaction(base_transaction)
198 raw_transaction_bytes = self.sign_and_serialize_transaction(
199 full_transaction,
200 )
201 raw_transaction_bytes_as_hex = encode_hex(raw_transaction_bytes)
202 return self.request_blocking(
203 'eth_sendRawTransaction', [raw_transaction_bytes_as_hex],
204 )
205
206 result = super(BaseSendRawTransactionMixin, self).request_blocking(
207 method, params,
208 )
209 if method in self.TXN_SENDING_METHODS:
210 if method == 'eth_sendRawTransaction':
211 txn = rlp.decode(decode_hex(params[0]), Transaction)
212 self._known_transactions[to_normalized_address(txn.sender)].add(result)
213 self._known_nonces[to_normalized_address(txn.sender)].add(txn.nonce)
214 else:
215 txn = params[0]
216 self._known_transactions[to_normalized_address(txn['from'])].add(result)
217 if 'nonce' in txn:
218 self._known_nonces[to_normalized_address(txn['from'])].add(
219 to_decimal(txn['nonce'])
220 )
221 return result
222
223
224 class DelegatedSigningManager(BaseSendRawTransactionMixin):
225 def __init__(self, *args, **kwargs):
226 self.signing_manager = kwargs.pop('signing_manager')
227 super(DelegatedSigningManager, self).__init__(*args, **kwargs)
228
229 def get_chain_nonce(self, addr):
230 signer_nonce = to_decimal(self.signing_manager.request_blocking(
231 'eth_getTransactionCount',
232 [addr, 'pending']
233 ))
234 wrapped_nonce = to_decimal(self.wrapped_manager.request_blocking(
235 'eth_getTransactionCount',
236 [addr, 'pending']
237 ))
238 return max(signer_nonce, wrapped_nonce)
239
240 def get_transaction_signature(self, transaction):
241 serialized_txn = serialize_transaction(transaction)
242 hash_to_sign = self.signing_manager.request_blocking(
243 'web3_sha3', [encode_hex(serialized_txn)],
244 )
245 signature_hex = self.signing_manager.request_blocking(
246 'eth_sign',
247 [
248 transaction['from'],
249 hash_to_sign,
250 ],
251 )
252 signature = decode_hex(signature_hex)
253 return signature
254
255
256 class PrivateKeySigningManager(BaseSendRawTransactionMixin):
257 def __init__(self, *args, **kwargs):
258 if not is_bitcoin_available():
259 raise ImportError(
260 "In order to use the `PrivateKeySigningManager` the "
261 "`bitcoin` and `secp256k1` packages must be installed."
262 )
263 self.keys = kwargs.pop('keys', {})
264 super(PrivateKeySigningManager, self).__init__(*args, **kwargs)
265
266 def register_private_key(self, key):
267 from bitcoin import privtopub
268 address = to_normalized_address(keccak(privtopub(key)[1:])[-20:])
269 self.keys[address] = key
270
271 def sign_and_serialize_transaction(self, transaction):
272 txn_from = to_normalized_address(transaction['from'])
273 if txn_from not in self.keys:
274 raise KeyError("No signing key registered for from address: {0}".format(txn_from))
275 transaction = Transaction(
276 nonce=to_decimal(transaction['nonce']),
277 gasprice=to_decimal(transaction['gasPrice']),
278 startgas=to_decimal(transaction['gas']),
279 to=transaction['to'],
280 value=to_decimal(transaction['value']),
281 data=decode_hex(transaction['data']),
282 )
283 transaction.sign(self.keys[txn_from])
284 assert to_normalized_address(transaction.sender) == txn_from
285 return rlp.encode(transaction, Transaction)
286
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -20,7 +20,7 @@
"pylru>=1.0.9",
"pysha3>=0.3",
"requests>=2.12.4",
- "rlp>=0.4.6,<0.4.7",
+ "rlp>=0.4.7",
]
if sys.platform == 'win32':
@@ -37,8 +37,7 @@
include_package_data=True,
install_requires=install_requires,
extras_require={
- 'Tester': ["eth-testrpc>=1.1.0"],
- 'tester': ["eth-testrpc>=1.1.0"],
+ 'tester': ["eth-testrpc>=1.2.0"],
'gevent': [
"gevent>=1.1.1,<1.2.0",
"geventhttpclient>=1.3.1",
diff --git a/web3/providers/manager.py b/web3/providers/manager.py
--- a/web3/providers/manager.py
+++ b/web3/providers/manager.py
@@ -1,17 +1,18 @@
-import uuid
-import json
import collections
+import json
+import uuid
+import warnings
import rlp
from eth_utils import (
+ decode_hex,
+ encode_hex,
force_text,
- to_normalized_address,
- is_string,
is_dict,
- encode_hex,
- decode_hex,
+ is_string,
keccak,
+ to_normalized_address,
)
from web3.utils.encoding import (
@@ -82,6 +83,10 @@
class ManagerWrapper(object):
def __init__(self, wrapped_manager):
+ warnings.warn(DeprecationWarning(
+ "ManagerWrapper has been deprecated and will be removed from"
+ "web3.py in subsequen releases."
+ ))
self.wrapped_manager = wrapped_manager
@property
@@ -223,6 +228,10 @@
class DelegatedSigningManager(BaseSendRawTransactionMixin):
def __init__(self, *args, **kwargs):
+ warnings.warn(DeprecationWarning(
+ "DelegatedSigningManager has been deprecated and will be removed from"
+ "web3.py in subsequen releases."
+ ))
self.signing_manager = kwargs.pop('signing_manager')
super(DelegatedSigningManager, self).__init__(*args, **kwargs)
@@ -255,6 +264,10 @@
class PrivateKeySigningManager(BaseSendRawTransactionMixin):
def __init__(self, *args, **kwargs):
+ warnings.warn(DeprecationWarning(
+ "PrivateKeySigningManager has been deprecated and will be removed from"
+ "web3.py in subsequen releases."
+ ))
if not is_bitcoin_available():
raise ImportError(
"In order to use the `PrivateKeySigningManager` the "
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -20,7 +20,7 @@\n \"pylru>=1.0.9\",\n \"pysha3>=0.3\",\n \"requests>=2.12.4\",\n- \"rlp>=0.4.6,<0.4.7\",\n+ \"rlp>=0.4.7\",\n ]\n \n if sys.platform == 'win32':\n@@ -37,8 +37,7 @@\n include_package_data=True,\n install_requires=install_requires,\n extras_require={\n- 'Tester': [\"eth-testrpc>=1.1.0\"],\n- 'tester': [\"eth-testrpc>=1.1.0\"],\n+ 'tester': [\"eth-testrpc>=1.2.0\"],\n 'gevent': [\n \"gevent>=1.1.1,<1.2.0\",\n \"geventhttpclient>=1.3.1\",\ndiff --git a/web3/providers/manager.py b/web3/providers/manager.py\n--- a/web3/providers/manager.py\n+++ b/web3/providers/manager.py\n@@ -1,17 +1,18 @@\n-import uuid\n-import json\n import collections\n+import json\n+import uuid\n+import warnings\n \n import rlp\n \n from eth_utils import (\n+ decode_hex,\n+ encode_hex,\n force_text,\n- to_normalized_address,\n- is_string,\n is_dict,\n- encode_hex,\n- decode_hex,\n+ is_string,\n keccak,\n+ to_normalized_address,\n )\n \n from web3.utils.encoding import (\n@@ -82,6 +83,10 @@\n \n class ManagerWrapper(object):\n def __init__(self, wrapped_manager):\n+ warnings.warn(DeprecationWarning(\n+ \"ManagerWrapper has been deprecated and will be removed from\"\n+ \"web3.py in subsequen releases.\"\n+ ))\n self.wrapped_manager = wrapped_manager\n \n @property\n@@ -223,6 +228,10 @@\n \n class DelegatedSigningManager(BaseSendRawTransactionMixin):\n def __init__(self, *args, **kwargs):\n+ warnings.warn(DeprecationWarning(\n+ \"DelegatedSigningManager has been deprecated and will be removed from\"\n+ \"web3.py in subsequen releases.\"\n+ ))\n self.signing_manager = kwargs.pop('signing_manager')\n super(DelegatedSigningManager, self).__init__(*args, **kwargs)\n \n@@ -255,6 +264,10 @@\n \n class PrivateKeySigningManager(BaseSendRawTransactionMixin):\n def __init__(self, *args, **kwargs):\n+ warnings.warn(DeprecationWarning(\n+ \"PrivateKeySigningManager has been deprecated and will be removed from\"\n+ \"web3.py in subsequen releases.\"\n+ ))\n if not is_bitcoin_available():\n raise ImportError(\n \"In order to use the `PrivateKeySigningManager` the \"\n", "issue": "Support rlp==0.4.7\n* Version: 3.7.2\r\n* Python: 2.7\r\n* OS: osx\r\n\r\n### What was wrong?\r\nhttps://github.com/ethereum/pydevp2p requires `rlp==0.4.7` ([here](https://github.com/ethereum/pydevp2p/blob/develop/requirements.txt#L11)), but `web3.py` requires `rlp>=0.4.6,<0.4.7`. 
This causes headaches when a project requires both packages as well as `rlp` independently.\r\n\r\nI've created a repo with updated requirements here: https://github.com/karlfloersch/web3.py, but I haven't verified that everything is working properly.\r\n\r\n#### Cute Animal Picture\r\n\r\n\r\nThank you!\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport sys\n\nfrom setuptools import (\n setup,\n find_packages,\n)\n\n\nDIR = os.path.dirname(os.path.abspath(__file__))\n\n\nreadme = open(os.path.join(DIR, 'README.md')).read()\n\ninstall_requires=[\n \"ethereum-abi-utils>=0.4.0\",\n \"ethereum-utils>=0.2.0\",\n \"pylru>=1.0.9\",\n \"pysha3>=0.3\",\n \"requests>=2.12.4\",\n \"rlp>=0.4.6,<0.4.7\",\n]\n\nif sys.platform == 'win32':\n install_requires.append('pypiwin32')\n\nsetup(\n name='web3',\n version='3.7.2',\n description=\"\"\"Web3.py\"\"\",\n long_description=readme,\n author='Piper Merriam',\n author_email='[email protected]',\n url='https://github.com/pipermerriam/web3.py',\n include_package_data=True,\n install_requires=install_requires,\n extras_require={\n 'Tester': [\"eth-testrpc>=1.1.0\"],\n 'tester': [\"eth-testrpc>=1.1.0\"],\n 'gevent': [\n \"gevent>=1.1.1,<1.2.0\",\n \"geventhttpclient>=1.3.1\",\n ],\n },\n py_modules=['web3'],\n license=\"MIT\",\n zip_safe=False,\n keywords='ethereum',\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n ],\n)\n", "path": "setup.py"}, {"content": "import uuid\nimport json\nimport collections\n\nimport rlp\n\nfrom eth_utils import (\n force_text,\n to_normalized_address,\n is_string,\n is_dict,\n encode_hex,\n decode_hex,\n keccak,\n)\n\nfrom web3.utils.encoding import (\n to_decimal,\n)\nfrom web3.utils.transactions import (\n is_bitcoin_available,\n Transaction,\n serialize_transaction,\n add_signature_to_transaction,\n)\nfrom web3.utils.compat import (\n spawn,\n)\n\n\nclass RequestManager(object):\n def __init__(self, provider):\n self.pending_requests = {}\n self.provider = provider\n\n def setProvider(self, provider):\n self.provider = provider\n\n def request_blocking(self, method, params):\n \"\"\"\n Make a synchronous request using the provider\n \"\"\"\n response_raw = self.provider.make_request(method, params)\n\n if is_string(response_raw):\n response = json.loads(force_text(response_raw))\n elif is_dict(response_raw):\n response = response_raw\n\n if \"error\" in response:\n raise ValueError(response[\"error\"])\n\n return response['result']\n\n def request_async(self, method, params):\n request_id = uuid.uuid4()\n self.pending_requests[request_id] = spawn(\n self.request_blocking,\n method,\n params,\n )\n return request_id\n\n def receive_blocking(self, request_id, timeout=None):\n try:\n request = self.pending_requests.pop(request_id)\n except KeyError:\n raise KeyError(\"Request for id:{0} not found\".format(request_id))\n else:\n response_raw = request.get(timeout=timeout)\n\n response = json.loads(response_raw)\n\n if \"error\" in response:\n raise ValueError(response[\"error\"])\n\n return response['result']\n\n def receive_async(self, request_id, *args, **kwargs):\n raise 
NotImplementedError(\"Callback pattern not implemented\")\n\n\nclass ManagerWrapper(object):\n def __init__(self, wrapped_manager):\n self.wrapped_manager = wrapped_manager\n\n @property\n def provider(self):\n return self.wrapped_manager.provider\n\n @property\n def pending_requests(self):\n return self.wrapped_manager.pending_requests\n\n def setProvider(self, provider):\n self.wrapped_manager.provider = provider\n\n def request_blocking(self, *args, **kwargs):\n return self.wrapped_manager.request_blocking(*args, **kwargs)\n\n def request_async(self, *args, **kwargs):\n return self.wrapped_manager.request_async(*args, **kwargs)\n\n def receive_blocking(self, *args, **kwargs):\n return self.wrapped_manager.receive_blocking(*args, **kwargs)\n\n def receive_async(self, *args, **kwargs):\n return self.wrapped_manager.receive_async(*args, **kwargs)\n\n\nclass BaseSendRawTransactionMixin(ManagerWrapper):\n _known_transactions = None\n _known_nonces = None\n\n def __init__(self, *args, **kwargs):\n self._known_transactions = collections.defaultdict(set)\n self._known_nonces = collections.defaultdict(set)\n super(BaseSendRawTransactionMixin, self).__init__(*args, **kwargs)\n\n def _get_nonces_and_cleanup(self, addr, chain_nonce):\n all_txns = {\n txn_hash: self.request_blocking(\n 'eth_getTransactionByHash',\n [txn_hash],\n ) for txn_hash in self._known_transactions[addr]\n }\n for txn_hash, txn in all_txns.items():\n if txn is None:\n continue\n txn_nonce = to_decimal(txn['nonce'])\n if txn_nonce < chain_nonce:\n self._known_transactions[addr].discard(txn_hash)\n else:\n yield txn_nonce\n\n all_known_nonces = tuple(self._known_nonces[addr])\n for nonce in all_known_nonces:\n if nonce < chain_nonce:\n self._known_nonces[addr].discard(nonce)\n else:\n yield nonce\n\n def get_chain_nonce(self, addr):\n chain_nonce = to_decimal(self.request_blocking(\n 'eth_getTransactionCount',\n [addr, 'pending']\n ))\n return chain_nonce\n\n def get_nonce(self, addr):\n chain_nonce = self.get_chain_nonce(addr)\n tracked_txn_nonces = tuple(self._get_nonces_and_cleanup(addr, chain_nonce))\n nonce = max(0, chain_nonce, *tracked_txn_nonces)\n if nonce == 0 and not tracked_txn_nonces:\n return -1\n else:\n return nonce\n\n def get_transaction_signature(self, serialized_txn):\n raise NotImplementedError(\"Must be implemented by subclasses\")\n\n def sign_and_serialize_transaction(self, transaction):\n serialized_txn = serialize_transaction(transaction)\n signature = self.get_transaction_signature(transaction)\n signed_transaction = add_signature_to_transaction(\n serialized_txn,\n signature,\n )\n signed_and_serialized_txn = rlp.encode(signed_transaction, Transaction)\n return signed_and_serialized_txn\n\n def construct_full_transaction(self, base_transaction):\n txn_from = base_transaction['from']\n full_txn = dict(**base_transaction)\n full_txn.setdefault('nonce', self.get_nonce(txn_from) + 1)\n full_txn.setdefault('gasPrice', self.request_blocking(\n 'eth_gasPrice', []\n ))\n full_txn.setdefault('gas', hex(90000))\n full_txn.setdefault('value', '0x0')\n full_txn.setdefault('to', '')\n full_txn.setdefault('data', '')\n return full_txn\n\n TXN_SENDING_METHODS = {\n 'eth_sendTransaction',\n 'eth_sendRawTransaction',\n 'personal_signAndSendTransaction',\n 'personal_sendTransaction',\n }\n\n def request_blocking(self, method, params):\n if method == 'eth_sendTransaction':\n base_transaction = params[0]\n # create a fully signed transaction and send through the\n # `eth_sendRawTransaction` endpoint instead.\n 
full_transaction = self.construct_full_transaction(base_transaction)\n raw_transaction_bytes = self.sign_and_serialize_transaction(\n full_transaction,\n )\n raw_transaction_bytes_as_hex = encode_hex(raw_transaction_bytes)\n return self.request_blocking(\n 'eth_sendRawTransaction', [raw_transaction_bytes_as_hex],\n )\n\n result = super(BaseSendRawTransactionMixin, self).request_blocking(\n method, params,\n )\n if method in self.TXN_SENDING_METHODS:\n if method == 'eth_sendRawTransaction':\n txn = rlp.decode(decode_hex(params[0]), Transaction)\n self._known_transactions[to_normalized_address(txn.sender)].add(result)\n self._known_nonces[to_normalized_address(txn.sender)].add(txn.nonce)\n else:\n txn = params[0]\n self._known_transactions[to_normalized_address(txn['from'])].add(result)\n if 'nonce' in txn:\n self._known_nonces[to_normalized_address(txn['from'])].add(\n to_decimal(txn['nonce'])\n )\n return result\n\n\nclass DelegatedSigningManager(BaseSendRawTransactionMixin):\n def __init__(self, *args, **kwargs):\n self.signing_manager = kwargs.pop('signing_manager')\n super(DelegatedSigningManager, self).__init__(*args, **kwargs)\n\n def get_chain_nonce(self, addr):\n signer_nonce = to_decimal(self.signing_manager.request_blocking(\n 'eth_getTransactionCount',\n [addr, 'pending']\n ))\n wrapped_nonce = to_decimal(self.wrapped_manager.request_blocking(\n 'eth_getTransactionCount',\n [addr, 'pending']\n ))\n return max(signer_nonce, wrapped_nonce)\n\n def get_transaction_signature(self, transaction):\n serialized_txn = serialize_transaction(transaction)\n hash_to_sign = self.signing_manager.request_blocking(\n 'web3_sha3', [encode_hex(serialized_txn)],\n )\n signature_hex = self.signing_manager.request_blocking(\n 'eth_sign',\n [\n transaction['from'],\n hash_to_sign,\n ],\n )\n signature = decode_hex(signature_hex)\n return signature\n\n\nclass PrivateKeySigningManager(BaseSendRawTransactionMixin):\n def __init__(self, *args, **kwargs):\n if not is_bitcoin_available():\n raise ImportError(\n \"In order to use the `PrivateKeySigningManager` the \"\n \"`bitcoin` and `secp256k1` packages must be installed.\"\n )\n self.keys = kwargs.pop('keys', {})\n super(PrivateKeySigningManager, self).__init__(*args, **kwargs)\n\n def register_private_key(self, key):\n from bitcoin import privtopub\n address = to_normalized_address(keccak(privtopub(key)[1:])[-20:])\n self.keys[address] = key\n\n def sign_and_serialize_transaction(self, transaction):\n txn_from = to_normalized_address(transaction['from'])\n if txn_from not in self.keys:\n raise KeyError(\"No signing key registered for from address: {0}\".format(txn_from))\n transaction = Transaction(\n nonce=to_decimal(transaction['nonce']),\n gasprice=to_decimal(transaction['gasPrice']),\n startgas=to_decimal(transaction['gas']),\n to=transaction['to'],\n value=to_decimal(transaction['value']),\n data=decode_hex(transaction['data']),\n )\n transaction.sign(self.keys[txn_from])\n assert to_normalized_address(transaction.sender) == txn_from\n return rlp.encode(transaction, Transaction)\n", "path": "web3/providers/manager.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport sys\n\nfrom setuptools import (\n setup,\n find_packages,\n)\n\n\nDIR = os.path.dirname(os.path.abspath(__file__))\n\n\nreadme = open(os.path.join(DIR, 'README.md')).read()\n\ninstall_requires=[\n \"ethereum-abi-utils>=0.4.0\",\n \"ethereum-utils>=0.2.0\",\n \"pylru>=1.0.9\",\n \"pysha3>=0.3\",\n \"requests>=2.12.4\",\n 
\"rlp>=0.4.7\",\n]\n\nif sys.platform == 'win32':\n install_requires.append('pypiwin32')\n\nsetup(\n name='web3',\n version='3.7.2',\n description=\"\"\"Web3.py\"\"\",\n long_description=readme,\n author='Piper Merriam',\n author_email='[email protected]',\n url='https://github.com/pipermerriam/web3.py',\n include_package_data=True,\n install_requires=install_requires,\n extras_require={\n 'tester': [\"eth-testrpc>=1.2.0\"],\n 'gevent': [\n \"gevent>=1.1.1,<1.2.0\",\n \"geventhttpclient>=1.3.1\",\n ],\n },\n py_modules=['web3'],\n license=\"MIT\",\n zip_safe=False,\n keywords='ethereum',\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n ],\n)\n", "path": "setup.py"}, {"content": "import collections\nimport json\nimport uuid\nimport warnings\n\nimport rlp\n\nfrom eth_utils import (\n decode_hex,\n encode_hex,\n force_text,\n is_dict,\n is_string,\n keccak,\n to_normalized_address,\n)\n\nfrom web3.utils.encoding import (\n to_decimal,\n)\nfrom web3.utils.transactions import (\n is_bitcoin_available,\n Transaction,\n serialize_transaction,\n add_signature_to_transaction,\n)\nfrom web3.utils.compat import (\n spawn,\n)\n\n\nclass RequestManager(object):\n def __init__(self, provider):\n self.pending_requests = {}\n self.provider = provider\n\n def setProvider(self, provider):\n self.provider = provider\n\n def request_blocking(self, method, params):\n \"\"\"\n Make a synchronous request using the provider\n \"\"\"\n response_raw = self.provider.make_request(method, params)\n\n if is_string(response_raw):\n response = json.loads(force_text(response_raw))\n elif is_dict(response_raw):\n response = response_raw\n\n if \"error\" in response:\n raise ValueError(response[\"error\"])\n\n return response['result']\n\n def request_async(self, method, params):\n request_id = uuid.uuid4()\n self.pending_requests[request_id] = spawn(\n self.request_blocking,\n method,\n params,\n )\n return request_id\n\n def receive_blocking(self, request_id, timeout=None):\n try:\n request = self.pending_requests.pop(request_id)\n except KeyError:\n raise KeyError(\"Request for id:{0} not found\".format(request_id))\n else:\n response_raw = request.get(timeout=timeout)\n\n response = json.loads(response_raw)\n\n if \"error\" in response:\n raise ValueError(response[\"error\"])\n\n return response['result']\n\n def receive_async(self, request_id, *args, **kwargs):\n raise NotImplementedError(\"Callback pattern not implemented\")\n\n\nclass ManagerWrapper(object):\n def __init__(self, wrapped_manager):\n warnings.warn(DeprecationWarning(\n \"ManagerWrapper has been deprecated and will be removed from\"\n \"web3.py in subsequen releases.\"\n ))\n self.wrapped_manager = wrapped_manager\n\n @property\n def provider(self):\n return self.wrapped_manager.provider\n\n @property\n def pending_requests(self):\n return self.wrapped_manager.pending_requests\n\n def setProvider(self, provider):\n self.wrapped_manager.provider = provider\n\n def request_blocking(self, *args, **kwargs):\n return self.wrapped_manager.request_blocking(*args, **kwargs)\n\n def request_async(self, *args, **kwargs):\n return self.wrapped_manager.request_async(*args, 
**kwargs)\n\n def receive_blocking(self, *args, **kwargs):\n return self.wrapped_manager.receive_blocking(*args, **kwargs)\n\n def receive_async(self, *args, **kwargs):\n return self.wrapped_manager.receive_async(*args, **kwargs)\n\n\nclass BaseSendRawTransactionMixin(ManagerWrapper):\n _known_transactions = None\n _known_nonces = None\n\n def __init__(self, *args, **kwargs):\n self._known_transactions = collections.defaultdict(set)\n self._known_nonces = collections.defaultdict(set)\n super(BaseSendRawTransactionMixin, self).__init__(*args, **kwargs)\n\n def _get_nonces_and_cleanup(self, addr, chain_nonce):\n all_txns = {\n txn_hash: self.request_blocking(\n 'eth_getTransactionByHash',\n [txn_hash],\n ) for txn_hash in self._known_transactions[addr]\n }\n for txn_hash, txn in all_txns.items():\n if txn is None:\n continue\n txn_nonce = to_decimal(txn['nonce'])\n if txn_nonce < chain_nonce:\n self._known_transactions[addr].discard(txn_hash)\n else:\n yield txn_nonce\n\n all_known_nonces = tuple(self._known_nonces[addr])\n for nonce in all_known_nonces:\n if nonce < chain_nonce:\n self._known_nonces[addr].discard(nonce)\n else:\n yield nonce\n\n def get_chain_nonce(self, addr):\n chain_nonce = to_decimal(self.request_blocking(\n 'eth_getTransactionCount',\n [addr, 'pending']\n ))\n return chain_nonce\n\n def get_nonce(self, addr):\n chain_nonce = self.get_chain_nonce(addr)\n tracked_txn_nonces = tuple(self._get_nonces_and_cleanup(addr, chain_nonce))\n nonce = max(0, chain_nonce, *tracked_txn_nonces)\n if nonce == 0 and not tracked_txn_nonces:\n return -1\n else:\n return nonce\n\n def get_transaction_signature(self, serialized_txn):\n raise NotImplementedError(\"Must be implemented by subclasses\")\n\n def sign_and_serialize_transaction(self, transaction):\n serialized_txn = serialize_transaction(transaction)\n signature = self.get_transaction_signature(transaction)\n signed_transaction = add_signature_to_transaction(\n serialized_txn,\n signature,\n )\n signed_and_serialized_txn = rlp.encode(signed_transaction, Transaction)\n return signed_and_serialized_txn\n\n def construct_full_transaction(self, base_transaction):\n txn_from = base_transaction['from']\n full_txn = dict(**base_transaction)\n full_txn.setdefault('nonce', self.get_nonce(txn_from) + 1)\n full_txn.setdefault('gasPrice', self.request_blocking(\n 'eth_gasPrice', []\n ))\n full_txn.setdefault('gas', hex(90000))\n full_txn.setdefault('value', '0x0')\n full_txn.setdefault('to', '')\n full_txn.setdefault('data', '')\n return full_txn\n\n TXN_SENDING_METHODS = {\n 'eth_sendTransaction',\n 'eth_sendRawTransaction',\n 'personal_signAndSendTransaction',\n 'personal_sendTransaction',\n }\n\n def request_blocking(self, method, params):\n if method == 'eth_sendTransaction':\n base_transaction = params[0]\n # create a fully signed transaction and send through the\n # `eth_sendRawTransaction` endpoint instead.\n full_transaction = self.construct_full_transaction(base_transaction)\n raw_transaction_bytes = self.sign_and_serialize_transaction(\n full_transaction,\n )\n raw_transaction_bytes_as_hex = encode_hex(raw_transaction_bytes)\n return self.request_blocking(\n 'eth_sendRawTransaction', [raw_transaction_bytes_as_hex],\n )\n\n result = super(BaseSendRawTransactionMixin, self).request_blocking(\n method, params,\n )\n if method in self.TXN_SENDING_METHODS:\n if method == 'eth_sendRawTransaction':\n txn = rlp.decode(decode_hex(params[0]), Transaction)\n self._known_transactions[to_normalized_address(txn.sender)].add(result)\n 
self._known_nonces[to_normalized_address(txn.sender)].add(txn.nonce)\n else:\n txn = params[0]\n self._known_transactions[to_normalized_address(txn['from'])].add(result)\n if 'nonce' in txn:\n self._known_nonces[to_normalized_address(txn['from'])].add(\n to_decimal(txn['nonce'])\n )\n return result\n\n\nclass DelegatedSigningManager(BaseSendRawTransactionMixin):\n def __init__(self, *args, **kwargs):\n warnings.warn(DeprecationWarning(\n \"DelegatedSigningManager has been deprecated and will be removed from\"\n \"web3.py in subsequen releases.\"\n ))\n self.signing_manager = kwargs.pop('signing_manager')\n super(DelegatedSigningManager, self).__init__(*args, **kwargs)\n\n def get_chain_nonce(self, addr):\n signer_nonce = to_decimal(self.signing_manager.request_blocking(\n 'eth_getTransactionCount',\n [addr, 'pending']\n ))\n wrapped_nonce = to_decimal(self.wrapped_manager.request_blocking(\n 'eth_getTransactionCount',\n [addr, 'pending']\n ))\n return max(signer_nonce, wrapped_nonce)\n\n def get_transaction_signature(self, transaction):\n serialized_txn = serialize_transaction(transaction)\n hash_to_sign = self.signing_manager.request_blocking(\n 'web3_sha3', [encode_hex(serialized_txn)],\n )\n signature_hex = self.signing_manager.request_blocking(\n 'eth_sign',\n [\n transaction['from'],\n hash_to_sign,\n ],\n )\n signature = decode_hex(signature_hex)\n return signature\n\n\nclass PrivateKeySigningManager(BaseSendRawTransactionMixin):\n def __init__(self, *args, **kwargs):\n warnings.warn(DeprecationWarning(\n \"PrivateKeySigningManager has been deprecated and will be removed from\"\n \"web3.py in subsequen releases.\"\n ))\n if not is_bitcoin_available():\n raise ImportError(\n \"In order to use the `PrivateKeySigningManager` the \"\n \"`bitcoin` and `secp256k1` packages must be installed.\"\n )\n self.keys = kwargs.pop('keys', {})\n super(PrivateKeySigningManager, self).__init__(*args, **kwargs)\n\n def register_private_key(self, key):\n from bitcoin import privtopub\n address = to_normalized_address(keccak(privtopub(key)[1:])[-20:])\n self.keys[address] = key\n\n def sign_and_serialize_transaction(self, transaction):\n txn_from = to_normalized_address(transaction['from'])\n if txn_from not in self.keys:\n raise KeyError(\"No signing key registered for from address: {0}\".format(txn_from))\n transaction = Transaction(\n nonce=to_decimal(transaction['nonce']),\n gasprice=to_decimal(transaction['gasPrice']),\n startgas=to_decimal(transaction['gas']),\n to=transaction['to'],\n value=to_decimal(transaction['value']),\n data=decode_hex(transaction['data']),\n )\n transaction.sign(self.keys[txn_from])\n assert to_normalized_address(transaction.sender) == txn_from\n return rlp.encode(transaction, Transaction)\n", "path": "web3/providers/manager.py"}]}
| 3,812 | 659 |
gh_patches_debug_4597
|
rasdani/github-patches
|
git_diff
|
keras-team__keras-16277
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ThresholdedReLU crashes when the input is a list
**System information**.
- Have I written custom code (as opposed to using a stock example script provided in Keras): Yes
- OS Platform and Distribution (e.g., Linux Ubuntu 16.04): Ubuntu
- TensorFlow installed from (source or binary): binary
- TensorFlow version (use command below): 2.8.0
- Python version: 3.7.12
- Bazel version (if compiling from source): N/A
- GPU model and memory: N/A
- Exact command to reproduce: https://colab.research.google.com/drive/144FOk8RO-Ew_eBtGZlCmsUL6k0cEAlUO?usp=sharing
**Describe the problem**.
`keras.layers.ThresholdedReLU` fails to accept a list input by reporting the following error:
```
[/usr/local/lib/python3.7/dist-packages/keras/layers/advanced_activations.py](https://localhost:8080/#) in call(self, inputs)
262
263 def call(self, inputs):
--> 264 theta = tf.cast(self.theta, inputs.dtype)
265 return inputs * tf.cast(tf.greater(inputs, theta), inputs.dtype)
266
AttributeError: Exception encountered when calling layer "thresholded_re_lu_1" (type ThresholdedReLU).
'list' object has no attribute 'dtype'
Call arguments received:
• inputs=['tf.Tensor(shape=(None, 1, 10), dtype=float32)', 'tf.Tensor(shape=(None, 1, 10), dtype=float32)', 'tf.Tensor(shape=(None, 1, 10), dtype=float32)']
```
In contrast, `keras.layers.ReLU` and `keras.layers.LeakyReLU` can accept the list input.
**Describe the current behavior**.
`keras.layers.ThresholdedReLU` crashes when the input is a list
**Describe the expected behavior**.
ThresholdedReLU can accept the list input.
**[Contributing](https://github.com/keras-team/keras/blob/master/CONTRIBUTING.md)**.
- Do you want to contribute a PR? (yes/no):
- If yes, please read [this page](https://github.com/keras-team/keras/blob/master/CONTRIBUTING.md) for instructions
- Briefly describe your candidate solution(if contributing):
After comparing the code between `ThresholdedReLU` and `ReLU`, I think the reason is that `ReLU` directly uses the backend implementation: [keras/layers/activation/relu.py#L96](https://github.com/keras-team/keras/blob/90aa76f6c48ec5252b2db7926ff060e262dfc1cc/keras/layers/activation/relu.py#L96), while `ThresholdedReLU` implements it by itself: [keras/layers/activation/thresholded_relu.py#L63](https://github.com/keras-team/keras/blob/90aa76f6c48ec5252b2db7926ff060e262dfc1cc/keras/layers/activation/thresholded_relu.py#L63). I am not sure why such an implementation inconsistency exists, but I think we can do something similar in thresholded_relu.py#L61-63, like [backend.relu](https://github.com/keras-team/keras/blob/90aa76f6c48ec5252b2db7926ff060e262dfc1cc/keras/backend.py#L4963) does:
```
def call(self, inputs):
dtype = getattr(inputs, 'dtype', floatx())
theta = tf.cast(self.theta, dtype)
return inputs * tf.cast(tf.greater(inputs, theta), dtype)
```
Of course, we could also use `backend.relu` directly for the implementation of `ThresholdedReLU`, as `ReLU` and `LeakyReLU` do.
**Standalone code to reproduce the issue**.
You can access this [link](https://colab.research.google.com/drive/144FOk8RO-Ew_eBtGZlCmsUL6k0cEAlUO?usp=sharing) or run the following code:
```
import keras
x = keras.layers.Input(shape=(1,10))
y = keras.layers.ThresholdedReLU()([x,x,x])
model = keras.models.Model(x,y)
model.summary()
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `keras/layers/activation/thresholded_relu.py`
Content:
```
1 # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """Thresholded Rectified Linear Unit activation layer."""
16 # pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
17
18 from keras import backend
19 from keras.engine.base_layer import Layer
20 from keras.utils import tf_utils
21 import tensorflow.compat.v2 as tf
22
23 from tensorflow.python.util.tf_export import keras_export
24
25
26 @keras_export('keras.layers.ThresholdedReLU')
27 class ThresholdedReLU(Layer):
28 """Thresholded Rectified Linear Unit.
29
30 It follows:
31
32 ```
33 f(x) = x for x > theta
34 f(x) = 0 otherwise`
35 ```
36
37 Input shape:
38 Arbitrary. Use the keyword argument `input_shape`
39 (tuple of integers, does not include the samples axis)
40 when using this layer as the first layer in a model.
41
42 Output shape:
43 Same shape as the input.
44
45 Args:
46 theta: Float >= 0. Threshold location of activation.
47 """
48
49 def __init__(self, theta=1.0, **kwargs):
50 super(ThresholdedReLU, self).__init__(**kwargs)
51 if theta is None:
52 raise ValueError(
53 'Theta of a Thresholded ReLU layer cannot be None, expecting a float.'
54 f' Received: {theta}')
55 if theta < 0:
56 raise ValueError('The theta value of a Thresholded ReLU layer '
57 f'should be >=0. Received: {theta}')
58 self.supports_masking = True
59 self.theta = backend.cast_to_floatx(theta)
60
61 def call(self, inputs):
62 theta = tf.cast(self.theta, inputs.dtype)
63 return inputs * tf.cast(tf.greater(inputs, theta), inputs.dtype)
64
65 def get_config(self):
66 config = {'theta': float(self.theta)}
67 base_config = super(ThresholdedReLU, self).get_config()
68 return dict(list(base_config.items()) + list(config.items()))
69
70 @tf_utils.shape_type_conversion
71 def compute_output_shape(self, input_shape):
72 return input_shape
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/keras/layers/activation/thresholded_relu.py b/keras/layers/activation/thresholded_relu.py
--- a/keras/layers/activation/thresholded_relu.py
+++ b/keras/layers/activation/thresholded_relu.py
@@ -59,8 +59,8 @@
self.theta = backend.cast_to_floatx(theta)
def call(self, inputs):
- theta = tf.cast(self.theta, inputs.dtype)
- return inputs * tf.cast(tf.greater(inputs, theta), inputs.dtype)
+ dtype = self.compute_dtype
+ return inputs * tf.cast(tf.greater(inputs, self.theta), dtype)
def get_config(self):
config = {'theta': float(self.theta)}
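
With the `inputs.dtype` lookup removed in the patch above, the reproduction from the issue should build instead of raising the `AttributeError`; a minimal sketch, assuming TensorFlow 2.x with its bundled Keras:

```python
# Reproduction from the report, rerun against the patched ThresholdedReLU.
import keras

x = keras.layers.Input(shape=(1, 10))
y = keras.layers.ThresholdedReLU()([x, x, x])  # list input no longer needs a .dtype attribute
model = keras.models.Model(x, y)
model.summary()
```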
|
{"golden_diff": "diff --git a/keras/layers/activation/thresholded_relu.py b/keras/layers/activation/thresholded_relu.py\n--- a/keras/layers/activation/thresholded_relu.py\n+++ b/keras/layers/activation/thresholded_relu.py\n@@ -59,8 +59,8 @@\n self.theta = backend.cast_to_floatx(theta)\n \n def call(self, inputs):\n- theta = tf.cast(self.theta, inputs.dtype)\n- return inputs * tf.cast(tf.greater(inputs, theta), inputs.dtype)\n+ dtype = self.compute_dtype\n+ return inputs * tf.cast(tf.greater(inputs, self.theta), dtype)\n \n def get_config(self):\n config = {'theta': float(self.theta)}\n", "issue": "ThresholdedReLU crashes when the input is a list\n**System information**.\r\n- Have I written custom code (as opposed to using a stock example script provided in Keras): Yes\r\n- OS Platform and Distribution (e.g., Linux Ubuntu 16.04): Ubuntu\r\n- TensorFlow installed from (source or binary): binary\r\n- TensorFlow version (use command below): 2.8.0\r\n- Python version: 3.7.12\r\n- Bazel version (if compiling from source): N/A\r\n- GPU model and memory: N/A\r\n- Exact command to reproduce: https://colab.research.google.com/drive/144FOk8RO-Ew_eBtGZlCmsUL6k0cEAlUO?usp=sharing\r\n\r\n**Describe the problem**.\r\n`keras.layers.ThresholdedReLU` fails to accept a list input by reporting the following error:\r\n\r\n```\r\n[/usr/local/lib/python3.7/dist-packages/keras/layers/advanced_activations.py](https://localhost:8080/#) in call(self, inputs)\r\n 262 \r\n 263 def call(self, inputs):\r\n--> 264 theta = tf.cast(self.theta, inputs.dtype)\r\n 265 return inputs * tf.cast(tf.greater(inputs, theta), inputs.dtype)\r\n 266 \r\n\r\nAttributeError: Exception encountered when calling layer \"thresholded_re_lu_1\" (type ThresholdedReLU).\r\n\r\n'list' object has no attribute 'dtype'\r\n\r\nCall arguments received:\r\n \u2022 inputs=['tf.Tensor(shape=(None, 1, 10), dtype=float32)', 'tf.Tensor(shape=(None, 1, 10), dtype=float32)', 'tf.Tensor(shape=(None, 1, 10), dtype=float32)']\r\n```\r\nIn contrast, `keras.layers.ReLU` and `keras.layers.LeakyReLU` can accept the list input.\r\n\r\n**Describe the current behavior**.\r\n`keras.layers.ThresholdedReLU` crashes when the input is a list\r\n\r\n**Describe the expected behavior**.\r\nThresholdedReLU can accept the list input.\r\n\r\n**[Contributing](https://github.com/keras-team/keras/blob/master/CONTRIBUTING.md)**.\r\n\r\n- Do you want to contribute a PR? (yes/no):\r\n- If yes, please read [this page](https://github.com/keras-team/keras/blob/master/CONTRIBUTING.md) for instructions\r\n- Briefly describe your candidate solution(if contributing):\r\n\r\nAfter comparing the code between `ThresholedReLU` and `ReLU`, I think the reason is that `ReLU` directly use the backend implementation: [keras/layers/activation/relu.py#L96](https://github.com/keras-team/keras/blob/90aa76f6c48ec5252b2db7926ff060e262dfc1cc/keras/layers/activation/relu.py#L96) while ThresholdedReLU implements by itself: [keras/layers/activation/thresholded_relu.py#L63](https://github.com/keras-team/keras/blob/90aa76f6c48ec5252b2db7926ff060e262dfc1cc/keras/layers/activation/thresholded_relu.py#L63). 
Not sure why does such an implementation inconsistency exist, but I think we can do something similar in the thresholded_relu.py#L61-63 like [backend.relu](https://github.com/keras-team/keras/blob/90aa76f6c48ec5252b2db7926ff060e262dfc1cc/keras/backend.py#L4963) does:\r\n\r\n```\r\ndef call(self, inputs):\r\n dtype = getattr(inputs, 'dtype', floatx())\r\n theta = tf.cast(self.theta, dtype)\r\n return inputs * tf.cast(tf.greater(inputs, theta), dtype)\r\n```\r\n\r\nOf course, we can also directly use the `backend.relu` for the implementation of `ThresholdedReLU` like `ReLU` and `LeakyReLU` do.\r\n\r\n**Standalone code to reproduce the issue**.\r\nYou can access this [link](https://colab.research.google.com/drive/144FOk8RO-Ew_eBtGZlCmsUL6k0cEAlUO?usp=sharing) or run the following code:\r\n\r\n```\r\nimport keras\r\nx = keras.layers.Input(shape=(1,10))\r\ny = keras.layers.ThresholdedReLU()([x,x,x])\r\nmodel = keras.models.Model(x,y)\r\nmodel.summary()\r\n```\r\n\r\n\n", "before_files": [{"content": "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Thresholded Rectified Linear Unit activation layer.\"\"\"\n# pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import\n\nfrom keras import backend\nfrom keras.engine.base_layer import Layer\nfrom keras.utils import tf_utils\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow.python.util.tf_export import keras_export\n\n\n@keras_export('keras.layers.ThresholdedReLU')\nclass ThresholdedReLU(Layer):\n \"\"\"Thresholded Rectified Linear Unit.\n\n It follows:\n\n ```\n f(x) = x for x > theta\n f(x) = 0 otherwise`\n ```\n\n Input shape:\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same shape as the input.\n\n Args:\n theta: Float >= 0. Threshold location of activation.\n \"\"\"\n\n def __init__(self, theta=1.0, **kwargs):\n super(ThresholdedReLU, self).__init__(**kwargs)\n if theta is None:\n raise ValueError(\n 'Theta of a Thresholded ReLU layer cannot be None, expecting a float.'\n f' Received: {theta}')\n if theta < 0:\n raise ValueError('The theta value of a Thresholded ReLU layer '\n f'should be >=0. Received: {theta}')\n self.supports_masking = True\n self.theta = backend.cast_to_floatx(theta)\n\n def call(self, inputs):\n theta = tf.cast(self.theta, inputs.dtype)\n return inputs * tf.cast(tf.greater(inputs, theta), inputs.dtype)\n\n def get_config(self):\n config = {'theta': float(self.theta)}\n base_config = super(ThresholdedReLU, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @tf_utils.shape_type_conversion\n def compute_output_shape(self, input_shape):\n return input_shape\n", "path": "keras/layers/activation/thresholded_relu.py"}], "after_files": [{"content": "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Thresholded Rectified Linear Unit activation layer.\"\"\"\n# pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import\n\nfrom keras import backend\nfrom keras.engine.base_layer import Layer\nfrom keras.utils import tf_utils\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow.python.util.tf_export import keras_export\n\n\n@keras_export('keras.layers.ThresholdedReLU')\nclass ThresholdedReLU(Layer):\n \"\"\"Thresholded Rectified Linear Unit.\n\n It follows:\n\n ```\n f(x) = x for x > theta\n f(x) = 0 otherwise`\n ```\n\n Input shape:\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same shape as the input.\n\n Args:\n theta: Float >= 0. Threshold location of activation.\n \"\"\"\n\n def __init__(self, theta=1.0, **kwargs):\n super(ThresholdedReLU, self).__init__(**kwargs)\n if theta is None:\n raise ValueError(\n 'Theta of a Thresholded ReLU layer cannot be None, expecting a float.'\n f' Received: {theta}')\n if theta < 0:\n raise ValueError('The theta value of a Thresholded ReLU layer '\n f'should be >=0. Received: {theta}')\n self.supports_masking = True\n self.theta = backend.cast_to_floatx(theta)\n\n def call(self, inputs):\n dtype = self.compute_dtype\n return inputs * tf.cast(tf.greater(inputs, self.theta), dtype)\n\n def get_config(self):\n config = {'theta': float(self.theta)}\n base_config = super(ThresholdedReLU, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @tf_utils.shape_type_conversion\n def compute_output_shape(self, input_shape):\n return input_shape\n", "path": "keras/layers/activation/thresholded_relu.py"}]}
| num_tokens: 1,993 | num_tokens_diff: 162 |
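A trailing note on the Keras ThresholdedReLU record above: the reporter's suggestion casts `theta` to `getattr(inputs, 'dtype', floatx())`, while the merged `after_files` version casts the comparison mask to the layer's `compute_dtype` instead. The sketch below only mirrors that patched shape for readability; it assumes the same Keras layout as the record (where `Layer` lives under `keras.engine.base_layer`) and is illustrative, not a verbatim copy of the shipped layer.

```python
import tensorflow.compat.v2 as tf
from keras import backend
from keras.engine.base_layer import Layer


class ThresholdedReLUSketch(Layer):
    """Minimal sketch of the patched call(); illustrative only."""

    def __init__(self, theta=1.0, **kwargs):
        super().__init__(**kwargs)
        self.theta = backend.cast_to_floatx(theta)

    def call(self, inputs):
        # Cast the comparison mask to the layer's compute dtype so the
        # multiply stays in one consistent precision.
        dtype = self.compute_dtype
        return inputs * tf.cast(tf.greater(inputs, self.theta), dtype)
```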
| gh_patches_debug_2919 | rasdani/github-patches | git_diff | mesonbuild__meson-1538 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
VS 2017 backend emits bad WindowsTargetPlatformVersion value
When I tried generating a VS 2017 solution, the generated app.vcxproj contained this:
```
<WindowsTargetPlatformVersion>10.0.14393.0\</WindowsTargetPlatformVersion>
```
Which then causes errors in other `.targets` files attempting to do a numeric comparison against that.
This value is probably taken straight from one of these environment variables:
```
WindowsSDKLibVersion=10.0.14393.0\
WindowsSDKVersion=10.0.14393.0\
```
The trailing backslash is a bit suspect, but may be there intentionally so it can be concatenated to
```
WindowsSdkDir=C:\Program Files (x86)\Windows Kits\10\
```
directly.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mesonbuild/backend/vs2017backend.py`
Content:
```
1 # Copyright 2014-2016 The Meson development team
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16
17 from .vs2010backend import Vs2010Backend
18
19
20 class Vs2017Backend(Vs2010Backend):
21 def __init__(self, build):
22 super().__init__(build)
23 self.name = 'vs2017'
24 self.platform_toolset = 'v141'
25 self.vs_version = '2017'
26 # WindowsSDKVersion should be set by command prompt.
27 self.windows_target_platform_version = os.getenv('WindowsSDKVersion', None)
28
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mesonbuild/backend/vs2017backend.py b/mesonbuild/backend/vs2017backend.py
--- a/mesonbuild/backend/vs2017backend.py
+++ b/mesonbuild/backend/vs2017backend.py
@@ -24,4 +24,4 @@
self.platform_toolset = 'v141'
self.vs_version = '2017'
# WindowsSDKVersion should be set by command prompt.
- self.windows_target_platform_version = os.getenv('WindowsSDKVersion', None)
+ self.windows_target_platform_version = os.getenv('WindowsSDKVersion', None).rstrip('\\')
|
{"golden_diff": "diff --git a/mesonbuild/backend/vs2017backend.py b/mesonbuild/backend/vs2017backend.py\n--- a/mesonbuild/backend/vs2017backend.py\n+++ b/mesonbuild/backend/vs2017backend.py\n@@ -24,4 +24,4 @@\n self.platform_toolset = 'v141'\n self.vs_version = '2017'\n # WindowsSDKVersion should be set by command prompt.\n- self.windows_target_platform_version = os.getenv('WindowsSDKVersion', None)\n+ self.windows_target_platform_version = os.getenv('WindowsSDKVersion', None).rstrip('\\\\')\n", "issue": "VS 2017 backend emits bad WindowsTargetPlatformVersion value\nWhen I tried generating a VS 2017 solution, the generated app.vcxproj contained this:\r\n\r\n```\r\n<WindowsTargetPlatformVersion>10.0.14393.0\\</WindowsTargetPlatformVersion>\r\n```\r\n\r\nWhich then causes errors in other `.targets` files attempting to do a numeric comparison against that.\r\nThis value is probably taken straight from one of these environment variables:\r\n\r\n```\r\nWindowsSDKLibVersion=10.0.14393.0\\\r\nWindowsSDKVersion=10.0.14393.0\\\r\n```\r\n\r\nThe trailing backslash is a bit suspect, but may be there intentionally so it can be concatenated to \r\n```\r\nWindowsSdkDir=C:\\Program Files (x86)\\Windows Kits\\10\\\r\n```\r\ndirectly.\n", "before_files": [{"content": "# Copyright 2014-2016 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nfrom .vs2010backend import Vs2010Backend\n\n\nclass Vs2017Backend(Vs2010Backend):\n def __init__(self, build):\n super().__init__(build)\n self.name = 'vs2017'\n self.platform_toolset = 'v141'\n self.vs_version = '2017'\n # WindowsSDKVersion should be set by command prompt.\n self.windows_target_platform_version = os.getenv('WindowsSDKVersion', None)\n", "path": "mesonbuild/backend/vs2017backend.py"}], "after_files": [{"content": "# Copyright 2014-2016 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nfrom .vs2010backend import Vs2010Backend\n\n\nclass Vs2017Backend(Vs2010Backend):\n def __init__(self, build):\n super().__init__(build)\n self.name = 'vs2017'\n self.platform_toolset = 'v141'\n self.vs_version = '2017'\n # WindowsSDKVersion should be set by command prompt.\n self.windows_target_platform_version = os.getenv('WindowsSDKVersion', None).rstrip('\\\\')\n", "path": "mesonbuild/backend/vs2017backend.py"}]}
| num_tokens: 750 | num_tokens_diff: 144 |
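A trailing note on the Meson record above: the one-line fix strips the backslash that a VS 2017 command prompt leaves on `WindowsSDKVersion`. Below is a standalone illustration; nothing in it is taken from Meson beyond the environment-variable name quoted in the issue.

```python
import os

# Illustrative only: emulate the value a VS 2017 command prompt exports,
# exactly as quoted in the issue (note the trailing backslash).
os.environ["WindowsSDKVersion"] = "10.0.14393.0\\"

raw = os.getenv("WindowsSDKVersion")
cleaned = raw.rstrip("\\") if raw is not None else None

print(raw)      # 10.0.14393.0\
print(cleaned)  # 10.0.14393.0
```

One caveat worth noting: because `os.getenv('WindowsSDKVersion', None)` returns `None` when the variable is unset, chaining `.rstrip('\\')` directly, as the patch does, would raise `AttributeError` outside a VS command prompt; guarding the call as in the sketch is one way to keep that case graceful.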
| gh_patches_debug_10204 | rasdani/github-patches | git_diff | deepchecks__deepchecks-1098 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[FEAT][CV] Add a "per-class" option to property drift & heatmap comparison
In this per class option, the drift would be shown per class for the top drifted classes.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/source/examples/vision/checks/distribution/source/plot_image_property_check.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 Image Property Drift Check
4 **************************
5 This notebooks provides an overview for using and understanding the image property drift check.
6
7 **Structure:**
8
9 * `How Does the ImagePropertyDrift Check Work? <#how-does-the-imagepropertydrift-check-work>`__
10 * `Which Image Properties Are Used? <#which-image-properties-are-used>`__
11 * `Prepare data <#prepare-data>`__
12 * `Run the check <#run-the-check>`__
13 * `Define a condition <#define-a-condition>`__
14 * `Check Parameters <#check-parameters>`__
15
16 How Does the ImagePropertyDrift Check Work?
17 =================================
18 Data drift is simply a change in the distribution of data over time. It is also one
19 of the top reasons that a machine learning model performance degrades over time.
20
21 In the context of machine learning, drift between the training set and the test set
22 will likely make the model prone to errors. In other words, if the model was trained
23 on data that is different from the current test data, it will probably make more mistakes
24 predicting the target variable.
25
26 The Image Property Drift check calculates a drift score for each image property in
27 the test dataset, by comparing its distribution to the train dataset. For this, we
28 use the Earth Movers Distance (Wasserstein distance).
29
30 Which Image Properties Are Used?
31 =================================
32 ============================== ==========
33 Property name What is it
34 ============================== ==========
35 Aspect Ratio Ratio between height and width of image (height / width)
36 Area Area of image in pixels (height * width)
37 Brightness Average intensity of image pixels. Color channels have different weights according to
38 RGB-to-Grayscale formula
39 RMS Contrast Contrast of image, calculated by standard deviation of pixels
40 Mean Red Relative Intensity Mean over all pixels of the red channel, scaled to their relative intensity in
41 comparison to the other channels [r / (r + g + b)].
42 Mean Green Relative Intensity Mean over all pixels of the green channel, scaled to their relative intensity in
43 comparison to the other channels [g / (r + g + b)].
44 Mean Blue Relative Intensity Mean over all pixels of the blue channel, scaled to their relative intensity in
45 comparison to the other channels [b / (r + g + b)].
46 ============================== ==========
47
48 Imports
49 -------
50 """
51
52 #%%
53
54 from deepchecks.vision.datasets.detection import coco
55 from deepchecks.vision.checks.distribution import ImagePropertyDrift
56
57 #%%
58 # Prepare data
59 # ------------
60 from deepchecks.vision.utils import image_properties
61
62 train_dataset = coco.load_dataset(train=True, object_type='VisionData')
63 test_dataset = coco.load_dataset(train=False, object_type='VisionData')
64
65 #%%
66 # Run the check
67 # -------------
68
69 check_result = ImagePropertyDrift().run(train_dataset, test_dataset)
70 check_result
71
72 #%%
73 # Observe the check’s output
74 # --------------------------
75 # The result value is a pandas DataFrame that contains drift score for each image property.
76
77 check_result.value
78
79 #%%
80 # Define a condition
81 # ==================
82 # We can define a condition that make sure that image properties drift scores do not
83 # exceed allowed threshold.
84
85 check_result = (
86 ImagePropertyDrift()
87 .add_condition_drift_score_not_greater_than(0.001)
88 .run(train_dataset, test_dataset)
89 )
90 check_result.show(show_additional_outputs=False)
91
92 #%%
93 # Check Parameters
94 # ----------------
95 # Image Property Drift Check accepts two parameters that allows us to control the look of the output:
96 #
97 # * `image_properties` - list of image properties that we are interested in
98 # * `max_num_categories` - Maximal number of categories to use for the calculation of drift using PSI (Population Stability Index)
99 #
100 # Only next string values are allowed for the `image_properties` parameter:
101 #
102 # * `aspect_ratio`
103 # * `area`
104 # * `brightness`
105 # * `mean_red_relative_intensity`
106 # * `mean_green_relative_intensity`
107 # * `mean_blue_relative_intensity`
108
109 from typing import List
110 import numpy as np
111
112
113 def area(images: List[np.ndarray]) -> List[int]:
114 # Return list of integers of image areas (height multiplied by width)
115 return [img.shape[0] * img.shape[1] for img in images]
116
117
118 def aspect_ratio(images: List[np.ndarray]) -> List[float]:
119 # Return list of floats of image height to width ratio
120 return [img.shape[0] / img.shape[1] for img in images]
121
122
123 properties = [
124 {'name': 'Area', 'method': area, 'output_type': 'continuous'},
125 {'name': 'Aspect Ratio', 'method': aspect_ratio, 'output_type': 'continuous'}
126 ]
127
128 check_result = ImagePropertyDrift(
129 alternative_image_properties=properties,
130 max_num_categories=20
131 ).run(train_dataset, test_dataset)
132
133 check_result
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/source/examples/vision/checks/distribution/source/plot_image_property_check.py b/docs/source/examples/vision/checks/distribution/source/plot_image_property_check.py
--- a/docs/source/examples/vision/checks/distribution/source/plot_image_property_check.py
+++ b/docs/source/examples/vision/checks/distribution/source/plot_image_property_check.py
@@ -76,6 +76,17 @@
check_result.value
+#%%
+# We can also pass the check a list of classes we wish to inspect, and the check will calculate the properties only
+# for images either belonging to the classes or containing annotations belonging to the classes. (We'll lower the
+# min_samples to 5 to tell the check to calculate drift despite having only a few images left after the class
+# filtration)
+
+check_result = ImagePropertyDrift(classes_to_display=['person', 'cat', 'cell phone', 'car'], min_samples=5
+ ).run(train_dataset, test_dataset)
+check_result
+
+
#%%
# Define a condition
# ==================
|
{"golden_diff": "diff --git a/docs/source/examples/vision/checks/distribution/source/plot_image_property_check.py b/docs/source/examples/vision/checks/distribution/source/plot_image_property_check.py\n--- a/docs/source/examples/vision/checks/distribution/source/plot_image_property_check.py\n+++ b/docs/source/examples/vision/checks/distribution/source/plot_image_property_check.py\n@@ -76,6 +76,17 @@\n \n check_result.value\n \n+#%%\n+# We can also pass the check a list of classes we wish to inspect, and the check will calculate the properties only\n+# for images either belonging to the classes or containing annotations belonging to the classes. (We'll lower the\n+# min_samples to 5 to tell the check to calculate drift despite having only a few images left after the class\n+# filtration)\n+\n+check_result = ImagePropertyDrift(classes_to_display=['person', 'cat', 'cell phone', 'car'], min_samples=5\n+ ).run(train_dataset, test_dataset)\n+check_result\n+\n+\n #%%\n # Define a condition\n # ==================\n", "issue": "[FEAT][CV] Add a \"per-class\" option to property drift & heatmap comparison\nIn this per class option, the drift would be shown per class for the top drifted classes. \n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nImage Property Drift Check\n**************************\nThis notebooks provides an overview for using and understanding the image property drift check.\n\n**Structure:**\n\n* `How Does the ImagePropertyDrift Check Work? <#how-does-the-imagepropertydrift-check-work>`__\n* `Which Image Properties Are Used? <#which-image-properties-are-used>`__\n* `Prepare data <#prepare-data>`__\n* `Run the check <#run-the-check>`__\n* `Define a condition <#define-a-condition>`__\n* `Check Parameters <#check-parameters>`__\n\nHow Does the ImagePropertyDrift Check Work?\n=================================\nData drift is simply a change in the distribution of data over time. It is also one\nof the top reasons that a machine learning model performance degrades over time.\n\nIn the context of machine learning, drift between the training set and the test set\nwill likely make the model prone to errors. In other words, if the model was trained\non data that is different from the current test data, it will probably make more mistakes\npredicting the target variable.\n\nThe Image Property Drift check calculates a drift score for each image property in\nthe test dataset, by comparing its distribution to the train dataset. For this, we\nuse the Earth Movers Distance (Wasserstein distance).\n\nWhich Image Properties Are Used?\n=================================\n============================== ==========\nProperty name What is it\n============================== ==========\nAspect Ratio Ratio between height and width of image (height / width)\nArea Area of image in pixels (height * width)\nBrightness Average intensity of image pixels. 
Color channels have different weights according to\n RGB-to-Grayscale formula\nRMS Contrast Contrast of image, calculated by standard deviation of pixels\nMean Red Relative Intensity Mean over all pixels of the red channel, scaled to their relative intensity in\n comparison to the other channels [r / (r + g + b)].\nMean Green Relative Intensity Mean over all pixels of the green channel, scaled to their relative intensity in\n comparison to the other channels [g / (r + g + b)].\nMean Blue Relative Intensity Mean over all pixels of the blue channel, scaled to their relative intensity in\n comparison to the other channels [b / (r + g + b)].\n============================== ==========\n\nImports\n-------\n\"\"\"\n\n#%%\n\nfrom deepchecks.vision.datasets.detection import coco\nfrom deepchecks.vision.checks.distribution import ImagePropertyDrift\n\n#%%\n# Prepare data\n# ------------\nfrom deepchecks.vision.utils import image_properties\n\ntrain_dataset = coco.load_dataset(train=True, object_type='VisionData')\ntest_dataset = coco.load_dataset(train=False, object_type='VisionData')\n\n#%%\n# Run the check \n# -------------\n\ncheck_result = ImagePropertyDrift().run(train_dataset, test_dataset)\ncheck_result\n\n#%%\n# Observe the check\u2019s output \n# --------------------------\n# The result value is a pandas DataFrame that contains drift score for each image property.\n\ncheck_result.value\n\n#%%\n# Define a condition\n# ==================\n# We can define a condition that make sure that image properties drift scores do not\n# exceed allowed threshold.\n\ncheck_result = (\n ImagePropertyDrift()\n .add_condition_drift_score_not_greater_than(0.001)\n .run(train_dataset, test_dataset)\n)\ncheck_result.show(show_additional_outputs=False)\n\n#%%\n# Check Parameters\n# ----------------\n# Image Property Drift Check accepts two parameters that allows us to control the look of the output:\n#\n# * `image_properties` - list of image properties that we are interested in\n# * `max_num_categories` - Maximal number of categories to use for the calculation of drift using PSI (Population Stability Index)\n#\n# Only next string values are allowed for the `image_properties` parameter:\n#\n# * `aspect_ratio`\n# * `area`\n# * `brightness`\n# * `mean_red_relative_intensity`\n# * `mean_green_relative_intensity`\n# * `mean_blue_relative_intensity`\n\nfrom typing import List\nimport numpy as np\n\n\ndef area(images: List[np.ndarray]) -> List[int]:\n # Return list of integers of image areas (height multiplied by width)\n return [img.shape[0] * img.shape[1] for img in images]\n\n\ndef aspect_ratio(images: List[np.ndarray]) -> List[float]:\n # Return list of floats of image height to width ratio\n return [img.shape[0] / img.shape[1] for img in images]\n\n\nproperties = [\n {'name': 'Area', 'method': area, 'output_type': 'continuous'},\n {'name': 'Aspect Ratio', 'method': aspect_ratio, 'output_type': 'continuous'}\n]\n\ncheck_result = ImagePropertyDrift(\n alternative_image_properties=properties,\n max_num_categories=20\n).run(train_dataset, test_dataset)\n\ncheck_result", "path": "docs/source/examples/vision/checks/distribution/source/plot_image_property_check.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nImage Property Drift Check\n**************************\nThis notebooks provides an overview for using and understanding the image property drift check.\n\n**Structure:**\n\n* `How Does the ImagePropertyDrift Check Work? 
<#how-does-the-imagepropertydrift-check-work>`__\n* `Which Image Properties Are Used? <#which-image-properties-are-used>`__\n* `Prepare data <#prepare-data>`__\n* `Run the check <#run-the-check>`__\n* `Define a condition <#define-a-condition>`__\n* `Check Parameters <#check-parameters>`__\n\nHow Does the ImagePropertyDrift Check Work?\n=================================\nData drift is simply a change in the distribution of data over time. It is also one\nof the top reasons that a machine learning model performance degrades over time.\n\nIn the context of machine learning, drift between the training set and the test set\nwill likely make the model prone to errors. In other words, if the model was trained\non data that is different from the current test data, it will probably make more mistakes\npredicting the target variable.\n\nThe Image Property Drift check calculates a drift score for each image property in\nthe test dataset, by comparing its distribution to the train dataset. For this, we\nuse the Earth Movers Distance (Wasserstein distance).\n\nWhich Image Properties Are Used?\n=================================\n============================== ==========\nProperty name What is it\n============================== ==========\nAspect Ratio Ratio between height and width of image (height / width)\nArea Area of image in pixels (height * width)\nBrightness Average intensity of image pixels. Color channels have different weights according to\n RGB-to-Grayscale formula\nRMS Contrast Contrast of image, calculated by standard deviation of pixels\nMean Red Relative Intensity Mean over all pixels of the red channel, scaled to their relative intensity in\n comparison to the other channels [r / (r + g + b)].\nMean Green Relative Intensity Mean over all pixels of the green channel, scaled to their relative intensity in\n comparison to the other channels [g / (r + g + b)].\nMean Blue Relative Intensity Mean over all pixels of the blue channel, scaled to their relative intensity in\n comparison to the other channels [b / (r + g + b)].\n============================== ==========\n\nImports\n-------\n\"\"\"\n\n#%%\n\nfrom deepchecks.vision.datasets.detection import coco\nfrom deepchecks.vision.checks.distribution import ImagePropertyDrift\n\n#%%\n# Prepare data\n# ------------\nfrom deepchecks.vision.utils import image_properties\n\ntrain_dataset = coco.load_dataset(train=True, object_type='VisionData')\ntest_dataset = coco.load_dataset(train=False, object_type='VisionData')\n\n#%%\n# Run the check \n# -------------\n\ncheck_result = ImagePropertyDrift().run(train_dataset, test_dataset)\ncheck_result\n\n#%%\n# Observe the check\u2019s output \n# --------------------------\n# The result value is a pandas DataFrame that contains drift score for each image property.\n\ncheck_result.value\n\n#%%\n# We can also pass the check a list of classes we wish to inspect, and the check will calculate the properties only\n# for images either belonging to the classes or containing annotations belonging to the classes. 
(We'll lower the\n# min_samples to 5 to tell the check to calculate drift despite having only a few images left after the class\n# filtration)\n\ncheck_result = ImagePropertyDrift(classes_to_display=['person', 'cat', 'cell phone', 'car'], min_samples=5\n ).run(train_dataset, test_dataset)\ncheck_result\n\n\n#%%\n# Define a condition\n# ==================\n# We can define a condition that make sure that image properties drift scores do not\n# exceed allowed threshold.\n\ncheck_result = (\n ImagePropertyDrift()\n .add_condition_drift_score_not_greater_than(0.001)\n .run(train_dataset, test_dataset)\n)\ncheck_result.show(show_additional_outputs=False)\n\n#%%\n# Check Parameters\n# ----------------\n# Image Property Drift Check accepts two parameters that allows us to control the look of the output:\n#\n# * `image_properties` - list of image properties that we are interested in\n# * `max_num_categories` - Maximal number of categories to use for the calculation of drift using PSI (Population Stability Index)\n#\n# Only next string values are allowed for the `image_properties` parameter:\n#\n# * `aspect_ratio`\n# * `area`\n# * `brightness`\n# * `mean_red_relative_intensity`\n# * `mean_green_relative_intensity`\n# * `mean_blue_relative_intensity`\n\nfrom typing import List\nimport numpy as np\n\n\ndef area(images: List[np.ndarray]) -> List[int]:\n # Return list of integers of image areas (height multiplied by width)\n return [img.shape[0] * img.shape[1] for img in images]\n\n\ndef aspect_ratio(images: List[np.ndarray]) -> List[float]:\n # Return list of floats of image height to width ratio\n return [img.shape[0] / img.shape[1] for img in images]\n\n\nproperties = [\n {'name': 'Area', 'method': area, 'output_type': 'continuous'},\n {'name': 'Aspect Ratio', 'method': aspect_ratio, 'output_type': 'continuous'}\n]\n\ncheck_result = ImagePropertyDrift(\n alternative_image_properties=properties,\n max_num_categories=20\n).run(train_dataset, test_dataset)\n\ncheck_result", "path": "docs/source/examples/vision/checks/distribution/source/plot_image_property_check.py"}]}
| num_tokens: 1,646 | num_tokens_diff: 226 |
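A trailing note on the deepchecks record above: the documentation patch introduces the `classes_to_display` and `min_samples` arguments. The sketch below simply re-assembles that example with the imports already used in the same gallery file, assuming a deepchecks build that ships those parameters.

```python
from deepchecks.vision.datasets.detection import coco
from deepchecks.vision.checks.distribution import ImagePropertyDrift

train_dataset = coco.load_dataset(train=True, object_type='VisionData')
test_dataset = coco.load_dataset(train=False, object_type='VisionData')

# Restrict the drift computation to a few classes; min_samples is lowered
# because class filtering leaves only a handful of images per split.
check_result = ImagePropertyDrift(
    classes_to_display=['person', 'cat', 'cell phone', 'car'],
    min_samples=5,
).run(train_dataset, test_dataset)

print(check_result.value)  # per-property drift scores as a DataFrame
```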
| gh_patches_debug_3317 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-4934 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `colossalai/nn/optimizer/cpu_adam.py`
Content:
```
1 import math
2 from typing import Optional
3
4 import torch
5
6 from colossalai.kernel.op_builder import CPUAdamBuilder
7
8 from .nvme_optimizer import NVMeOptimizer
9
10
11 class CPUAdam(NVMeOptimizer):
12 """Implements Adam algorithm.
13
14 Supports parameters updating on both GPU and CPU, depending on the device of parameters.
15 But the parameters and gradients should on the same device:
16 * Parameters on CPU and gradients on CPU is allowed.
17 * Parameters on GPU and gradients on GPU is allowed.
18 * Parameters on GPU and gradients on CPU is **not** allowed.
19
20 `CPUAdam` requires CUDA extensions which can be built during installation or runtime.
21
22 This version of CPU Adam accelerates parameters updating on CPU with SIMD.
23 Support of AVX2 or AVX512 is required.
24
25 The GPU part is implemented in an naive way.
26
27 CPU Adam also supports the hybrid precision calculation, eg. fp32 parameters and fp16 gradients.
28
29 :class:`colossalai.nn.optimizer.CPUAdam` may be used as a drop-in replacement for ``torch.optim.AdamW``,
30 or ``torch.optim.Adam`` with ``adamw_mode=False``
31
32 Adam was been proposed in `Adam: A Method for Stochastic Optimization`_.
33
34 Arguments:
35 model_params (iterable): iterable of parameters of dicts defining
36 parameter groups.
37 lr (float, optional): learning rate. (default: 1e-3)
38 betas (Tuple[float, float], optional): coefficients used for computing
39 running averages of gradient and its square. (default: (0.9, 0.999))
40 eps (float, optional): term added to the denominator to improve
41 numerical stability. (default: 1e-8)
42 weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
43 amsgrad (boolean, optional): whether to use the AMSGrad variant of this
44 algorithm from the paper `On the Convergence of Adam and Beyond`_
45 (default: False) NOT SUPPORTED yet in CPUAdam!
46 adamw_mode (boolean, optional): Apply L2 regularization or weight decay
47 True for decoupled weight decay(also known as AdamW) (default: True)
48 simd_log (boolean, optional): whether to show if you are using SIMD to
49 accelerate. (default: False)
50 nvme_offload_fraction (float, optional): Fraction of optimizer states to be offloaded to NVMe. Defaults to 0.0.
51 nvme_offload_dir (Optional[str], optional): Directory to save NVMe offload files.
52 If it's ``None``, a random temporary directory will be used. Defaults to None.
53
54 .. _Adam\: A Method for Stochastic Optimization:
55 https://arxiv.org/abs/1412.6980
56 .. _On the Convergence of Adam and Beyond:
57 https://openreview.net/forum?id=ryQu7f-RZ
58 """
59
60 # Number of fp32 shards for per parameter
61 # Param weight, grad, momentum and variance
62 num_fp32_shards_per_param = 4
63
64 def __init__(
65 self,
66 model_params,
67 lr=1e-3,
68 bias_correction=True,
69 betas=(0.9, 0.999),
70 eps=1e-8,
71 weight_decay=0,
72 adamw_mode=True,
73 nvme_offload_fraction: float = 0.0,
74 nvme_offload_dir: Optional[str] = None,
75 ):
76 default_args = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, bias_correction=bias_correction)
77 super(CPUAdam, self).__init__(model_params, default_args, nvme_offload_fraction, nvme_offload_dir)
78 self.adamw_mode = adamw_mode
79 cpu_adam = CPUAdamBuilder().load()
80 # if you find yourself stuck here, make sure that you install colossalai with CUDA_EXT=1 specification
81 self.cpu_adam_op = cpu_adam.CPUAdamOptimizer(lr, betas[0], betas[1], eps, weight_decay, adamw_mode)
82
83 def torch_adam_update(
84 self,
85 data,
86 grad,
87 exp_avg,
88 exp_avg_sq,
89 lr,
90 beta1,
91 beta2,
92 eps,
93 weight_decay,
94 bias_correction1,
95 bias_correction2,
96 use_adamw=False,
97 ):
98 grad = grad.to(data.dtype)
99
100 if weight_decay != 0:
101 if use_adamw:
102 data.mul_(1 - lr * weight_decay)
103 else:
104 grad = grad.add(data, alpha=weight_decay)
105
106 # Decay the first and second moment running average coefficient
107 exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
108 exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
109
110 # TODO(jiaruifang) dose not support amsgrad
111 denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)
112
113 step_size = lr / bias_correction1
114
115 data.addcdiv_(exp_avg, denom, value=-step_size)
116
117 @torch.no_grad()
118 def step(self, closure=None, div_scale: float = -1):
119 loss = None
120 if closure is not None:
121 with torch.enable_grad():
122 loss = closure()
123
124 self._pre_step("exp_avg", "exp_avg_sq")
125 for _, group in enumerate(self.param_groups):
126 for _, p in enumerate(group["params"]):
127 if p.grad is None:
128 continue
129
130 state = self.state[p]
131
132 target_device = p.device
133 if len(state) == 0:
134 state["step"] = 0
135 # gradient momentums
136 state["exp_avg"] = torch.zeros_like(p, device=target_device)
137 # gradient variances
138 state["exp_avg_sq"] = torch.zeros_like(p, device=target_device)
139 self._post_state_init(p)
140
141 state["step"] += 1
142 beta1, beta2 = group["betas"]
143
144 if target_device.type == "cpu":
145 assert p.data.numel() == p.grad.data.numel(), "parameter and gradient should have the same size"
146 assert state["exp_avg"].device.type == "cpu", "exp_avg should stay on cpu"
147 assert state["exp_avg_sq"].device.type == "cpu", "exp_avg should stay on cpu"
148 self._pre_update(p, "exp_avg", "exp_avg_sq")
149 if p.grad.dtype is torch.bfloat16:
150 # cpu adam kernel does not support bf16 now
151 bias_correction1 = 1 - beta1 ** state["step"]
152 bias_correction2 = 1 - beta2 ** state["step"]
153 self.torch_adam_update(
154 p.data,
155 p.grad.data,
156 state["exp_avg"],
157 state["exp_avg_sq"],
158 group["lr"],
159 beta1,
160 beta2,
161 group["eps"],
162 group["weight_decay"],
163 bias_correction1,
164 bias_correction2,
165 self.adamw_mode,
166 )
167 else:
168 self.cpu_adam_op.step(
169 state["step"],
170 group["lr"],
171 beta1,
172 beta2,
173 group["eps"],
174 group["weight_decay"],
175 group["bias_correction"],
176 p.data,
177 p.grad.data,
178 state["exp_avg"],
179 state["exp_avg_sq"],
180 div_scale,
181 )
182 self._post_update(p, "exp_avg", "exp_avg_sq")
183 elif target_device.type == "cuda":
184 assert div_scale == -1, "div_scale should remain default"
185 assert state["exp_avg"].device.type == "cuda", "exp_avg should stay on cuda"
186 assert state["exp_avg_sq"].device.type == "cuda", "exp_avg should stay on cuda"
187
188 bias_correction1 = 1 - beta1 ** state["step"]
189 bias_correction2 = 1 - beta2 ** state["step"]
190
191 # adam on cuda
192 self.torch_adam_update(
193 p.data,
194 p.grad.data,
195 state["exp_avg"],
196 state["exp_avg_sq"],
197 group["lr"],
198 beta1,
199 beta2,
200 group["eps"],
201 group["weight_decay"],
202 bias_correction1,
203 bias_correction2,
204 self.adamw_mode,
205 )
206 else:
207 raise RuntimeError
208 self._post_step()
209 return loss
210
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/colossalai/nn/optimizer/cpu_adam.py b/colossalai/nn/optimizer/cpu_adam.py
--- a/colossalai/nn/optimizer/cpu_adam.py
+++ b/colossalai/nn/optimizer/cpu_adam.py
@@ -9,7 +9,8 @@
class CPUAdam(NVMeOptimizer):
- """Implements Adam algorithm.
+ """
+ Implements Adam algorithm.
Supports parameters updating on both GPU and CPU, depending on the device of parameters.
But the parameters and gradients should on the same device:
|
{"golden_diff": "diff --git a/colossalai/nn/optimizer/cpu_adam.py b/colossalai/nn/optimizer/cpu_adam.py\n--- a/colossalai/nn/optimizer/cpu_adam.py\n+++ b/colossalai/nn/optimizer/cpu_adam.py\n@@ -9,7 +9,8 @@\n \n \n class CPUAdam(NVMeOptimizer):\n- \"\"\"Implements Adam algorithm.\n+ \"\"\"\n+ Implements Adam algorithm.\n \n Supports parameters updating on both GPU and CPU, depending on the device of parameters.\n But the parameters and gradients should on the same device:\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "import math\nfrom typing import Optional\n\nimport torch\n\nfrom colossalai.kernel.op_builder import CPUAdamBuilder\n\nfrom .nvme_optimizer import NVMeOptimizer\n\n\nclass CPUAdam(NVMeOptimizer):\n \"\"\"Implements Adam algorithm.\n\n Supports parameters updating on both GPU and CPU, depending on the device of parameters.\n But the parameters and gradients should on the same device:\n * Parameters on CPU and gradients on CPU is allowed.\n * Parameters on GPU and gradients on GPU is allowed.\n * Parameters on GPU and gradients on CPU is **not** allowed.\n\n `CPUAdam` requires CUDA extensions which can be built during installation or runtime.\n\n This version of CPU Adam accelerates parameters updating on CPU with SIMD.\n Support of AVX2 or AVX512 is required.\n\n The GPU part is implemented in an naive way.\n\n CPU Adam also supports the hybrid precision calculation, eg. fp32 parameters and fp16 gradients.\n\n :class:`colossalai.nn.optimizer.CPUAdam` may be used as a drop-in replacement for ``torch.optim.AdamW``,\n or ``torch.optim.Adam`` with ``adamw_mode=False``\n\n Adam was been proposed in `Adam: A Method for Stochastic Optimization`_.\n\n Arguments:\n model_params (iterable): iterable of parameters of dicts defining\n parameter groups.\n lr (float, optional): learning rate. (default: 1e-3)\n betas (Tuple[float, float], optional): coefficients used for computing\n running averages of gradient and its square. (default: (0.9, 0.999))\n eps (float, optional): term added to the denominator to improve\n numerical stability. (default: 1e-8)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n amsgrad (boolean, optional): whether to use the AMSGrad variant of this\n algorithm from the paper `On the Convergence of Adam and Beyond`_\n (default: False) NOT SUPPORTED yet in CPUAdam!\n adamw_mode (boolean, optional): Apply L2 regularization or weight decay\n True for decoupled weight decay(also known as AdamW) (default: True)\n simd_log (boolean, optional): whether to show if you are using SIMD to\n accelerate. (default: False)\n nvme_offload_fraction (float, optional): Fraction of optimizer states to be offloaded to NVMe. Defaults to 0.0.\n nvme_offload_dir (Optional[str], optional): Directory to save NVMe offload files.\n If it's ``None``, a random temporary directory will be used. Defaults to None.\n\n .. _Adam\\: A Method for Stochastic Optimization:\n https://arxiv.org/abs/1412.6980\n .. 
_On the Convergence of Adam and Beyond:\n https://openreview.net/forum?id=ryQu7f-RZ\n \"\"\"\n\n # Number of fp32 shards for per parameter\n # Param weight, grad, momentum and variance\n num_fp32_shards_per_param = 4\n\n def __init__(\n self,\n model_params,\n lr=1e-3,\n bias_correction=True,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0,\n adamw_mode=True,\n nvme_offload_fraction: float = 0.0,\n nvme_offload_dir: Optional[str] = None,\n ):\n default_args = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, bias_correction=bias_correction)\n super(CPUAdam, self).__init__(model_params, default_args, nvme_offload_fraction, nvme_offload_dir)\n self.adamw_mode = adamw_mode\n cpu_adam = CPUAdamBuilder().load()\n # if you find yourself stuck here, make sure that you install colossalai with CUDA_EXT=1 specification\n self.cpu_adam_op = cpu_adam.CPUAdamOptimizer(lr, betas[0], betas[1], eps, weight_decay, adamw_mode)\n\n def torch_adam_update(\n self,\n data,\n grad,\n exp_avg,\n exp_avg_sq,\n lr,\n beta1,\n beta2,\n eps,\n weight_decay,\n bias_correction1,\n bias_correction2,\n use_adamw=False,\n ):\n grad = grad.to(data.dtype)\n\n if weight_decay != 0:\n if use_adamw:\n data.mul_(1 - lr * weight_decay)\n else:\n grad = grad.add(data, alpha=weight_decay)\n\n # Decay the first and second moment running average coefficient\n exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)\n exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)\n\n # TODO(jiaruifang) dose not support amsgrad\n denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)\n\n step_size = lr / bias_correction1\n\n data.addcdiv_(exp_avg, denom, value=-step_size)\n\n @torch.no_grad()\n def step(self, closure=None, div_scale: float = -1):\n loss = None\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n\n self._pre_step(\"exp_avg\", \"exp_avg_sq\")\n for _, group in enumerate(self.param_groups):\n for _, p in enumerate(group[\"params\"]):\n if p.grad is None:\n continue\n\n state = self.state[p]\n\n target_device = p.device\n if len(state) == 0:\n state[\"step\"] = 0\n # gradient momentums\n state[\"exp_avg\"] = torch.zeros_like(p, device=target_device)\n # gradient variances\n state[\"exp_avg_sq\"] = torch.zeros_like(p, device=target_device)\n self._post_state_init(p)\n\n state[\"step\"] += 1\n beta1, beta2 = group[\"betas\"]\n\n if target_device.type == \"cpu\":\n assert p.data.numel() == p.grad.data.numel(), \"parameter and gradient should have the same size\"\n assert state[\"exp_avg\"].device.type == \"cpu\", \"exp_avg should stay on cpu\"\n assert state[\"exp_avg_sq\"].device.type == \"cpu\", \"exp_avg should stay on cpu\"\n self._pre_update(p, \"exp_avg\", \"exp_avg_sq\")\n if p.grad.dtype is torch.bfloat16:\n # cpu adam kernel does not support bf16 now\n bias_correction1 = 1 - beta1 ** state[\"step\"]\n bias_correction2 = 1 - beta2 ** state[\"step\"]\n self.torch_adam_update(\n p.data,\n p.grad.data,\n state[\"exp_avg\"],\n state[\"exp_avg_sq\"],\n group[\"lr\"],\n beta1,\n beta2,\n group[\"eps\"],\n group[\"weight_decay\"],\n bias_correction1,\n bias_correction2,\n self.adamw_mode,\n )\n else:\n self.cpu_adam_op.step(\n state[\"step\"],\n group[\"lr\"],\n beta1,\n beta2,\n group[\"eps\"],\n group[\"weight_decay\"],\n group[\"bias_correction\"],\n p.data,\n p.grad.data,\n state[\"exp_avg\"],\n state[\"exp_avg_sq\"],\n div_scale,\n )\n self._post_update(p, \"exp_avg\", \"exp_avg_sq\")\n elif target_device.type == \"cuda\":\n assert div_scale == -1, \"div_scale should remain 
default\"\n assert state[\"exp_avg\"].device.type == \"cuda\", \"exp_avg should stay on cuda\"\n assert state[\"exp_avg_sq\"].device.type == \"cuda\", \"exp_avg should stay on cuda\"\n\n bias_correction1 = 1 - beta1 ** state[\"step\"]\n bias_correction2 = 1 - beta2 ** state[\"step\"]\n\n # adam on cuda\n self.torch_adam_update(\n p.data,\n p.grad.data,\n state[\"exp_avg\"],\n state[\"exp_avg_sq\"],\n group[\"lr\"],\n beta1,\n beta2,\n group[\"eps\"],\n group[\"weight_decay\"],\n bias_correction1,\n bias_correction2,\n self.adamw_mode,\n )\n else:\n raise RuntimeError\n self._post_step()\n return loss\n", "path": "colossalai/nn/optimizer/cpu_adam.py"}], "after_files": [{"content": "import math\nfrom typing import Optional\n\nimport torch\n\nfrom colossalai.kernel.op_builder import CPUAdamBuilder\n\nfrom .nvme_optimizer import NVMeOptimizer\n\n\nclass CPUAdam(NVMeOptimizer):\n \"\"\"\n Implements Adam algorithm.\n\n Supports parameters updating on both GPU and CPU, depending on the device of parameters.\n But the parameters and gradients should on the same device:\n * Parameters on CPU and gradients on CPU is allowed.\n * Parameters on GPU and gradients on GPU is allowed.\n * Parameters on GPU and gradients on CPU is **not** allowed.\n\n `CPUAdam` requires CUDA extensions which can be built during installation or runtime.\n\n This version of CPU Adam accelerates parameters updating on CPU with SIMD.\n Support of AVX2 or AVX512 is required.\n\n The GPU part is implemented in an naive way.\n\n CPU Adam also supports the hybrid precision calculation, eg. fp32 parameters and fp16 gradients.\n\n :class:`colossalai.nn.optimizer.CPUAdam` may be used as a drop-in replacement for ``torch.optim.AdamW``,\n or ``torch.optim.Adam`` with ``adamw_mode=False``\n\n Adam was been proposed in `Adam: A Method for Stochastic Optimization`_.\n\n Arguments:\n model_params (iterable): iterable of parameters of dicts defining\n parameter groups.\n lr (float, optional): learning rate. (default: 1e-3)\n betas (Tuple[float, float], optional): coefficients used for computing\n running averages of gradient and its square. (default: (0.9, 0.999))\n eps (float, optional): term added to the denominator to improve\n numerical stability. (default: 1e-8)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n amsgrad (boolean, optional): whether to use the AMSGrad variant of this\n algorithm from the paper `On the Convergence of Adam and Beyond`_\n (default: False) NOT SUPPORTED yet in CPUAdam!\n adamw_mode (boolean, optional): Apply L2 regularization or weight decay\n True for decoupled weight decay(also known as AdamW) (default: True)\n simd_log (boolean, optional): whether to show if you are using SIMD to\n accelerate. (default: False)\n nvme_offload_fraction (float, optional): Fraction of optimizer states to be offloaded to NVMe. Defaults to 0.0.\n nvme_offload_dir (Optional[str], optional): Directory to save NVMe offload files.\n If it's ``None``, a random temporary directory will be used. Defaults to None.\n\n .. _Adam\\: A Method for Stochastic Optimization:\n https://arxiv.org/abs/1412.6980\n .. 
_On the Convergence of Adam and Beyond:\n https://openreview.net/forum?id=ryQu7f-RZ\n \"\"\"\n\n # Number of fp32 shards for per parameter\n # Param weight, grad, momentum and variance\n num_fp32_shards_per_param = 4\n\n def __init__(\n self,\n model_params,\n lr=1e-3,\n bias_correction=True,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0,\n adamw_mode=True,\n nvme_offload_fraction: float = 0.0,\n nvme_offload_dir: Optional[str] = None,\n ):\n default_args = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, bias_correction=bias_correction)\n super(CPUAdam, self).__init__(model_params, default_args, nvme_offload_fraction, nvme_offload_dir)\n self.adamw_mode = adamw_mode\n cpu_adam = CPUAdamBuilder().load()\n # if you find yourself stuck here, make sure that you install colossalai with CUDA_EXT=1 specification\n self.cpu_adam_op = cpu_adam.CPUAdamOptimizer(lr, betas[0], betas[1], eps, weight_decay, adamw_mode)\n\n def torch_adam_update(\n self,\n data,\n grad,\n exp_avg,\n exp_avg_sq,\n lr,\n beta1,\n beta2,\n eps,\n weight_decay,\n bias_correction1,\n bias_correction2,\n use_adamw=False,\n ):\n grad = grad.to(data.dtype)\n\n if weight_decay != 0:\n if use_adamw:\n data.mul_(1 - lr * weight_decay)\n else:\n grad = grad.add(data, alpha=weight_decay)\n\n # Decay the first and second moment running average coefficient\n exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)\n exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)\n\n # TODO(jiaruifang) dose not support amsgrad\n denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)\n\n step_size = lr / bias_correction1\n\n data.addcdiv_(exp_avg, denom, value=-step_size)\n\n @torch.no_grad()\n def step(self, closure=None, div_scale: float = -1):\n loss = None\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n\n self._pre_step(\"exp_avg\", \"exp_avg_sq\")\n for _, group in enumerate(self.param_groups):\n for _, p in enumerate(group[\"params\"]):\n if p.grad is None:\n continue\n\n state = self.state[p]\n\n target_device = p.device\n if len(state) == 0:\n state[\"step\"] = 0\n # gradient momentums\n state[\"exp_avg\"] = torch.zeros_like(p, device=target_device)\n # gradient variances\n state[\"exp_avg_sq\"] = torch.zeros_like(p, device=target_device)\n self._post_state_init(p)\n\n state[\"step\"] += 1\n beta1, beta2 = group[\"betas\"]\n\n if target_device.type == \"cpu\":\n assert p.data.numel() == p.grad.data.numel(), \"parameter and gradient should have the same size\"\n assert state[\"exp_avg\"].device.type == \"cpu\", \"exp_avg should stay on cpu\"\n assert state[\"exp_avg_sq\"].device.type == \"cpu\", \"exp_avg should stay on cpu\"\n self._pre_update(p, \"exp_avg\", \"exp_avg_sq\")\n if p.grad.dtype is torch.bfloat16:\n # cpu adam kernel does not support bf16 now\n bias_correction1 = 1 - beta1 ** state[\"step\"]\n bias_correction2 = 1 - beta2 ** state[\"step\"]\n self.torch_adam_update(\n p.data,\n p.grad.data,\n state[\"exp_avg\"],\n state[\"exp_avg_sq\"],\n group[\"lr\"],\n beta1,\n beta2,\n group[\"eps\"],\n group[\"weight_decay\"],\n bias_correction1,\n bias_correction2,\n self.adamw_mode,\n )\n else:\n self.cpu_adam_op.step(\n state[\"step\"],\n group[\"lr\"],\n beta1,\n beta2,\n group[\"eps\"],\n group[\"weight_decay\"],\n group[\"bias_correction\"],\n p.data,\n p.grad.data,\n state[\"exp_avg\"],\n state[\"exp_avg_sq\"],\n div_scale,\n )\n self._post_update(p, \"exp_avg\", \"exp_avg_sq\")\n elif target_device.type == \"cuda\":\n assert div_scale == -1, \"div_scale should remain 
default\"\n assert state[\"exp_avg\"].device.type == \"cuda\", \"exp_avg should stay on cuda\"\n assert state[\"exp_avg_sq\"].device.type == \"cuda\", \"exp_avg should stay on cuda\"\n\n bias_correction1 = 1 - beta1 ** state[\"step\"]\n bias_correction2 = 1 - beta2 ** state[\"step\"]\n\n # adam on cuda\n self.torch_adam_update(\n p.data,\n p.grad.data,\n state[\"exp_avg\"],\n state[\"exp_avg_sq\"],\n group[\"lr\"],\n beta1,\n beta2,\n group[\"eps\"],\n group[\"weight_decay\"],\n bias_correction1,\n bias_correction2,\n self.adamw_mode,\n )\n else:\n raise RuntimeError\n self._post_step()\n return loss\n", "path": "colossalai/nn/optimizer/cpu_adam.py"}]}
| num_tokens: 2,660 | num_tokens_diff: 126 |
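A trailing note on the ColossalAI record above: the patch itself only reflows a docstring, but the `torch_adam_update` fallback shown in the file is where the optimizer's math lives. For reference, the bias-corrected step it applies, written with the code's own names, is restated below; this is a summary of the existing code, not new behaviour.

```latex
% Bias-corrected Adam step as implemented by torch_adam_update
% (g_t = grad, m_t = exp_avg, v_t = exp_avg_sq, \theta = parameter):
\begin{aligned}
m_t &= \beta_1 m_{t-1} + (1-\beta_1)\,g_t,\\
v_t &= \beta_2 v_{t-1} + (1-\beta_2)\,g_t^{2},\\
\theta_t &= \theta_{t-1} \;-\; \frac{\mathrm{lr}}{1-\beta_1^{t}}\cdot
            \frac{m_t}{\sqrt{v_t}\,/\sqrt{1-\beta_2^{t}}+\epsilon}.
\end{aligned}
```

In `adamw_mode` the weight decay is decoupled, applied as `data.mul_(1 - lr * weight_decay)` before this step; otherwise `weight_decay * data` is folded into the gradient first, exactly as the branch at the top of the function shows.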
| gh_patches_debug_1730 | rasdani/github-patches | git_diff | cal-itp__benefits-209 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Secure Django's language cookie
The following can be configured in [`settings.py`](https://github.com/cal-itp/benefits/blob/dev/benefits/settings.py)
* [x] [`LANGUAGE_COOKIE_HTTPONLY`](https://docs.djangoproject.com/en/3.2/ref/settings/#language-cookie-httponly) = True (same as `SESSION_COOKIE_HTTPONLY`)
* [x] [`LANGUAGE_COOKIE_SAMESITE`](https://docs.djangoproject.com/en/3.2/ref/settings/#language-cookie-samesite) = "Strict" (same as `SESSION_COOKIE_SAMESITE`)
* [x] [`LANGUAGE_COOKIE_SECURE`](https://docs.djangoproject.com/en/3.2/ref/settings/#language-cookie-secure) = True (same as `SESSION_COOKIE_SECURE`)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `benefits/settings.py`
Content:
```
1 """
2 Django settings for benefits project.
3 """
4 import os
5
6 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
7 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
8
9 # SECURITY WARNING: keep the secret key used in production secret!
10 SECRET_KEY = os.environ["DJANGO_SECRET_KEY"]
11
12 # SECURITY WARNING: don't run with debug turned on in production!
13 DEBUG = os.environ.get("DJANGO_DEBUG", "False").lower() == "true"
14
15 ADMIN = os.environ.get("DJANGO_ADMIN", "False").lower() == "true"
16
17 ALLOWED_HOSTS = []
18
19 if DEBUG:
20 ALLOWED_HOSTS.extend(["*"])
21 else:
22 hosts = os.environ["DJANGO_ALLOWED_HOSTS"].split(",")
23 ALLOWED_HOSTS.extend(hosts)
24
25 # Application definition
26
27 INSTALLED_APPS = [
28 "django.contrib.sessions",
29 "django.contrib.staticfiles",
30 "benefits.core",
31 "benefits.enrollment",
32 "benefits.eligibility",
33 ]
34
35 if ADMIN:
36 INSTALLED_APPS.extend(
37 [
38 "django.contrib.admin",
39 "django.contrib.auth",
40 "django.contrib.contenttypes",
41 "django.contrib.messages",
42 ]
43 )
44
45 MIDDLEWARE = [
46 "django.middleware.security.SecurityMiddleware",
47 "django.contrib.sessions.middleware.SessionMiddleware",
48 "django.middleware.locale.LocaleMiddleware",
49 "benefits.core.middleware.Healthcheck",
50 "django.middleware.common.CommonMiddleware",
51 "django.middleware.csrf.CsrfViewMiddleware",
52 "django.middleware.clickjacking.XFrameOptionsMiddleware",
53 "benefits.core.middleware.DebugSession",
54 "benefits.core.middleware.ChangedLanguageEvent",
55 ]
56
57 if ADMIN:
58 MIDDLEWARE.extend(
59 [
60 "django.contrib.auth.middleware.AuthenticationMiddleware",
61 "django.contrib.messages.middleware.MessageMiddleware",
62 ]
63 )
64
65 CSRF_COOKIE_HTTPONLY = True
66
67 SESSION_COOKIE_AGE = 3600
68 SESSION_COOKIE_SAMESITE = "Strict"
69 SESSION_ENGINE = "django.contrib.sessions.backends.signed_cookies"
70
71 if not DEBUG:
72 CSRF_COOKIE_SECURE = True
73 CSRF_FAILURE_VIEW = "benefits.core.views.csrf_failure"
74 SESSION_COOKIE_SECURE = True
75
76 ROOT_URLCONF = "benefits.urls"
77
78 template_ctx_processors = [
79 "django.template.context_processors.request",
80 "benefits.core.context_processors.analytics",
81 ]
82
83 if DEBUG:
84 template_ctx_processors.extend(
85 [
86 "django.template.context_processors.debug",
87 "benefits.core.context_processors.debug",
88 ]
89 )
90
91 if ADMIN:
92 template_ctx_processors.extend(
93 [
94 "django.contrib.auth.context_processors.auth",
95 "django.contrib.messages.context_processors.messages",
96 ]
97 )
98
99 TEMPLATES = [
100 {
101 "BACKEND": "django.template.backends.django.DjangoTemplates",
102 "DIRS": [os.path.join(BASE_DIR, "benefits", "templates")],
103 "APP_DIRS": True,
104 "OPTIONS": {
105 "context_processors": template_ctx_processors,
106 },
107 },
108 ]
109
110 WSGI_APPLICATION = "benefits.wsgi.application"
111
112 DATABASES = {
113 "default": {
114 "ENGINE": "django.db.backends.sqlite3",
115 "NAME": os.environ.get("DJANGO_DB", "django") + ".db",
116 }
117 }
118
119 # Password validation
120
121 AUTH_PASSWORD_VALIDATORS = []
122
123 if ADMIN:
124 AUTH_PASSWORD_VALIDATORS.extend(
125 [
126 {
127 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
128 },
129 {
130 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
131 },
132 {
133 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
134 },
135 {
136 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
137 },
138 ]
139 )
140
141 # Internationalization
142
143 LANGUAGE_CODE = "en"
144
145 LANGUAGES = [("en", "English"), ("es", "Español")]
146
147 LOCALE_PATHS = [os.path.join(BASE_DIR, "benefits", "locale")]
148
149 USE_I18N = True
150 USE_L10N = True
151
152 TIME_ZONE = "UTC"
153 USE_TZ = True
154
155 # Static files (CSS, JavaScript, Images)
156
157 STATIC_URL = "/static/"
158 STATICFILES_DIRS = [os.path.join(BASE_DIR, "benefits", "static")]
159 STATIC_ROOT = os.path.join(BASE_DIR, "static")
160
161 # Logging configuration
162
163 LOG_LEVEL = os.environ.get("DJANGO_LOG_LEVEL", "DEBUG" if DEBUG else "WARNING")
164 LOGGING = {
165 "version": 1,
166 "disable_existing_loggers": False,
167 "formatters": {
168 "default": {
169 "format": "[{asctime}] {levelname} {name}:{lineno} {message}",
170 "datefmt": "%d/%b/%Y %H:%M:%S",
171 "style": "{",
172 },
173 },
174 "handlers": {
175 "default": {"class": "logging.StreamHandler", "formatter": "default"},
176 },
177 "root": {
178 "handlers": ["default"],
179 "level": LOG_LEVEL,
180 },
181 "loggers": {"django": {"handlers": ["default"], "propagate": False}},
182 }
183
184 # Analytics configuration
185
186 ANALYTICS_KEY = os.environ.get("ANALYTICS_KEY")
187
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/benefits/settings.py b/benefits/settings.py
--- a/benefits/settings.py
+++ b/benefits/settings.py
@@ -142,6 +142,10 @@
LANGUAGE_CODE = "en"
+LANGUAGE_COOKIE_HTTPONLY = True
+LANGUAGE_COOKIE_SAMESITE = "Strict"
+LANGUAGE_COOKIE_SECURE = True
+
LANGUAGES = [("en", "English"), ("es", "Español")]
LOCALE_PATHS = [os.path.join(BASE_DIR, "benefits", "locale")]
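A follow-on note on the patch above: Django's built-in `set_language` view is what writes the language cookie, so one way to confirm the three new settings actually reach the `Set-Cookie` header is a small test against that view. The `/i18n/setlang/` route below assumes `django.conf.urls.i18n` is included at `/i18n/`; that wiring is an illustrative assumption, not something taken from the benefits project.

```python
# Hedged sketch: assert the hardened language-cookie flags on the response.
from django.conf import settings
from django.test import Client


def test_language_cookie_is_hardened():
    client = Client()
    # POSTing a language switch makes set_language emit the cookie on the redirect.
    response = client.post("/i18n/setlang/", {"language": "es", "next": "/"})
    morsel = response.cookies[settings.LANGUAGE_COOKIE_NAME]
    assert morsel["httponly"]
    assert morsel["samesite"] == "Strict"
    assert morsel["secure"]
```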
|
{"golden_diff": "diff --git a/benefits/settings.py b/benefits/settings.py\n--- a/benefits/settings.py\n+++ b/benefits/settings.py\n@@ -142,6 +142,10 @@\n \n LANGUAGE_CODE = \"en\"\n \n+LANGUAGE_COOKIE_HTTPONLY = True\n+LANGUAGE_COOKIE_SAMESITE = \"Strict\"\n+LANGUAGE_COOKIE_SECURE = True\n+\n LANGUAGES = [(\"en\", \"English\"), (\"es\", \"Espa\u00f1ol\")]\n \n LOCALE_PATHS = [os.path.join(BASE_DIR, \"benefits\", \"locale\")]\n", "issue": "Secure Django's language cookie\nThe following can be configured in [`settings.py`](https://github.com/cal-itp/benefits/blob/dev/benefits/settings.py)\r\n\r\n* [x] [`LANGUAGE_COOKIE_HTTPONLY`](https://docs.djangoproject.com/en/3.2/ref/settings/#language-cookie-httponly) = True (same as `SESSION_COOKIE_HTTPONLY`)\r\n* [x] [`LANGUAGE_COOKIE_SAMESITE`](https://docs.djangoproject.com/en/3.2/ref/settings/#language-cookie-samesite) = \"Strict\" (same as `SESSION_COOKIE_SAMESITE`)\r\n* [x] [`LANGUAGE_COOKIE_SECURE`](https://docs.djangoproject.com/en/3.2/ref/settings/#language-cookie-secure) = True (same as `SESSION_COOKIE_SECURE`)\r\n\n", "before_files": [{"content": "\"\"\"\nDjango settings for benefits project.\n\"\"\"\nimport os\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ[\"DJANGO_SECRET_KEY\"]\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = os.environ.get(\"DJANGO_DEBUG\", \"False\").lower() == \"true\"\n\nADMIN = os.environ.get(\"DJANGO_ADMIN\", \"False\").lower() == \"true\"\n\nALLOWED_HOSTS = []\n\nif DEBUG:\n ALLOWED_HOSTS.extend([\"*\"])\nelse:\n hosts = os.environ[\"DJANGO_ALLOWED_HOSTS\"].split(\",\")\n ALLOWED_HOSTS.extend(hosts)\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.sessions\",\n \"django.contrib.staticfiles\",\n \"benefits.core\",\n \"benefits.enrollment\",\n \"benefits.eligibility\",\n]\n\nif ADMIN:\n INSTALLED_APPS.extend(\n [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.messages\",\n ]\n )\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"benefits.core.middleware.Healthcheck\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"benefits.core.middleware.DebugSession\",\n \"benefits.core.middleware.ChangedLanguageEvent\",\n]\n\nif ADMIN:\n MIDDLEWARE.extend(\n [\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n ]\n )\n\nCSRF_COOKIE_HTTPONLY = True\n\nSESSION_COOKIE_AGE = 3600\nSESSION_COOKIE_SAMESITE = \"Strict\"\nSESSION_ENGINE = \"django.contrib.sessions.backends.signed_cookies\"\n\nif not DEBUG:\n CSRF_COOKIE_SECURE = True\n CSRF_FAILURE_VIEW = \"benefits.core.views.csrf_failure\"\n SESSION_COOKIE_SECURE = True\n\nROOT_URLCONF = \"benefits.urls\"\n\ntemplate_ctx_processors = [\n \"django.template.context_processors.request\",\n \"benefits.core.context_processors.analytics\",\n]\n\nif DEBUG:\n template_ctx_processors.extend(\n [\n \"django.template.context_processors.debug\",\n \"benefits.core.context_processors.debug\",\n ]\n )\n\nif ADMIN:\n template_ctx_processors.extend(\n [\n 
\"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ]\n )\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [os.path.join(BASE_DIR, \"benefits\", \"templates\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": template_ctx_processors,\n },\n },\n]\n\nWSGI_APPLICATION = \"benefits.wsgi.application\"\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": os.environ.get(\"DJANGO_DB\", \"django\") + \".db\",\n }\n}\n\n# Password validation\n\nAUTH_PASSWORD_VALIDATORS = []\n\nif ADMIN:\n AUTH_PASSWORD_VALIDATORS.extend(\n [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n ]\n )\n\n# Internationalization\n\nLANGUAGE_CODE = \"en\"\n\nLANGUAGES = [(\"en\", \"English\"), (\"es\", \"Espa\u00f1ol\")]\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, \"benefits\", \"locale\")]\n\nUSE_I18N = True\nUSE_L10N = True\n\nTIME_ZONE = \"UTC\"\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = [os.path.join(BASE_DIR, \"benefits\", \"static\")]\nSTATIC_ROOT = os.path.join(BASE_DIR, \"static\")\n\n# Logging configuration\n\nLOG_LEVEL = os.environ.get(\"DJANGO_LOG_LEVEL\", \"DEBUG\" if DEBUG else \"WARNING\")\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"default\": {\n \"format\": \"[{asctime}] {levelname} {name}:{lineno} {message}\",\n \"datefmt\": \"%d/%b/%Y %H:%M:%S\",\n \"style\": \"{\",\n },\n },\n \"handlers\": {\n \"default\": {\"class\": \"logging.StreamHandler\", \"formatter\": \"default\"},\n },\n \"root\": {\n \"handlers\": [\"default\"],\n \"level\": LOG_LEVEL,\n },\n \"loggers\": {\"django\": {\"handlers\": [\"default\"], \"propagate\": False}},\n}\n\n# Analytics configuration\n\nANALYTICS_KEY = os.environ.get(\"ANALYTICS_KEY\")\n", "path": "benefits/settings.py"}], "after_files": [{"content": "\"\"\"\nDjango settings for benefits project.\n\"\"\"\nimport os\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ[\"DJANGO_SECRET_KEY\"]\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = os.environ.get(\"DJANGO_DEBUG\", \"False\").lower() == \"true\"\n\nADMIN = os.environ.get(\"DJANGO_ADMIN\", \"False\").lower() == \"true\"\n\nALLOWED_HOSTS = []\n\nif DEBUG:\n ALLOWED_HOSTS.extend([\"*\"])\nelse:\n hosts = os.environ[\"DJANGO_ALLOWED_HOSTS\"].split(\",\")\n ALLOWED_HOSTS.extend(hosts)\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.sessions\",\n \"django.contrib.staticfiles\",\n \"benefits.core\",\n \"benefits.enrollment\",\n \"benefits.eligibility\",\n]\n\nif ADMIN:\n INSTALLED_APPS.extend(\n [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.messages\",\n ]\n )\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n 
\"benefits.core.middleware.Healthcheck\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"benefits.core.middleware.DebugSession\",\n \"benefits.core.middleware.ChangedLanguageEvent\",\n]\n\nif ADMIN:\n MIDDLEWARE.extend(\n [\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n ]\n )\n\nCSRF_COOKIE_HTTPONLY = True\n\nSESSION_COOKIE_AGE = 3600\nSESSION_COOKIE_SAMESITE = \"Strict\"\nSESSION_ENGINE = \"django.contrib.sessions.backends.signed_cookies\"\n\nif not DEBUG:\n CSRF_COOKIE_SECURE = True\n CSRF_FAILURE_VIEW = \"benefits.core.views.csrf_failure\"\n SESSION_COOKIE_SECURE = True\n\nROOT_URLCONF = \"benefits.urls\"\n\ntemplate_ctx_processors = [\n \"django.template.context_processors.request\",\n \"benefits.core.context_processors.analytics\",\n]\n\nif DEBUG:\n template_ctx_processors.extend(\n [\n \"django.template.context_processors.debug\",\n \"benefits.core.context_processors.debug\",\n ]\n )\n\nif ADMIN:\n template_ctx_processors.extend(\n [\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ]\n )\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [os.path.join(BASE_DIR, \"benefits\", \"templates\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": template_ctx_processors,\n },\n },\n]\n\nWSGI_APPLICATION = \"benefits.wsgi.application\"\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": os.environ.get(\"DJANGO_DB\", \"django\") + \".db\",\n }\n}\n\n# Password validation\n\nAUTH_PASSWORD_VALIDATORS = []\n\nif ADMIN:\n AUTH_PASSWORD_VALIDATORS.extend(\n [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n ]\n )\n\n# Internationalization\n\nLANGUAGE_CODE = \"en\"\n\nLANGUAGE_COOKIE_HTTPONLY = True\nLANGUAGE_COOKIE_SAMESITE = \"Strict\"\nLANGUAGE_COOKIE_SECURE = True\n\nLANGUAGES = [(\"en\", \"English\"), (\"es\", \"Espa\u00f1ol\")]\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, \"benefits\", \"locale\")]\n\nUSE_I18N = True\nUSE_L10N = True\n\nTIME_ZONE = \"UTC\"\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = [os.path.join(BASE_DIR, \"benefits\", \"static\")]\nSTATIC_ROOT = os.path.join(BASE_DIR, \"static\")\n\n# Logging configuration\n\nLOG_LEVEL = os.environ.get(\"DJANGO_LOG_LEVEL\", \"DEBUG\" if DEBUG else \"WARNING\")\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"default\": {\n \"format\": \"[{asctime}] {levelname} {name}:{lineno} {message}\",\n \"datefmt\": \"%d/%b/%Y %H:%M:%S\",\n \"style\": \"{\",\n },\n },\n \"handlers\": {\n \"default\": {\"class\": \"logging.StreamHandler\", \"formatter\": \"default\"},\n },\n \"root\": {\n \"handlers\": [\"default\"],\n \"level\": LOG_LEVEL,\n },\n \"loggers\": {\"django\": {\"handlers\": [\"default\"], \"propagate\": False}},\n}\n\n# Analytics configuration\n\nANALYTICS_KEY = os.environ.get(\"ANALYTICS_KEY\")\n", "path": "benefits/settings.py"}]}
| 1,977 | 119 |
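Read out of the escaped JSON above, the cookie hardening in this record reduces to three Django settings. The sketch below is only a readable restatement of that golden diff; the setting names are the documented Django ones linked in the issue, and their placement next to `LANGUAGE_CODE` follows the patch.

```python
# Minimal sketch of the language-cookie hardening applied to benefits/settings.py
LANGUAGE_COOKIE_HTTPONLY = True        # cookie is not readable from JavaScript
LANGUAGE_COOKIE_SAMESITE = "Strict"    # never attached to cross-site requests
LANGUAGE_COOKIE_SECURE = True          # only transmitted over HTTPS
```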
gh_patches_debug_36828
|
rasdani/github-patches
|
git_diff
|
crytic__slither-577
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
FileNotFoundError: [Errno 2] No such file or directory: 'solc'
I launch program like this:
slither Contract.sol
and receive an error:
"FileNotFoundError: [Errno 2] No such file or directory: 'solc'"
I have solc installed.
$ solcjs --version
0.4.25+commit.59dbf8f1.Emscripten.clang
But executable is called **solcjs**, not **solc**. Or it is something different?
Reasoning for "Trusted" versions of Solidity
Re: https://github.com/crytic/slither/wiki/Detector-Documentation#incorrect-versions-of-solidity
I am wondering why `0.4.25 or 0.5.11` are chosen as trusted versions.
Would 0.4.26 or 0.5.17 be acceptable? (current highest patches of those minor versions)
Why are none of the 0.6.x versions included as a "trusted" version?
What are the criteria for adding / bumping the version of "trusted"? Is it based on a simple metric like time since release? Or were those two specific versions manually audited by your team members?
Sorry for all the questions. I have a project which is fairly portable between `>=0.5.0 <=0.7.0` and am wondering which version to target... I would like to use `immutable` from `>=0.6.5`, but not at the expense of some possible security issue. And after using this tool I wondered what the criteria was for "trusted versions" of the compiler.
Thanks.
--- END ISSUE ---
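The first half of the issue is an environment mismatch rather than a detector bug: the error shows an executable literally named `solc` being invoked, while the npm/Emscripten toolchain installs one named `solcjs`. A plain standard-library check (nothing slither-specific, shown only to make the distinction concrete) is:

```python
import shutil

# Which of the two binaries is actually on PATH?
for name in ("solc", "solcjs"):
    print(f"{name:7s} -> {shutil.which(name)}")
```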
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `slither/detectors/attributes/incorrect_solc.py`
Content:
```
1 """
2 Check if an incorrect version of solc is used
3 """
4
5 import re
6 from slither.detectors.abstract_detector import AbstractDetector, DetectorClassification
7 from slither.formatters.attributes.incorrect_solc import format
8
9 # group:
10 # 0: ^ > >= < <= (optional)
11 # 1: ' ' (optional)
12 # 2: version number
13 # 3: version number
14 # 4: version number
15
16 PATTERN = re.compile('(\^|>|>=|<|<=)?([ ]+)?(\d+)\.(\d+)\.(\d+)')
17
18
19 class IncorrectSolc(AbstractDetector):
20 """
21 Check if an old version of solc is used
22 """
23
24 ARGUMENT = 'solc-version'
25 HELP = 'Incorrect Solidity version'
26 IMPACT = DetectorClassification.INFORMATIONAL
27 CONFIDENCE = DetectorClassification.HIGH
28
29 WIKI = 'https://github.com/crytic/slither/wiki/Detector-Documentation#incorrect-versions-of-solidity'
30
31 WIKI_TITLE = 'Incorrect versions of Solidity'
32 WIKI_DESCRIPTION = '''
33 `solc` frequently releases new compiler versions. Using an old version prevents access to new Solidity security checks.
34 We also recommend avoiding complex `pragma` statement.'''
35 WIKI_RECOMMENDATION = '''
36 Use Solidity 0.4.25 or 0.5.11. Consider using the latest version of Solidity for testing the compilation, and a trusted version for deploying.'''
37
38 COMPLEX_PRAGMA_TXT = "is too complex"
39 OLD_VERSION_TXT = "allows old versions"
40 LESS_THAN_TXT = "uses lesser than"
41
42 TOO_RECENT_VERSION_TXT = "necessitates versions too recent to be trusted. Consider deploying with 0.5.11"
43 BUGGY_VERSION_TXT = "is known to contain severe issue (https://solidity.readthedocs.io/en/v0.5.8/bugs.html)"
44
45 # Indicates the allowed versions. Must be formatted in increasing order.
46 ALLOWED_VERSIONS = ["0.4.25", "0.4.26", "0.5.11"]
47
48 # Indicates the versions that should not be used.
49 BUGGY_VERSIONS = ["0.4.22", "^0.4.22",
50 "0.5.5", "^0.5.5",
51 "0.5.6", "^0.5.6",
52 "0.5.14", "^0.5.14"]
53
54 def _check_version(self, version):
55 op = version[0]
56 if op and op not in ['>', '>=', '^']:
57 return self.LESS_THAN_TXT
58 version_number = '.'.join(version[2:])
59 if version_number not in self.ALLOWED_VERSIONS:
60 if list(map(int, version[2:])) > list(map(int, self.ALLOWED_VERSIONS[-1].split('.'))):
61 return self.TOO_RECENT_VERSION_TXT
62 return self.OLD_VERSION_TXT
63 return None
64
65 def _check_pragma(self, version):
66 if version in self.BUGGY_VERSIONS:
67 return self.BUGGY_VERSION_TXT
68 versions = PATTERN.findall(version)
69 if len(versions) == 1:
70 version = versions[0]
71 return self._check_version(version)
72 elif len(versions) == 2:
73 version_left = versions[0]
74 version_right = versions[1]
75 # Only allow two elements if the second one is
76 # <0.5.0 or <0.6.0
77 if version_right not in [('<', '', '0', '5', '0'), ('<', '', '0', '6', '0'), ('<', '', '0', '7', '0')]:
78 return self.COMPLEX_PRAGMA_TXT
79 return self._check_version(version_left)
80 else:
81 return self.COMPLEX_PRAGMA_TXT
82
83 def _detect(self):
84 """
85 Detects pragma statements that allow for outdated solc versions.
86 :return: Returns the relevant JSON data for the findings.
87 """
88 # Detect all version related pragmas and check if they are disallowed.
89 results = []
90 pragma = self.slither.pragma_directives
91 disallowed_pragmas = []
92 detected_version = False
93 for p in pragma:
94 # Skip any pragma directives which do not refer to version
95 if len(p.directive) < 1 or p.directive[0] != "solidity":
96 continue
97
98 # This is version, so we test if this is disallowed.
99 detected_version = True
100 reason = self._check_pragma(p.version)
101 if reason:
102 disallowed_pragmas.append((reason, p))
103
104 # If we found any disallowed pragmas, we output our findings.
105 if disallowed_pragmas:
106 for (reason, p) in disallowed_pragmas:
107 info = ["Pragma version", p, f" {reason}\n"]
108
109 json = self.generate_result(info)
110
111 results.append(json)
112
113 return results
114
115 @staticmethod
116 def _format(slither, result):
117 format(slither, result)
118
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/slither/detectors/attributes/incorrect_solc.py b/slither/detectors/attributes/incorrect_solc.py
--- a/slither/detectors/attributes/incorrect_solc.py
+++ b/slither/detectors/attributes/incorrect_solc.py
@@ -33,23 +33,30 @@
`solc` frequently releases new compiler versions. Using an old version prevents access to new Solidity security checks.
We also recommend avoiding complex `pragma` statement.'''
WIKI_RECOMMENDATION = '''
-Use Solidity 0.4.25 or 0.5.11. Consider using the latest version of Solidity for testing the compilation, and a trusted version for deploying.'''
+Deploy with any of the following Solidity versions:
+- 0.5.11 - 0.5.13,
+- 0.5.15 - 0.5.17,
+- 0.6.8,
+- 0.6.10 - 0.6.11.
+Use a simple pragma version that allows any of these versions.
+Consider using the latest version of Solidity for testing.'''
COMPLEX_PRAGMA_TXT = "is too complex"
OLD_VERSION_TXT = "allows old versions"
LESS_THAN_TXT = "uses lesser than"
- TOO_RECENT_VERSION_TXT = "necessitates versions too recent to be trusted. Consider deploying with 0.5.11"
- BUGGY_VERSION_TXT = "is known to contain severe issue (https://solidity.readthedocs.io/en/v0.5.8/bugs.html)"
+ TOO_RECENT_VERSION_TXT = "necessitates a version too recent to be trusted. Consider deploying with 0.6.11"
+ BUGGY_VERSION_TXT = "is known to contain severe issues (https://solidity.readthedocs.io/en/latest/bugs.html)"
# Indicates the allowed versions. Must be formatted in increasing order.
- ALLOWED_VERSIONS = ["0.4.25", "0.4.26", "0.5.11"]
+ ALLOWED_VERSIONS = ["0.5.11", "0.5.12", "0.5.13", "0.5.15", "0.5.16", "0.5.17", "0.6.8", "0.6.10", "0.6.11"]
# Indicates the versions that should not be used.
BUGGY_VERSIONS = ["0.4.22", "^0.4.22",
"0.5.5", "^0.5.5",
"0.5.6", "^0.5.6",
- "0.5.14", "^0.5.14"]
+ "0.5.14", "^0.5.14",
+ "0.6.9", "^0.6.9"]
def _check_version(self, version):
op = version[0]
@@ -110,6 +117,17 @@
results.append(json)
+ if self.slither.crytic_compile:
+ if self.slither.crytic_compile.compiler_version:
+ if self.slither.crytic_compile.compiler_version.version not in self.ALLOWED_VERSIONS:
+ info = ["solc-",
+ self.slither.crytic_compile.compiler_version.version,
+ " is not recommended for deployement\n"]
+
+ json = self.generate_result(info)
+
+ results.append(json)
+
return results
@staticmethod
|
{"golden_diff": "diff --git a/slither/detectors/attributes/incorrect_solc.py b/slither/detectors/attributes/incorrect_solc.py\n--- a/slither/detectors/attributes/incorrect_solc.py\n+++ b/slither/detectors/attributes/incorrect_solc.py\n@@ -33,23 +33,30 @@\n `solc` frequently releases new compiler versions. Using an old version prevents access to new Solidity security checks.\n We also recommend avoiding complex `pragma` statement.'''\n WIKI_RECOMMENDATION = '''\n-Use Solidity 0.4.25 or 0.5.11. Consider using the latest version of Solidity for testing the compilation, and a trusted version for deploying.'''\n+Deploy with any of the following Solidity versions:\n+- 0.5.11 - 0.5.13,\n+- 0.5.15 - 0.5.17,\n+- 0.6.8,\n+- 0.6.10 - 0.6.11.\n+Use a simple pragma version that allows any of these versions.\n+Consider using the latest version of Solidity for testing.'''\n \n COMPLEX_PRAGMA_TXT = \"is too complex\"\n OLD_VERSION_TXT = \"allows old versions\"\n LESS_THAN_TXT = \"uses lesser than\"\n \n- TOO_RECENT_VERSION_TXT = \"necessitates versions too recent to be trusted. Consider deploying with 0.5.11\"\n- BUGGY_VERSION_TXT = \"is known to contain severe issue (https://solidity.readthedocs.io/en/v0.5.8/bugs.html)\"\n+ TOO_RECENT_VERSION_TXT = \"necessitates a version too recent to be trusted. Consider deploying with 0.6.11\"\n+ BUGGY_VERSION_TXT = \"is known to contain severe issues (https://solidity.readthedocs.io/en/latest/bugs.html)\"\n \n # Indicates the allowed versions. Must be formatted in increasing order.\n- ALLOWED_VERSIONS = [\"0.4.25\", \"0.4.26\", \"0.5.11\"]\n+ ALLOWED_VERSIONS = [\"0.5.11\", \"0.5.12\", \"0.5.13\", \"0.5.15\", \"0.5.16\", \"0.5.17\", \"0.6.8\", \"0.6.10\", \"0.6.11\"]\n \n # Indicates the versions that should not be used.\n BUGGY_VERSIONS = [\"0.4.22\", \"^0.4.22\",\n \"0.5.5\", \"^0.5.5\",\n \"0.5.6\", \"^0.5.6\",\n- \"0.5.14\", \"^0.5.14\"]\n+ \"0.5.14\", \"^0.5.14\",\n+ \"0.6.9\", \"^0.6.9\"]\n \n def _check_version(self, version):\n op = version[0]\n@@ -110,6 +117,17 @@\n \n results.append(json)\n \n+ if self.slither.crytic_compile:\n+ if self.slither.crytic_compile.compiler_version:\n+ if self.slither.crytic_compile.compiler_version.version not in self.ALLOWED_VERSIONS:\n+ info = [\"solc-\",\n+ self.slither.crytic_compile.compiler_version.version,\n+ \" is not recommended for deployement\\n\"]\n+\n+ json = self.generate_result(info)\n+\n+ results.append(json)\n+\n return results\n \n @staticmethod\n", "issue": "FileNotFoundError: [Errno 2] No such file or directory: 'solc'\nI launch program like this:\r\n\r\n slither Contract.sol \r\n\r\nand receive an error:\r\n\"FileNotFoundError: [Errno 2] No such file or directory: 'solc'\"\r\n\r\nI have solc installed.\r\n\r\n $ solcjs --version\r\n 0.4.25+commit.59dbf8f1.Emscripten.clang\r\n\r\nBut executable is called **solcjs**, not **solc**. Or it is something different?\r\n\nReasoning for \"Trusted\" versions of Solidity\nRe: https://github.com/crytic/slither/wiki/Detector-Documentation#incorrect-versions-of-solidity\r\n\r\nI am wondering why `0.4.25 or 0.5.11` are chosen as trusted versions.\r\n\r\nWould 0.4.26 or 0.5.17 be acceptable? (current highest patches of those minor versions)\r\n\r\nWhy are none of the 0.6.x versions included as a \"trusted\" version?\r\n\r\nWhat are the criteria for adding / bumping the version of \"trusted\"? Is it based on a simple metric like time since release? Or were those two specific versions manually audited by your team members?\r\n\r\nSorry for all the questions. 
I have a project which is fairly portable between `>=0.5.0 <=0.7.0` and am wondering which version to target... I would like to use `immutable` from `>=0.6.5`, but not at the expense of some possible security issue. And after using this tool I wondered what the criteria was for \"trusted versions\" of the compiler.\r\n\r\nThanks.\n", "before_files": [{"content": "\"\"\"\n Check if an incorrect version of solc is used\n\"\"\"\n\nimport re\nfrom slither.detectors.abstract_detector import AbstractDetector, DetectorClassification\nfrom slither.formatters.attributes.incorrect_solc import format\n\n# group:\n# 0: ^ > >= < <= (optional)\n# 1: ' ' (optional)\n# 2: version number\n# 3: version number\n# 4: version number\n\nPATTERN = re.compile('(\\^|>|>=|<|<=)?([ ]+)?(\\d+)\\.(\\d+)\\.(\\d+)')\n\n\nclass IncorrectSolc(AbstractDetector):\n \"\"\"\n Check if an old version of solc is used\n \"\"\"\n\n ARGUMENT = 'solc-version'\n HELP = 'Incorrect Solidity version'\n IMPACT = DetectorClassification.INFORMATIONAL\n CONFIDENCE = DetectorClassification.HIGH\n\n WIKI = 'https://github.com/crytic/slither/wiki/Detector-Documentation#incorrect-versions-of-solidity'\n\n WIKI_TITLE = 'Incorrect versions of Solidity'\n WIKI_DESCRIPTION = '''\n`solc` frequently releases new compiler versions. Using an old version prevents access to new Solidity security checks.\nWe also recommend avoiding complex `pragma` statement.'''\n WIKI_RECOMMENDATION = '''\nUse Solidity 0.4.25 or 0.5.11. Consider using the latest version of Solidity for testing the compilation, and a trusted version for deploying.'''\n\n COMPLEX_PRAGMA_TXT = \"is too complex\"\n OLD_VERSION_TXT = \"allows old versions\"\n LESS_THAN_TXT = \"uses lesser than\"\n\n TOO_RECENT_VERSION_TXT = \"necessitates versions too recent to be trusted. Consider deploying with 0.5.11\"\n BUGGY_VERSION_TXT = \"is known to contain severe issue (https://solidity.readthedocs.io/en/v0.5.8/bugs.html)\"\n\n # Indicates the allowed versions. 
Must be formatted in increasing order.\n ALLOWED_VERSIONS = [\"0.4.25\", \"0.4.26\", \"0.5.11\"]\n\n # Indicates the versions that should not be used.\n BUGGY_VERSIONS = [\"0.4.22\", \"^0.4.22\",\n \"0.5.5\", \"^0.5.5\",\n \"0.5.6\", \"^0.5.6\",\n \"0.5.14\", \"^0.5.14\"]\n\n def _check_version(self, version):\n op = version[0]\n if op and op not in ['>', '>=', '^']:\n return self.LESS_THAN_TXT\n version_number = '.'.join(version[2:])\n if version_number not in self.ALLOWED_VERSIONS:\n if list(map(int, version[2:])) > list(map(int, self.ALLOWED_VERSIONS[-1].split('.'))):\n return self.TOO_RECENT_VERSION_TXT\n return self.OLD_VERSION_TXT\n return None\n\n def _check_pragma(self, version):\n if version in self.BUGGY_VERSIONS:\n return self.BUGGY_VERSION_TXT\n versions = PATTERN.findall(version)\n if len(versions) == 1:\n version = versions[0]\n return self._check_version(version)\n elif len(versions) == 2:\n version_left = versions[0]\n version_right = versions[1]\n # Only allow two elements if the second one is\n # <0.5.0 or <0.6.0\n if version_right not in [('<', '', '0', '5', '0'), ('<', '', '0', '6', '0'), ('<', '', '0', '7', '0')]:\n return self.COMPLEX_PRAGMA_TXT\n return self._check_version(version_left)\n else:\n return self.COMPLEX_PRAGMA_TXT\n\n def _detect(self):\n \"\"\"\n Detects pragma statements that allow for outdated solc versions.\n :return: Returns the relevant JSON data for the findings.\n \"\"\"\n # Detect all version related pragmas and check if they are disallowed.\n results = []\n pragma = self.slither.pragma_directives\n disallowed_pragmas = []\n detected_version = False\n for p in pragma:\n # Skip any pragma directives which do not refer to version\n if len(p.directive) < 1 or p.directive[0] != \"solidity\":\n continue\n\n # This is version, so we test if this is disallowed.\n detected_version = True\n reason = self._check_pragma(p.version)\n if reason:\n disallowed_pragmas.append((reason, p))\n\n # If we found any disallowed pragmas, we output our findings.\n if disallowed_pragmas:\n for (reason, p) in disallowed_pragmas:\n info = [\"Pragma version\", p, f\" {reason}\\n\"]\n\n json = self.generate_result(info)\n\n results.append(json)\n\n return results\n\n @staticmethod\n def _format(slither, result):\n format(slither, result)\n", "path": "slither/detectors/attributes/incorrect_solc.py"}], "after_files": [{"content": "\"\"\"\n Check if an incorrect version of solc is used\n\"\"\"\n\nimport re\nfrom slither.detectors.abstract_detector import AbstractDetector, DetectorClassification\nfrom slither.formatters.attributes.incorrect_solc import format\n\n# group:\n# 0: ^ > >= < <= (optional)\n# 1: ' ' (optional)\n# 2: version number\n# 3: version number\n# 4: version number\n\nPATTERN = re.compile('(\\^|>|>=|<|<=)?([ ]+)?(\\d+)\\.(\\d+)\\.(\\d+)')\n\n\nclass IncorrectSolc(AbstractDetector):\n \"\"\"\n Check if an old version of solc is used\n \"\"\"\n\n ARGUMENT = 'solc-version'\n HELP = 'Incorrect Solidity version'\n IMPACT = DetectorClassification.INFORMATIONAL\n CONFIDENCE = DetectorClassification.HIGH\n\n WIKI = 'https://github.com/crytic/slither/wiki/Detector-Documentation#incorrect-versions-of-solidity'\n\n WIKI_TITLE = 'Incorrect versions of Solidity'\n WIKI_DESCRIPTION = '''\n`solc` frequently releases new compiler versions. 
Using an old version prevents access to new Solidity security checks.\nWe also recommend avoiding complex `pragma` statement.'''\n WIKI_RECOMMENDATION = '''\nDeploy with any of the following Solidity versions:\n- 0.5.11 - 0.5.13,\n- 0.5.15 - 0.5.17,\n- 0.6.8,\n- 0.6.10 - 0.6.11.\nUse a simple pragma version that allows any of these versions.\nConsider using the latest version of Solidity for testing.'''\n\n COMPLEX_PRAGMA_TXT = \"is too complex\"\n OLD_VERSION_TXT = \"allows old versions\"\n LESS_THAN_TXT = \"uses lesser than\"\n\n TOO_RECENT_VERSION_TXT = \"necessitates a version too recent to be trusted. Consider deploying with 0.6.11\"\n BUGGY_VERSION_TXT = \"is known to contain severe issues (https://solidity.readthedocs.io/en/latest/bugs.html)\"\n\n # Indicates the allowed versions. Must be formatted in increasing order.\n ALLOWED_VERSIONS = [\"0.5.11\", \"0.5.12\", \"0.5.13\", \"0.5.15\", \"0.5.16\", \"0.5.17\", \"0.6.8\", \"0.6.10\", \"0.6.11\"]\n\n # Indicates the versions that should not be used.\n BUGGY_VERSIONS = [\"0.4.22\", \"^0.4.22\",\n \"0.5.5\", \"^0.5.5\",\n \"0.5.6\", \"^0.5.6\",\n \"0.5.14\", \"^0.5.14\",\n \"0.6.9\", \"^0.6.9\"]\n\n def _check_version(self, version):\n op = version[0]\n if op and op not in ['>', '>=', '^']:\n return self.LESS_THAN_TXT\n version_number = '.'.join(version[2:])\n if version_number not in self.ALLOWED_VERSIONS:\n if list(map(int, version[2:])) > list(map(int, self.ALLOWED_VERSIONS[-1].split('.'))):\n return self.TOO_RECENT_VERSION_TXT\n return self.OLD_VERSION_TXT\n return None\n\n def _check_pragma(self, version):\n if version in self.BUGGY_VERSIONS:\n return self.BUGGY_VERSION_TXT\n versions = PATTERN.findall(version)\n if len(versions) == 1:\n version = versions[0]\n return self._check_version(version)\n elif len(versions) == 2:\n version_left = versions[0]\n version_right = versions[1]\n # Only allow two elements if the second one is\n # <0.5.0 or <0.6.0\n if version_right not in [('<', '', '0', '5', '0'), ('<', '', '0', '6', '0'), ('<', '', '0', '7', '0')]:\n return self.COMPLEX_PRAGMA_TXT\n return self._check_version(version_left)\n else:\n return self.COMPLEX_PRAGMA_TXT\n\n def _detect(self):\n \"\"\"\n Detects pragma statements that allow for outdated solc versions.\n :return: Returns the relevant JSON data for the findings.\n \"\"\"\n # Detect all version related pragmas and check if they are disallowed.\n results = []\n pragma = self.slither.pragma_directives\n disallowed_pragmas = []\n detected_version = False\n for p in pragma:\n # Skip any pragma directives which do not refer to version\n if len(p.directive) < 1 or p.directive[0] != \"solidity\":\n continue\n\n # This is version, so we test if this is disallowed.\n detected_version = True\n reason = self._check_pragma(p.version)\n if reason:\n disallowed_pragmas.append((reason, p))\n\n # If we found any disallowed pragmas, we output our findings.\n if disallowed_pragmas:\n for (reason, p) in disallowed_pragmas:\n info = [\"Pragma version\", p, f\" {reason}\\n\"]\n\n json = self.generate_result(info)\n\n results.append(json)\n\n if self.slither.crytic_compile:\n if self.slither.crytic_compile.compiler_version:\n if self.slither.crytic_compile.compiler_version.version not in self.ALLOWED_VERSIONS:\n info = [\"solc-\",\n self.slither.crytic_compile.compiler_version.version,\n \" is not recommended for deployement\\n\"]\n\n json = self.generate_result(info)\n\n results.append(json)\n\n return results\n\n @staticmethod\n def _format(slither, result):\n format(slither, result)\n", 
"path": "slither/detectors/attributes/incorrect_solc.py"}]}
| 1,993 | 794 |
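To see why the pre-patch detector accepts 0.4.26 but flags every 0.6.x pragma as "too recent", the version classification from `incorrect_solc.py` can be exercised in isolation. The sketch below copies the regex and the pre-patch `ALLOWED_VERSIONS` list from the file shown in the record above; it is a standalone reimplementation for illustration, not the slither code path itself.

```python
import re

PATTERN = re.compile(r'(\^|>|>=|<|<=)?([ ]+)?(\d+)\.(\d+)\.(\d+)')
ALLOWED_VERSIONS = ["0.4.25", "0.4.26", "0.5.11"]  # pre-patch list

def check_version(version):
    """Mirrors IncorrectSolc._check_version for one parsed pragma tuple."""
    op = version[0]
    if op and op not in ('>', '>=', '^'):
        return "uses lesser than"
    number = '.'.join(version[2:])
    if number not in ALLOWED_VERSIONS:
        if list(map(int, version[2:])) > list(map(int, ALLOWED_VERSIONS[-1].split('.'))):
            return "necessitates versions too recent to be trusted"
        return "allows old versions"
    return None

for pragma in ("0.4.26", "^0.4.24", "^0.6.5"):
    print(pragma, "->", check_version(PATTERN.findall(pragma)[0]))
# 0.4.26 -> None (trusted), ^0.4.24 -> allows old versions, ^0.6.5 -> too recent
```

The golden diff above then widens that list to the 0.5.11 - 0.6.11 set and additionally reports the compiler version crytic-compile actually used when it is outside the list.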
gh_patches_debug_2854
|
rasdani/github-patches
|
git_diff
|
wger-project__wger-170
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BMI And Calorie Calculator Not Working
Using this software in Linux Mint 13.
When I enter my data into either the BMI calculator or the calorie estimator nothing happens.
I have entered my height in cm and my weight in kgs.
The BMI calculator says my BMI = 0.
I'd be happy with 10.
--- END ISSUE ---
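For reference while reading the form code below, the metric BMI the calculator should produce is just weight over height squared; a reported value of 0 therefore suggests the submitted height and weight never reach this arithmetic rather than a rounding problem. A minimal sketch:

```python
def bmi(weight_kg: float, height_cm: float) -> float:
    """Metric BMI: kilograms divided by height in metres, squared."""
    return weight_kg / (height_cm / 100) ** 2

print(round(bmi(80, 180), 1))  # 24.7
```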
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wger/nutrition/forms.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # This file is part of wger Workout Manager.
4 #
5 # wger Workout Manager is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU Affero General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # wger Workout Manager is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU Affero General Public License
16
17 import logging
18
19 from django import forms
20 from django.utils.translation import ugettext as _
21 from wger.core.models import UserProfile
22
23 from wger.nutrition.models import IngredientWeightUnit, Ingredient, MealItem
24 from wger.utils.widgets import Html5NumberInput
25
26
27 logger = logging.getLogger(__name__)
28
29
30 class UnitChooserForm(forms.Form):
31 '''
32 A small form to select an amount and a unit for an ingredient
33 '''
34 amount = forms.DecimalField(decimal_places=2,
35 max_digits=5,
36 localize=True)
37 unit = forms.ModelChoiceField(queryset=IngredientWeightUnit.objects.none(),
38 empty_label="g",
39 required=False)
40
41 def __init__(self, *args, **kwargs):
42 super(UnitChooserForm, self).__init__(*args, **kwargs)
43
44 if len(args) and args[0].get('ingredient'):
45 ingredient_id = args[0]['ingredient']
46
47 elif kwargs.get('data'):
48 ingredient_id = kwargs['data']['ingredient_id']
49
50 else:
51 ingredient_id = -1
52
53 self.fields['unit'].queryset = IngredientWeightUnit.objects.filter(
54 ingredient_id=ingredient_id).select_related()
55
56
57 class BmiForm(forms.ModelForm):
58 weight = forms.DecimalField(widget=Html5NumberInput(),
59 max_value=999)
60
61 class Meta:
62 model = UserProfile
63 fields = ('height', )
64
65
66 class BmrForm(forms.ModelForm):
67 '''
68 Form for the basal metabolic rate
69 '''
70 weight = forms.DecimalField(widget=Html5NumberInput())
71
72 class Meta:
73 model = UserProfile
74 fields = ('age', 'height', 'gender')
75
76
77 class PhysicalActivitiesForm(forms.ModelForm):
78 '''
79 Form for the additional physical activities
80 '''
81 class Meta:
82 model = UserProfile
83 fields = ('sleep_hours',
84 'work_hours',
85 'work_intensity',
86 'sport_hours',
87 'sport_intensity',
88 'freetime_hours',
89 'freetime_intensity')
90
91
92 class DailyCaloriesForm(forms.ModelForm):
93 '''
94 Form for the total daily calories needed
95 '''
96
97 base_calories = forms.IntegerField(label=_('Basic caloric intake'),
98 help_text=_('Your basic caloric intake as calculated for '
99 'your data'),
100 required=False,
101 widget=Html5NumberInput())
102 additional_calories = forms.IntegerField(label=_('Additional calories'),
103 help_text=_('Additional calories to add to the base '
104 'rate (to substract, enter a negative '
105 'number)'),
106 initial=0,
107 required=False,
108 widget=Html5NumberInput())
109
110 class Meta:
111 model = UserProfile
112 fields = ('calories',)
113
114
115 class MealItemForm(forms.ModelForm):
116 weight_unit = forms.ModelChoiceField(queryset=IngredientWeightUnit.objects.none(),
117 empty_label="g",
118 required=False)
119 ingredient = forms.ModelChoiceField(queryset=Ingredient.objects.all(),
120 widget=forms.HiddenInput)
121
122 class Meta:
123 model = MealItem
124 fields = '__all__'
125
126 def __init__(self, *args, **kwargs):
127 super(MealItemForm, self).__init__(*args, **kwargs)
128
129 # Get the ingredient_id
130 ingredient_id = None
131
132 if kwargs.get('instance'):
133 ingredient_id = kwargs['instance'].ingredient_id
134
135 if kwargs.get('data'):
136 ingredient_id = kwargs['data']['ingredient']
137
138 # Filter the available ingredients
139 if ingredient_id:
140 self.fields['weight_unit'].queryset = \
141 IngredientWeightUnit.objects.filter(ingredient_id=ingredient_id)
142
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/wger/nutrition/forms.py b/wger/nutrition/forms.py
--- a/wger/nutrition/forms.py
+++ b/wger/nutrition/forms.py
@@ -55,6 +55,9 @@
class BmiForm(forms.ModelForm):
+ height = forms.DecimalField(widget=Html5NumberInput(),
+ max_value=999,
+ label=_('Height (cm)'))
weight = forms.DecimalField(widget=Html5NumberInput(),
max_value=999)
|
{"golden_diff": "diff --git a/wger/nutrition/forms.py b/wger/nutrition/forms.py\n--- a/wger/nutrition/forms.py\n+++ b/wger/nutrition/forms.py\n@@ -55,6 +55,9 @@\n \n \n class BmiForm(forms.ModelForm):\n+ height = forms.DecimalField(widget=Html5NumberInput(),\n+ max_value=999,\n+ label=_('Height (cm)'))\n weight = forms.DecimalField(widget=Html5NumberInput(),\n max_value=999)\n", "issue": "BMI And Calorie Calculator Not Working\nUsing this software in Linux Mint 13.\nWhen I enter my data into either the BMI calculator or the calorie estimator nothing happens.\nI have entered my height in cm and my weight in kgs.\nThe BMI calculator says my BMI = 0.\nI'd be happy with 10.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This file is part of wger Workout Manager.\n#\n# wger Workout Manager is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# wger Workout Manager is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n\nimport logging\n\nfrom django import forms\nfrom django.utils.translation import ugettext as _\nfrom wger.core.models import UserProfile\n\nfrom wger.nutrition.models import IngredientWeightUnit, Ingredient, MealItem\nfrom wger.utils.widgets import Html5NumberInput\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass UnitChooserForm(forms.Form):\n '''\n A small form to select an amount and a unit for an ingredient\n '''\n amount = forms.DecimalField(decimal_places=2,\n max_digits=5,\n localize=True)\n unit = forms.ModelChoiceField(queryset=IngredientWeightUnit.objects.none(),\n empty_label=\"g\",\n required=False)\n\n def __init__(self, *args, **kwargs):\n super(UnitChooserForm, self).__init__(*args, **kwargs)\n\n if len(args) and args[0].get('ingredient'):\n ingredient_id = args[0]['ingredient']\n\n elif kwargs.get('data'):\n ingredient_id = kwargs['data']['ingredient_id']\n\n else:\n ingredient_id = -1\n\n self.fields['unit'].queryset = IngredientWeightUnit.objects.filter(\n ingredient_id=ingredient_id).select_related()\n\n\nclass BmiForm(forms.ModelForm):\n weight = forms.DecimalField(widget=Html5NumberInput(),\n max_value=999)\n\n class Meta:\n model = UserProfile\n fields = ('height', )\n\n\nclass BmrForm(forms.ModelForm):\n '''\n Form for the basal metabolic rate\n '''\n weight = forms.DecimalField(widget=Html5NumberInput())\n\n class Meta:\n model = UserProfile\n fields = ('age', 'height', 'gender')\n\n\nclass PhysicalActivitiesForm(forms.ModelForm):\n '''\n Form for the additional physical activities\n '''\n class Meta:\n model = UserProfile\n fields = ('sleep_hours',\n 'work_hours',\n 'work_intensity',\n 'sport_hours',\n 'sport_intensity',\n 'freetime_hours',\n 'freetime_intensity')\n\n\nclass DailyCaloriesForm(forms.ModelForm):\n '''\n Form for the total daily calories needed\n '''\n\n base_calories = forms.IntegerField(label=_('Basic caloric intake'),\n help_text=_('Your basic caloric intake as calculated for '\n 'your data'),\n required=False,\n widget=Html5NumberInput())\n additional_calories = forms.IntegerField(label=_('Additional calories'),\n help_text=_('Additional calories to add to the base '\n 'rate 
(to substract, enter a negative '\n 'number)'),\n initial=0,\n required=False,\n widget=Html5NumberInput())\n\n class Meta:\n model = UserProfile\n fields = ('calories',)\n\n\nclass MealItemForm(forms.ModelForm):\n weight_unit = forms.ModelChoiceField(queryset=IngredientWeightUnit.objects.none(),\n empty_label=\"g\",\n required=False)\n ingredient = forms.ModelChoiceField(queryset=Ingredient.objects.all(),\n widget=forms.HiddenInput)\n\n class Meta:\n model = MealItem\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super(MealItemForm, self).__init__(*args, **kwargs)\n\n # Get the ingredient_id\n ingredient_id = None\n\n if kwargs.get('instance'):\n ingredient_id = kwargs['instance'].ingredient_id\n\n if kwargs.get('data'):\n ingredient_id = kwargs['data']['ingredient']\n\n # Filter the available ingredients\n if ingredient_id:\n self.fields['weight_unit'].queryset = \\\n IngredientWeightUnit.objects.filter(ingredient_id=ingredient_id)\n", "path": "wger/nutrition/forms.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This file is part of wger Workout Manager.\n#\n# wger Workout Manager is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# wger Workout Manager is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n\nimport logging\n\nfrom django import forms\nfrom django.utils.translation import ugettext as _\nfrom wger.core.models import UserProfile\n\nfrom wger.nutrition.models import IngredientWeightUnit, Ingredient, MealItem\nfrom wger.utils.widgets import Html5NumberInput\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass UnitChooserForm(forms.Form):\n '''\n A small form to select an amount and a unit for an ingredient\n '''\n amount = forms.DecimalField(decimal_places=2,\n max_digits=5,\n localize=True)\n unit = forms.ModelChoiceField(queryset=IngredientWeightUnit.objects.none(),\n empty_label=\"g\",\n required=False)\n\n def __init__(self, *args, **kwargs):\n super(UnitChooserForm, self).__init__(*args, **kwargs)\n\n if len(args) and args[0].get('ingredient'):\n ingredient_id = args[0]['ingredient']\n\n elif kwargs.get('data'):\n ingredient_id = kwargs['data']['ingredient_id']\n\n else:\n ingredient_id = -1\n\n self.fields['unit'].queryset = IngredientWeightUnit.objects.filter(\n ingredient_id=ingredient_id).select_related()\n\n\nclass BmiForm(forms.ModelForm):\n height = forms.DecimalField(widget=Html5NumberInput(),\n max_value=999,\n label=_('Height (cm)'))\n weight = forms.DecimalField(widget=Html5NumberInput(),\n max_value=999)\n\n class Meta:\n model = UserProfile\n fields = ('height', )\n\n\nclass BmrForm(forms.ModelForm):\n '''\n Form for the basal metabolic rate\n '''\n weight = forms.DecimalField(widget=Html5NumberInput())\n\n class Meta:\n model = UserProfile\n fields = ('age', 'height', 'gender')\n\n\nclass PhysicalActivitiesForm(forms.ModelForm):\n '''\n Form for the additional physical activities\n '''\n class Meta:\n model = UserProfile\n fields = ('sleep_hours',\n 'work_hours',\n 'work_intensity',\n 'sport_hours',\n 'sport_intensity',\n 'freetime_hours',\n 'freetime_intensity')\n\n\nclass 
DailyCaloriesForm(forms.ModelForm):\n '''\n Form for the total daily calories needed\n '''\n\n base_calories = forms.IntegerField(label=_('Basic caloric intake'),\n help_text=_('Your basic caloric intake as calculated for '\n 'your data'),\n required=False,\n widget=Html5NumberInput())\n additional_calories = forms.IntegerField(label=_('Additional calories'),\n help_text=_('Additional calories to add to the base '\n 'rate (to substract, enter a negative '\n 'number)'),\n initial=0,\n required=False,\n widget=Html5NumberInput())\n\n class Meta:\n model = UserProfile\n fields = ('calories',)\n\n\nclass MealItemForm(forms.ModelForm):\n weight_unit = forms.ModelChoiceField(queryset=IngredientWeightUnit.objects.none(),\n empty_label=\"g\",\n required=False)\n ingredient = forms.ModelChoiceField(queryset=Ingredient.objects.all(),\n widget=forms.HiddenInput)\n\n class Meta:\n model = MealItem\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super(MealItemForm, self).__init__(*args, **kwargs)\n\n # Get the ingredient_id\n ingredient_id = None\n\n if kwargs.get('instance'):\n ingredient_id = kwargs['instance'].ingredient_id\n\n if kwargs.get('data'):\n ingredient_id = kwargs['data']['ingredient']\n\n # Filter the available ingredients\n if ingredient_id:\n self.fields['weight_unit'].queryset = \\\n IngredientWeightUnit.objects.filter(ingredient_id=ingredient_id)\n", "path": "wger/nutrition/forms.py"}]}
| 1,550 | 111 |
gh_patches_debug_98
|
rasdani/github-patches
|
git_diff
|
spack__spack-6618
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
xrootd needs openssl
xrootd needs openssl headers to compile for 4.6.0
spack find : always prompt 0 installed packages
On a clean `develop` checkout :
```
$ git clone https://github.com/LLNL/spack.git
Cloning into 'spack'...
remote: Counting objects: 25613, done.
remote: Compressing objects: 100% (42/42), done.
remote: Total 25613 (delta 12), reused 3 (delta 3), pack-reused 25557
Receiving objects: 100% (25613/25613), 6.65 MiB | 6.46 MiB/s, done.
Resolving deltas: 100% (13031/13031), done.
Checking connectivity... done.
$ cd spack
$ . share/spack/setup-env.sh
$ spack compilers
==> Available compilers
-- gcc ----------------------------------------------------------
[email protected]
$ spack install zlib
==> Installing zlib
==> Trying to fetch from file:///home/mculpo/production/spack-mirror/zlib/zlib-1.2.8.tar.gz
######################################################################## 100,0%
==> Staging archive: /home/mculpo/tmp/spack/var/spack/stage/zlib-1.2.8-d6pdl6xvnvap6ihrqcqtgvweghbszmix/zlib-1.2.8.tar.gz
==> Created stage in /home/mculpo/tmp/spack/var/spack/stage/zlib-1.2.8-d6pdl6xvnvap6ihrqcqtgvweghbszmix
==> No patches needed for zlib
==> Building zlib
==> Successfully installed zlib
Fetch: 0.01s. Build: 3.69s. Total: 3.70s.
[+] /home/mculpo/tmp/spack/opt/spack/linux-x86_64/gcc-4.8/zlib-1.2.8-d6pdl6xvnvap6ihrqcqtgvweghbszmix
$ spack find
==> 0 installed packages.
$ spack install szip
==> Installing szip
==> Trying to fetch from file:///home/mculpo/production/spack-mirror/szip/szip-2.1.tar.gz
######################################################################## 100,0%
==> Staging archive: /home/mculpo/tmp/spack/var/spack/stage/szip-2.1-esfmhl54wbdb7nnnip6y6jbxlbmxs2jq/szip-2.1.tar.gz
==> Created stage in /home/mculpo/tmp/spack/var/spack/stage/szip-2.1-esfmhl54wbdb7nnnip6y6jbxlbmxs2jq
==> No patches needed for szip
==> Building szip
==> Successfully installed szip
Fetch: 0.01s. Build: 8.09s. Total: 8.10s.
[+] /home/mculpo/tmp/spack/opt/spack/linux-x86_64/gcc-4.8/szip-2.1-esfmhl54wbdb7nnnip6y6jbxlbmxs2jq
$ spack find
==> 0 installed packages.
```
The db seems to be written correctly :
```
database:
installs:
d6pdl6xvnvap6ihrqcqtgvweghbszmix:
explicit: true
installed: true
path: /home/mculpo/tmp/spack/opt/spack/linux-x86_64/gcc-4.8/zlib-1.2.8-d6pdl6xvnvap6ihrqcqtgvweghbszmix
ref_count: 0
spec:
zlib:
arch: linux-x86_64
compiler:
name: gcc
version: '4.8'
dependencies: {}
namespace: builtin
parameters:
cflags: []
cppflags: []
cxxflags: []
fflags: []
ldflags: []
ldlibs: []
version: 1.2.8
esfmhl54wbdb7nnnip6y6jbxlbmxs2jq:
explicit: true
installed: true
path: /home/mculpo/tmp/spack/opt/spack/linux-x86_64/gcc-4.8/szip-2.1-esfmhl54wbdb7nnnip6y6jbxlbmxs2jq
ref_count: 0
spec:
szip:
arch: linux-x86_64
compiler:
name: gcc
version: '4.8'
dependencies: {}
namespace: builtin
parameters:
cflags: []
cppflags: []
cxxflags: []
fflags: []
ldflags: []
ldlibs: []
version: '2.1'
version: 0.9.1
```
xrootd requires zlib to be installed on system
CMake can't find zlib when installing xrootd. zlib is not listed as a dependency fro xrootd, so CMake looks for it on the system.
--- END ISSUE ---
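The database dump quoted above does contain both installs, which is worth confirming independently of `spack find`. The sketch below parses that quoted YAML; it assumes PyYAML is available and that the quoted text is bound to the string `db_text` (both assumptions, nothing Spack-internal):

```python
import yaml  # assumption: PyYAML is installed

db = yaml.safe_load(db_text)  # db_text = the YAML dump quoted in the issue above
installs = db["database"]["installs"]
print(len(installs), "specs recorded")  # -> 2
for rec in installs.values():
    (name, info), = rec["spec"].items()
    print(f"{name} @{info['version']} installed={rec['installed']}")
```

Both specs being present suggests the write path is fine and points the search at how `spack find` reads or filters the database.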
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `var/spack/repos/builtin/packages/xrootd/package.py`
Content:
```
1 ##############################################################################
2 # Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
3 # Produced at the Lawrence Livermore National Laboratory.
4 #
5 # This file is part of Spack.
6 # Created by Todd Gamblin, [email protected], All rights reserved.
7 # LLNL-CODE-647188
8 #
9 # For details, see https://github.com/spack/spack
10 # Please also see the NOTICE and LICENSE files for our notice and the LGPL.
11 #
12 # This program is free software; you can redistribute it and/or modify
13 # it under the terms of the GNU Lesser General Public License (as
14 # published by the Free Software Foundation) version 2.1, February 1999.
15 #
16 # This program is distributed in the hope that it will be useful, but
17 # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
19 # conditions of the GNU Lesser General Public License for more details.
20 #
21 # You should have received a copy of the GNU Lesser General Public
22 # License along with this program; if not, write to the Free Software
23 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 ##############################################################################
25
26 from spack import *
27
28
29 class Xrootd(CMakePackage):
30 """The XROOTD project aims at giving high performance, scalable fault
31 tolerant access to data repositories of many kinds."""
32 homepage = "http://xrootd.org"
33 url = "http://xrootd.org/download/v4.6.0/xrootd-4.6.0.tar.gz"
34
35 version('4.6.0', '5d60aade2d995b68fe0c46896bc4a5d1')
36 version('4.5.0', 'd485df3d4a991e1c35efa4bf9ef663d7')
37 version('4.4.1', '72b0842f802ccc94dede4ac5ab2a589e')
38 version('4.4.0', '58f55e56801d3661d753ff5fd33dbcc9')
39 version('4.3.0', '39c2fab9f632f35e12ff607ccaf9e16c')
40
41 depends_on('[email protected]:', type='build')
42 depends_on('zlib')
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/var/spack/repos/builtin/packages/xrootd/package.py b/var/spack/repos/builtin/packages/xrootd/package.py
--- a/var/spack/repos/builtin/packages/xrootd/package.py
+++ b/var/spack/repos/builtin/packages/xrootd/package.py
@@ -40,3 +40,4 @@
depends_on('[email protected]:', type='build')
depends_on('zlib')
+ depends_on('openssl')
|
{"golden_diff": "diff --git a/var/spack/repos/builtin/packages/xrootd/package.py b/var/spack/repos/builtin/packages/xrootd/package.py\n--- a/var/spack/repos/builtin/packages/xrootd/package.py\n+++ b/var/spack/repos/builtin/packages/xrootd/package.py\n@@ -40,3 +40,4 @@\n \n depends_on('[email protected]:', type='build')\n depends_on('zlib')\n+ depends_on('openssl')\n", "issue": "xrootd needs openssl\nxrootd needs openssl headers to compile for 4.6.0\nspack find : always prompt 0 installed packages\nOn a clean `develop` checkout : \n\n```\n$ git clone https://github.com/LLNL/spack.git\nCloning into 'spack'...\nremote: Counting objects: 25613, done.\nremote: Compressing objects: 100% (42/42), done.\nremote: Total 25613 (delta 12), reused 3 (delta 3), pack-reused 25557\nReceiving objects: 100% (25613/25613), 6.65 MiB | 6.46 MiB/s, done.\nResolving deltas: 100% (13031/13031), done.\nChecking connectivity... done.\n\n$ cd spack\n$ . share/spack/setup-env.sh \n$ spack compilers\n==> Available compilers\n-- gcc ----------------------------------------------------------\[email protected]\n\n$ spack install zlib\n==> Installing zlib\n==> Trying to fetch from file:///home/mculpo/production/spack-mirror/zlib/zlib-1.2.8.tar.gz\n######################################################################## 100,0%\n==> Staging archive: /home/mculpo/tmp/spack/var/spack/stage/zlib-1.2.8-d6pdl6xvnvap6ihrqcqtgvweghbszmix/zlib-1.2.8.tar.gz\n==> Created stage in /home/mculpo/tmp/spack/var/spack/stage/zlib-1.2.8-d6pdl6xvnvap6ihrqcqtgvweghbszmix\n==> No patches needed for zlib\n==> Building zlib\n==> Successfully installed zlib\n Fetch: 0.01s. Build: 3.69s. Total: 3.70s.\n[+] /home/mculpo/tmp/spack/opt/spack/linux-x86_64/gcc-4.8/zlib-1.2.8-d6pdl6xvnvap6ihrqcqtgvweghbszmix\n\n$ spack find\n==> 0 installed packages.\n\n$ spack install szip\n==> Installing szip\n==> Trying to fetch from file:///home/mculpo/production/spack-mirror/szip/szip-2.1.tar.gz\n######################################################################## 100,0%\n==> Staging archive: /home/mculpo/tmp/spack/var/spack/stage/szip-2.1-esfmhl54wbdb7nnnip6y6jbxlbmxs2jq/szip-2.1.tar.gz\n==> Created stage in /home/mculpo/tmp/spack/var/spack/stage/szip-2.1-esfmhl54wbdb7nnnip6y6jbxlbmxs2jq\n==> No patches needed for szip\n==> Building szip\n==> Successfully installed szip\n Fetch: 0.01s. Build: 8.09s. 
Total: 8.10s.\n[+] /home/mculpo/tmp/spack/opt/spack/linux-x86_64/gcc-4.8/szip-2.1-esfmhl54wbdb7nnnip6y6jbxlbmxs2jq\n\n$ spack find \n==> 0 installed packages.\n```\n\nThe db seems to be written correctly : \n\n```\ndatabase:\n installs:\n d6pdl6xvnvap6ihrqcqtgvweghbszmix:\n explicit: true\n installed: true\n path: /home/mculpo/tmp/spack/opt/spack/linux-x86_64/gcc-4.8/zlib-1.2.8-d6pdl6xvnvap6ihrqcqtgvweghbszmix\n ref_count: 0\n spec:\n zlib:\n arch: linux-x86_64\n compiler:\n name: gcc\n version: '4.8'\n dependencies: {}\n namespace: builtin\n parameters:\n cflags: []\n cppflags: []\n cxxflags: []\n fflags: []\n ldflags: []\n ldlibs: []\n version: 1.2.8\n esfmhl54wbdb7nnnip6y6jbxlbmxs2jq:\n explicit: true\n installed: true\n path: /home/mculpo/tmp/spack/opt/spack/linux-x86_64/gcc-4.8/szip-2.1-esfmhl54wbdb7nnnip6y6jbxlbmxs2jq\n ref_count: 0\n spec:\n szip:\n arch: linux-x86_64\n compiler:\n name: gcc\n version: '4.8'\n dependencies: {}\n namespace: builtin\n parameters:\n cflags: []\n cppflags: []\n cxxflags: []\n fflags: []\n ldflags: []\n ldlibs: []\n version: '2.1'\n version: 0.9.1\n```\n\nxrootd requires zlib to be installed on system\nCMake can't find zlib when installing xrootd. zlib is not listed as a dependency fro xrootd, so CMake looks for it on the system.\n", "before_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/spack/spack\n# Please also see the NOTICE and LICENSE files for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\n\nfrom spack import *\n\n\nclass Xrootd(CMakePackage):\n \"\"\"The XROOTD project aims at giving high performance, scalable fault\n tolerant access to data repositories of many kinds.\"\"\"\n homepage = \"http://xrootd.org\"\n url = \"http://xrootd.org/download/v4.6.0/xrootd-4.6.0.tar.gz\"\n\n version('4.6.0', '5d60aade2d995b68fe0c46896bc4a5d1')\n version('4.5.0', 'd485df3d4a991e1c35efa4bf9ef663d7')\n version('4.4.1', '72b0842f802ccc94dede4ac5ab2a589e')\n version('4.4.0', '58f55e56801d3661d753ff5fd33dbcc9')\n version('4.3.0', '39c2fab9f632f35e12ff607ccaf9e16c')\n\n depends_on('[email protected]:', type='build')\n depends_on('zlib')\n", "path": "var/spack/repos/builtin/packages/xrootd/package.py"}], "after_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/spack/spack\n# Please also see the NOTICE and LICENSE files for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\n\nfrom spack import *\n\n\nclass Xrootd(CMakePackage):\n \"\"\"The XROOTD project aims at giving high performance, scalable fault\n tolerant access to data repositories of many kinds.\"\"\"\n homepage = \"http://xrootd.org\"\n url = \"http://xrootd.org/download/v4.6.0/xrootd-4.6.0.tar.gz\"\n\n version('4.6.0', '5d60aade2d995b68fe0c46896bc4a5d1')\n version('4.5.0', 'd485df3d4a991e1c35efa4bf9ef663d7')\n version('4.4.1', '72b0842f802ccc94dede4ac5ab2a589e')\n version('4.4.0', '58f55e56801d3661d753ff5fd33dbcc9')\n version('4.3.0', '39c2fab9f632f35e12ff607ccaf9e16c')\n\n depends_on('[email protected]:', type='build')\n depends_on('zlib')\n depends_on('openssl')\n", "path": "var/spack/repos/builtin/packages/xrootd/package.py"}]}
| 2,115 | 102 |
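The net effect of the patch above on the package recipe is small enough to restate as a sketch. Only the 4.6.0 version line is kept, and the comments are added here for context; they are not part of the original file.

```python
from spack import *


class Xrootd(CMakePackage):
    """XROOTD recipe, dependency block as it stands after the fix."""

    homepage = "http://xrootd.org"
    url = "http://xrootd.org/download/v4.6.0/xrootd-4.6.0.tar.gz"

    version('4.6.0', '5d60aade2d995b68fe0c46896bc4a5d1')

    depends_on('[email protected]:', type='build')
    depends_on('zlib')      # already declared, so CMake does not fall back to a system zlib
    depends_on('openssl')   # added by this record's patch: 4.6.0 needs the OpenSSL headers
```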
gh_patches_debug_39725
|
rasdani/github-patches
|
git_diff
|
quantumlib__Cirq-5787
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Multi-qubit conditions in DeferredMeasurementsTransformer
Use https://github.com/quantumlib/Cirq/pull/5755 to allow control keys from multi-qubit measurements in deferred measurements transformers. That new feature allows us to express "anything but all zeros", which is the condition required.
https://github.com/quantumlib/Cirq/blob/77b61f500af3726ad4b34b06c72e21eac48d3db6/cirq-core/cirq/transformers/measurement_transformers.py#L121-L124
--- END ISSUE ---
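As an illustration of the condition the issue describes, the sketch below shows how an "anything but all zeros" set of control values could be enumerated for a two-qubit measurement key using the `SumOfProducts` control values from #5755; the dimensions and the top-level `cirq.SumOfProducts` name are assumptions for the sketch, not code taken from this repository.

```python
# Hypothetical sketch: enumerate every computational-basis outcome of a 2-qubit
# measurement and keep everything except the all-zeros outcome.
import itertools

import cirq

dims = (2, 2)  # assumed dimensions of the measured qubits
all_values = itertools.product(*(range(d) for d in dims))
anything_but_all_zeros = tuple(itertools.islice(all_values, 1, None))  # drops (0, 0)
control_values = cirq.SumOfProducts(anything_but_all_zeros)  # assumes top-level export
print(control_values)
```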
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cirq-core/cirq/transformers/measurement_transformers.py`
Content:
```
1 # Copyright 2022 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import Any, Dict, List, Optional, TYPE_CHECKING, Union
16
17 from cirq import ops, protocols, value
18 from cirq.transformers import transformer_api, transformer_primitives
19 from cirq.transformers.synchronize_terminal_measurements import find_terminal_measurements
20
21 if TYPE_CHECKING:
22 import cirq
23
24
25 class _MeasurementQid(ops.Qid):
26 """A qubit that substitutes in for a deferred measurement.
27
28 Exactly one qubit will be created per qubit in the measurement gate.
29 """
30
31 def __init__(self, key: Union[str, 'cirq.MeasurementKey'], qid: 'cirq.Qid'):
32 """Initializes the qubit.
33
34 Args:
35 key: The key of the measurement gate being deferred.
36 qid: One qubit that is being measured. Each deferred measurement
37 should create one new _MeasurementQid per qubit being measured
38 by that gate.
39 """
40 self._key = value.MeasurementKey.parse_serialized(key) if isinstance(key, str) else key
41 self._qid = qid
42
43 @property
44 def dimension(self) -> int:
45 return self._qid.dimension
46
47 def _comparison_key(self) -> Any:
48 return (str(self._key), self._qid._comparison_key())
49
50 def __str__(self) -> str:
51 return f"M('{self._key}', q={self._qid})"
52
53 def __repr__(self) -> str:
54 return f'_MeasurementQid({self._key!r}, {self._qid!r})'
55
56
57 @transformer_api.transformer
58 def defer_measurements(
59 circuit: 'cirq.AbstractCircuit', *, context: Optional['cirq.TransformerContext'] = None
60 ) -> 'cirq.Circuit':
61 """Implements the Deferred Measurement Principle.
62
63 Uses the Deferred Measurement Principle to move all measurements to the
64 end of the circuit. All non-terminal measurements are changed to
65 conditional quantum gates onto ancilla qubits, and classically controlled
66 operations are transformed to quantum controls from those ancilla qubits.
67 Finally, measurements of all ancilla qubits are appended to the end of the
68 circuit.
69
70 Optimizing deferred measurements is an area of active research, and future
71 iterations may contain optimizations that reduce the number of ancilla
72 qubits, so one should not depend on the exact shape of the output from this
73 function. Only the logical equivalence is guaranteed to remain unchanged.
74 Moment and subcircuit structure is not preserved.
75
76 Args:
77 circuit: The circuit to transform. It will not be modified.
78 context: `cirq.TransformerContext` storing common configurable options
79 for transformers.
80 Returns:
81 A circuit with equivalent logic, but all measurements at the end of the
82 circuit.
83 Raises:
84 ValueError: If sympy-based classical conditions are used, or if
85 conditions based on multi-qubit measurements exist. (The latter of
86 these is planned to be implemented soon).
87 NotImplementedError: When attempting to defer a measurement with a
88 confusion map. (https://github.com/quantumlib/Cirq/issues/5482)
89 """
90
91 circuit = transformer_primitives.unroll_circuit_op(circuit, deep=True, tags_to_check=None)
92 terminal_measurements = {op for _, op in find_terminal_measurements(circuit)}
93 measurement_qubits: Dict['cirq.MeasurementKey', List['_MeasurementQid']] = {}
94
95 def defer(op: 'cirq.Operation', _) -> 'cirq.OP_TREE':
96 if op in terminal_measurements:
97 return op
98 gate = op.gate
99 if isinstance(gate, ops.MeasurementGate):
100 if gate.confusion_map:
101 raise NotImplementedError(
102 "Deferring confused measurement is not implemented, but found "
103 f"measurement with key={gate.key} and non-empty confusion map."
104 )
105 key = value.MeasurementKey.parse_serialized(gate.key)
106 targets = [_MeasurementQid(key, q) for q in op.qubits]
107 measurement_qubits[key] = targets
108 cxs = [ops.CX(q, target) for q, target in zip(op.qubits, targets)]
109 xs = [ops.X(targets[i]) for i, b in enumerate(gate.full_invert_mask()) if b]
110 return cxs + xs
111 elif protocols.is_measurement(op):
112 return [defer(op, None) for op in protocols.decompose_once(op)]
113 elif op.classical_controls:
114 controls = []
115 for c in op.classical_controls:
116 if isinstance(c, value.KeyCondition):
117 if c.key not in measurement_qubits:
118 raise ValueError(f'Deferred measurement for key={c.key} not found.')
119 qubits = measurement_qubits[c.key]
120 if len(qubits) != 1:
121 # TODO: Multi-qubit conditions require
122 # https://github.com/quantumlib/Cirq/issues/4512
123 # Remember to update docstring above once this works.
124 raise ValueError('Only single qubit conditions are allowed.')
125 controls.extend(qubits)
126 else:
127 raise ValueError('Only KeyConditions are allowed.')
128 return op.without_classical_controls().controlled_by(
129 *controls, control_values=[tuple(range(1, q.dimension)) for q in controls]
130 )
131 return op
132
133 circuit = transformer_primitives.map_operations_and_unroll(
134 circuit=circuit,
135 map_func=defer,
136 tags_to_ignore=context.tags_to_ignore if context else (),
137 raise_if_add_qubits=False,
138 ).unfreeze()
139 for k, qubits in measurement_qubits.items():
140 circuit.append(ops.measure(*qubits, key=k))
141 return circuit
142
143
144 @transformer_api.transformer
145 def dephase_measurements(
146 circuit: 'cirq.AbstractCircuit',
147 *,
148 context: Optional['cirq.TransformerContext'] = transformer_api.TransformerContext(deep=True),
149 ) -> 'cirq.Circuit':
150 """Changes all measurements to a dephase operation.
151
152 This transformer is useful when using a density matrix simulator, when
153 wishing to calculate the final density matrix of a circuit and not simulate
154 the measurements themselves.
155
156 Args:
157 circuit: The circuit to transform. It will not be modified.
158 context: `cirq.TransformerContext` storing common configurable options
159 for transformers. The default has `deep=True` to ensure
160 measurements at all levels are dephased.
161 Returns:
162 A copy of the circuit, with dephase operations in place of all
163 measurements.
164 Raises:
165 ValueError: If the circuit contains classical controls. In this case,
166 it is required to change these to quantum controls via
167 `cirq.defer_measurements` first. Since deferral adds ancilla qubits
168 to the circuit, this is not done automatically, to prevent
169 surprises.
170 """
171
172 def dephase(op: 'cirq.Operation', _) -> 'cirq.OP_TREE':
173 gate = op.gate
174 if isinstance(gate, ops.MeasurementGate):
175 key = value.MeasurementKey.parse_serialized(gate.key)
176 return ops.KrausChannel.from_channel(ops.phase_damp(1), key=key).on_each(op.qubits)
177 elif isinstance(op, ops.ClassicallyControlledOperation):
178 raise ValueError('Use cirq.defer_measurements first to remove classical controls.')
179 return op
180
181 ignored = () if context is None else context.tags_to_ignore
182 return transformer_primitives.map_operations(
183 circuit, dephase, deep=context.deep if context else True, tags_to_ignore=ignored
184 ).unfreeze()
185
186
187 @transformer_api.transformer
188 def drop_terminal_measurements(
189 circuit: 'cirq.AbstractCircuit',
190 *,
191 context: Optional['cirq.TransformerContext'] = transformer_api.TransformerContext(deep=True),
192 ) -> 'cirq.Circuit':
193 """Removes terminal measurements from a circuit.
194
195 This transformer is helpful when trying to capture the final state vector
196 of a circuit with many terminal measurements, as simulating the circuit
197 with those measurements in place would otherwise collapse the final state.
198
199 Args:
200 circuit: The circuit to transform. It will not be modified.
201 context: `cirq.TransformerContext` storing common configurable options
202 for transformers. The default has `deep=True`, as "terminal
203 measurements" is ill-defined without inspecting subcircuits;
204 passing a context with `deep=False` will return an error.
205 Returns:
206 A copy of the circuit, with identity or X gates in place of terminal
207 measurements.
208 Raises:
209 ValueError: if the circuit contains non-terminal measurements, or if
210 the provided context has`deep=False`.
211 """
212
213 if context is None or not context.deep:
214 raise ValueError(
215 'Context has `deep=False`, but `deep=True` is required to drop terminal measurements.'
216 )
217
218 if not circuit.are_all_measurements_terminal():
219 raise ValueError('Circuit contains a non-terminal measurement.')
220
221 def flip_inversion(op: 'cirq.Operation', _) -> 'cirq.OP_TREE':
222 if isinstance(op.gate, ops.MeasurementGate):
223 return [
224 ops.X(q) if b else ops.I(q) for q, b in zip(op.qubits, op.gate.full_invert_mask())
225 ]
226 return op
227
228 ignored = () if context is None else context.tags_to_ignore
229 return transformer_primitives.map_operations(
230 circuit, flip_inversion, deep=context.deep if context else True, tags_to_ignore=ignored
231 ).unfreeze()
232
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/cirq-core/cirq/transformers/measurement_transformers.py b/cirq-core/cirq/transformers/measurement_transformers.py
--- a/cirq-core/cirq/transformers/measurement_transformers.py
+++ b/cirq-core/cirq/transformers/measurement_transformers.py
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import itertools
from typing import Any, Dict, List, Optional, TYPE_CHECKING, Union
from cirq import ops, protocols, value
@@ -81,9 +82,7 @@
A circuit with equivalent logic, but all measurements at the end of the
circuit.
Raises:
- ValueError: If sympy-based classical conditions are used, or if
- conditions based on multi-qubit measurements exist. (The latter of
- these is planned to be implemented soon).
+ ValueError: If sympy-based classical conditions are used.
NotImplementedError: When attempting to defer a measurement with a
confusion map. (https://github.com/quantumlib/Cirq/issues/5482)
"""
@@ -111,23 +110,22 @@
elif protocols.is_measurement(op):
return [defer(op, None) for op in protocols.decompose_once(op)]
elif op.classical_controls:
- controls = []
+ new_op = op.without_classical_controls()
for c in op.classical_controls:
if isinstance(c, value.KeyCondition):
if c.key not in measurement_qubits:
raise ValueError(f'Deferred measurement for key={c.key} not found.')
- qubits = measurement_qubits[c.key]
- if len(qubits) != 1:
- # TODO: Multi-qubit conditions require
- # https://github.com/quantumlib/Cirq/issues/4512
- # Remember to update docstring above once this works.
- raise ValueError('Only single qubit conditions are allowed.')
- controls.extend(qubits)
+ qs = measurement_qubits[c.key]
+ if len(qs) == 1:
+ control_values: Any = range(1, qs[0].dimension)
+ else:
+ all_values = itertools.product(*[range(q.dimension) for q in qs])
+ anything_but_all_zeros = tuple(itertools.islice(all_values, 1, None))
+ control_values = ops.SumOfProducts(anything_but_all_zeros)
+ new_op = new_op.controlled_by(*qs, control_values=control_values)
else:
raise ValueError('Only KeyConditions are allowed.')
- return op.without_classical_controls().controlled_by(
- *controls, control_values=[tuple(range(1, q.dimension)) for q in controls]
- )
+ return new_op
return op
circuit = transformer_primitives.map_operations_and_unroll(
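A hypothetical usage sketch of the patched transformer follows; the qubits and circuit are invented for illustration, and it assumes the transformer is exposed as `cirq.defer_measurements`.

```python
# Sketch: a classical control keyed on a two-qubit measurement, which the change
# above is meant to allow instead of raising "Only single qubit conditions are allowed."
import cirq

q0, q1, q2 = cirq.LineQubit.range(3)
circuit = cirq.Circuit(
    cirq.measure(q0, q1, key='m'),            # multi-qubit measurement
    cirq.X(q2).with_classical_controls('m'),  # fires unless 'm' reads all zeros
)
deferred = cirq.defer_measurements(circuit)   # should succeed with the patch applied
print(deferred)
```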
|
{"golden_diff": "diff --git a/cirq-core/cirq/transformers/measurement_transformers.py b/cirq-core/cirq/transformers/measurement_transformers.py\n--- a/cirq-core/cirq/transformers/measurement_transformers.py\n+++ b/cirq-core/cirq/transformers/measurement_transformers.py\n@@ -12,6 +12,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+import itertools\n from typing import Any, Dict, List, Optional, TYPE_CHECKING, Union\n \n from cirq import ops, protocols, value\n@@ -81,9 +82,7 @@\n A circuit with equivalent logic, but all measurements at the end of the\n circuit.\n Raises:\n- ValueError: If sympy-based classical conditions are used, or if\n- conditions based on multi-qubit measurements exist. (The latter of\n- these is planned to be implemented soon).\n+ ValueError: If sympy-based classical conditions are used.\n NotImplementedError: When attempting to defer a measurement with a\n confusion map. (https://github.com/quantumlib/Cirq/issues/5482)\n \"\"\"\n@@ -111,23 +110,22 @@\n elif protocols.is_measurement(op):\n return [defer(op, None) for op in protocols.decompose_once(op)]\n elif op.classical_controls:\n- controls = []\n+ new_op = op.without_classical_controls()\n for c in op.classical_controls:\n if isinstance(c, value.KeyCondition):\n if c.key not in measurement_qubits:\n raise ValueError(f'Deferred measurement for key={c.key} not found.')\n- qubits = measurement_qubits[c.key]\n- if len(qubits) != 1:\n- # TODO: Multi-qubit conditions require\n- # https://github.com/quantumlib/Cirq/issues/4512\n- # Remember to update docstring above once this works.\n- raise ValueError('Only single qubit conditions are allowed.')\n- controls.extend(qubits)\n+ qs = measurement_qubits[c.key]\n+ if len(qs) == 1:\n+ control_values: Any = range(1, qs[0].dimension)\n+ else:\n+ all_values = itertools.product(*[range(q.dimension) for q in qs])\n+ anything_but_all_zeros = tuple(itertools.islice(all_values, 1, None))\n+ control_values = ops.SumOfProducts(anything_but_all_zeros)\n+ new_op = new_op.controlled_by(*qs, control_values=control_values)\n else:\n raise ValueError('Only KeyConditions are allowed.')\n- return op.without_classical_controls().controlled_by(\n- *controls, control_values=[tuple(range(1, q.dimension)) for q in controls]\n- )\n+ return new_op\n return op\n \n circuit = transformer_primitives.map_operations_and_unroll(\n", "issue": "Multi-qubit conditions in DeferredMeasurementsTransformer \nUse https://github.com/quantumlib/Cirq/pull/5755 to allow control keys from multi-qubit measurements in deferred measurements transformers. 
That new feature allows us to express \"anything but all zeros\", which is the condition required.\r\n\r\nhttps://github.com/quantumlib/Cirq/blob/77b61f500af3726ad4b34b06c72e21eac48d3db6/cirq-core/cirq/transformers/measurement_transformers.py#L121-L124\n", "before_files": [{"content": "# Copyright 2022 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Any, Dict, List, Optional, TYPE_CHECKING, Union\n\nfrom cirq import ops, protocols, value\nfrom cirq.transformers import transformer_api, transformer_primitives\nfrom cirq.transformers.synchronize_terminal_measurements import find_terminal_measurements\n\nif TYPE_CHECKING:\n import cirq\n\n\nclass _MeasurementQid(ops.Qid):\n \"\"\"A qubit that substitutes in for a deferred measurement.\n\n Exactly one qubit will be created per qubit in the measurement gate.\n \"\"\"\n\n def __init__(self, key: Union[str, 'cirq.MeasurementKey'], qid: 'cirq.Qid'):\n \"\"\"Initializes the qubit.\n\n Args:\n key: The key of the measurement gate being deferred.\n qid: One qubit that is being measured. Each deferred measurement\n should create one new _MeasurementQid per qubit being measured\n by that gate.\n \"\"\"\n self._key = value.MeasurementKey.parse_serialized(key) if isinstance(key, str) else key\n self._qid = qid\n\n @property\n def dimension(self) -> int:\n return self._qid.dimension\n\n def _comparison_key(self) -> Any:\n return (str(self._key), self._qid._comparison_key())\n\n def __str__(self) -> str:\n return f\"M('{self._key}', q={self._qid})\"\n\n def __repr__(self) -> str:\n return f'_MeasurementQid({self._key!r}, {self._qid!r})'\n\n\n@transformer_api.transformer\ndef defer_measurements(\n circuit: 'cirq.AbstractCircuit', *, context: Optional['cirq.TransformerContext'] = None\n) -> 'cirq.Circuit':\n \"\"\"Implements the Deferred Measurement Principle.\n\n Uses the Deferred Measurement Principle to move all measurements to the\n end of the circuit. All non-terminal measurements are changed to\n conditional quantum gates onto ancilla qubits, and classically controlled\n operations are transformed to quantum controls from those ancilla qubits.\n Finally, measurements of all ancilla qubits are appended to the end of the\n circuit.\n\n Optimizing deferred measurements is an area of active research, and future\n iterations may contain optimizations that reduce the number of ancilla\n qubits, so one should not depend on the exact shape of the output from this\n function. Only the logical equivalence is guaranteed to remain unchanged.\n Moment and subcircuit structure is not preserved.\n\n Args:\n circuit: The circuit to transform. It will not be modified.\n context: `cirq.TransformerContext` storing common configurable options\n for transformers.\n Returns:\n A circuit with equivalent logic, but all measurements at the end of the\n circuit.\n Raises:\n ValueError: If sympy-based classical conditions are used, or if\n conditions based on multi-qubit measurements exist. 
(The latter of\n these is planned to be implemented soon).\n NotImplementedError: When attempting to defer a measurement with a\n confusion map. (https://github.com/quantumlib/Cirq/issues/5482)\n \"\"\"\n\n circuit = transformer_primitives.unroll_circuit_op(circuit, deep=True, tags_to_check=None)\n terminal_measurements = {op for _, op in find_terminal_measurements(circuit)}\n measurement_qubits: Dict['cirq.MeasurementKey', List['_MeasurementQid']] = {}\n\n def defer(op: 'cirq.Operation', _) -> 'cirq.OP_TREE':\n if op in terminal_measurements:\n return op\n gate = op.gate\n if isinstance(gate, ops.MeasurementGate):\n if gate.confusion_map:\n raise NotImplementedError(\n \"Deferring confused measurement is not implemented, but found \"\n f\"measurement with key={gate.key} and non-empty confusion map.\"\n )\n key = value.MeasurementKey.parse_serialized(gate.key)\n targets = [_MeasurementQid(key, q) for q in op.qubits]\n measurement_qubits[key] = targets\n cxs = [ops.CX(q, target) for q, target in zip(op.qubits, targets)]\n xs = [ops.X(targets[i]) for i, b in enumerate(gate.full_invert_mask()) if b]\n return cxs + xs\n elif protocols.is_measurement(op):\n return [defer(op, None) for op in protocols.decompose_once(op)]\n elif op.classical_controls:\n controls = []\n for c in op.classical_controls:\n if isinstance(c, value.KeyCondition):\n if c.key not in measurement_qubits:\n raise ValueError(f'Deferred measurement for key={c.key} not found.')\n qubits = measurement_qubits[c.key]\n if len(qubits) != 1:\n # TODO: Multi-qubit conditions require\n # https://github.com/quantumlib/Cirq/issues/4512\n # Remember to update docstring above once this works.\n raise ValueError('Only single qubit conditions are allowed.')\n controls.extend(qubits)\n else:\n raise ValueError('Only KeyConditions are allowed.')\n return op.without_classical_controls().controlled_by(\n *controls, control_values=[tuple(range(1, q.dimension)) for q in controls]\n )\n return op\n\n circuit = transformer_primitives.map_operations_and_unroll(\n circuit=circuit,\n map_func=defer,\n tags_to_ignore=context.tags_to_ignore if context else (),\n raise_if_add_qubits=False,\n ).unfreeze()\n for k, qubits in measurement_qubits.items():\n circuit.append(ops.measure(*qubits, key=k))\n return circuit\n\n\n@transformer_api.transformer\ndef dephase_measurements(\n circuit: 'cirq.AbstractCircuit',\n *,\n context: Optional['cirq.TransformerContext'] = transformer_api.TransformerContext(deep=True),\n) -> 'cirq.Circuit':\n \"\"\"Changes all measurements to a dephase operation.\n\n This transformer is useful when using a density matrix simulator, when\n wishing to calculate the final density matrix of a circuit and not simulate\n the measurements themselves.\n\n Args:\n circuit: The circuit to transform. It will not be modified.\n context: `cirq.TransformerContext` storing common configurable options\n for transformers. The default has `deep=True` to ensure\n measurements at all levels are dephased.\n Returns:\n A copy of the circuit, with dephase operations in place of all\n measurements.\n Raises:\n ValueError: If the circuit contains classical controls. In this case,\n it is required to change these to quantum controls via\n `cirq.defer_measurements` first. 
Since deferral adds ancilla qubits\n to the circuit, this is not done automatically, to prevent\n surprises.\n \"\"\"\n\n def dephase(op: 'cirq.Operation', _) -> 'cirq.OP_TREE':\n gate = op.gate\n if isinstance(gate, ops.MeasurementGate):\n key = value.MeasurementKey.parse_serialized(gate.key)\n return ops.KrausChannel.from_channel(ops.phase_damp(1), key=key).on_each(op.qubits)\n elif isinstance(op, ops.ClassicallyControlledOperation):\n raise ValueError('Use cirq.defer_measurements first to remove classical controls.')\n return op\n\n ignored = () if context is None else context.tags_to_ignore\n return transformer_primitives.map_operations(\n circuit, dephase, deep=context.deep if context else True, tags_to_ignore=ignored\n ).unfreeze()\n\n\n@transformer_api.transformer\ndef drop_terminal_measurements(\n circuit: 'cirq.AbstractCircuit',\n *,\n context: Optional['cirq.TransformerContext'] = transformer_api.TransformerContext(deep=True),\n) -> 'cirq.Circuit':\n \"\"\"Removes terminal measurements from a circuit.\n\n This transformer is helpful when trying to capture the final state vector\n of a circuit with many terminal measurements, as simulating the circuit\n with those measurements in place would otherwise collapse the final state.\n\n Args:\n circuit: The circuit to transform. It will not be modified.\n context: `cirq.TransformerContext` storing common configurable options\n for transformers. The default has `deep=True`, as \"terminal\n measurements\" is ill-defined without inspecting subcircuits;\n passing a context with `deep=False` will return an error.\n Returns:\n A copy of the circuit, with identity or X gates in place of terminal\n measurements.\n Raises:\n ValueError: if the circuit contains non-terminal measurements, or if\n the provided context has`deep=False`.\n \"\"\"\n\n if context is None or not context.deep:\n raise ValueError(\n 'Context has `deep=False`, but `deep=True` is required to drop terminal measurements.'\n )\n\n if not circuit.are_all_measurements_terminal():\n raise ValueError('Circuit contains a non-terminal measurement.')\n\n def flip_inversion(op: 'cirq.Operation', _) -> 'cirq.OP_TREE':\n if isinstance(op.gate, ops.MeasurementGate):\n return [\n ops.X(q) if b else ops.I(q) for q, b in zip(op.qubits, op.gate.full_invert_mask())\n ]\n return op\n\n ignored = () if context is None else context.tags_to_ignore\n return transformer_primitives.map_operations(\n circuit, flip_inversion, deep=context.deep if context else True, tags_to_ignore=ignored\n ).unfreeze()\n", "path": "cirq-core/cirq/transformers/measurement_transformers.py"}], "after_files": [{"content": "# Copyright 2022 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport itertools\nfrom typing import Any, Dict, List, Optional, TYPE_CHECKING, Union\n\nfrom cirq import ops, protocols, value\nfrom cirq.transformers import transformer_api, transformer_primitives\nfrom cirq.transformers.synchronize_terminal_measurements import find_terminal_measurements\n\nif TYPE_CHECKING:\n 
import cirq\n\n\nclass _MeasurementQid(ops.Qid):\n \"\"\"A qubit that substitutes in for a deferred measurement.\n\n Exactly one qubit will be created per qubit in the measurement gate.\n \"\"\"\n\n def __init__(self, key: Union[str, 'cirq.MeasurementKey'], qid: 'cirq.Qid'):\n \"\"\"Initializes the qubit.\n\n Args:\n key: The key of the measurement gate being deferred.\n qid: One qubit that is being measured. Each deferred measurement\n should create one new _MeasurementQid per qubit being measured\n by that gate.\n \"\"\"\n self._key = value.MeasurementKey.parse_serialized(key) if isinstance(key, str) else key\n self._qid = qid\n\n @property\n def dimension(self) -> int:\n return self._qid.dimension\n\n def _comparison_key(self) -> Any:\n return (str(self._key), self._qid._comparison_key())\n\n def __str__(self) -> str:\n return f\"M('{self._key}', q={self._qid})\"\n\n def __repr__(self) -> str:\n return f'_MeasurementQid({self._key!r}, {self._qid!r})'\n\n\n@transformer_api.transformer\ndef defer_measurements(\n circuit: 'cirq.AbstractCircuit', *, context: Optional['cirq.TransformerContext'] = None\n) -> 'cirq.Circuit':\n \"\"\"Implements the Deferred Measurement Principle.\n\n Uses the Deferred Measurement Principle to move all measurements to the\n end of the circuit. All non-terminal measurements are changed to\n conditional quantum gates onto ancilla qubits, and classically controlled\n operations are transformed to quantum controls from those ancilla qubits.\n Finally, measurements of all ancilla qubits are appended to the end of the\n circuit.\n\n Optimizing deferred measurements is an area of active research, and future\n iterations may contain optimizations that reduce the number of ancilla\n qubits, so one should not depend on the exact shape of the output from this\n function. Only the logical equivalence is guaranteed to remain unchanged.\n Moment and subcircuit structure is not preserved.\n\n Args:\n circuit: The circuit to transform. It will not be modified.\n context: `cirq.TransformerContext` storing common configurable options\n for transformers.\n Returns:\n A circuit with equivalent logic, but all measurements at the end of the\n circuit.\n Raises:\n ValueError: If sympy-based classical conditions are used.\n NotImplementedError: When attempting to defer a measurement with a\n confusion map. 
(https://github.com/quantumlib/Cirq/issues/5482)\n \"\"\"\n\n circuit = transformer_primitives.unroll_circuit_op(circuit, deep=True, tags_to_check=None)\n terminal_measurements = {op for _, op in find_terminal_measurements(circuit)}\n measurement_qubits: Dict['cirq.MeasurementKey', List['_MeasurementQid']] = {}\n\n def defer(op: 'cirq.Operation', _) -> 'cirq.OP_TREE':\n if op in terminal_measurements:\n return op\n gate = op.gate\n if isinstance(gate, ops.MeasurementGate):\n if gate.confusion_map:\n raise NotImplementedError(\n \"Deferring confused measurement is not implemented, but found \"\n f\"measurement with key={gate.key} and non-empty confusion map.\"\n )\n key = value.MeasurementKey.parse_serialized(gate.key)\n targets = [_MeasurementQid(key, q) for q in op.qubits]\n measurement_qubits[key] = targets\n cxs = [ops.CX(q, target) for q, target in zip(op.qubits, targets)]\n xs = [ops.X(targets[i]) for i, b in enumerate(gate.full_invert_mask()) if b]\n return cxs + xs\n elif protocols.is_measurement(op):\n return [defer(op, None) for op in protocols.decompose_once(op)]\n elif op.classical_controls:\n new_op = op.without_classical_controls()\n for c in op.classical_controls:\n if isinstance(c, value.KeyCondition):\n if c.key not in measurement_qubits:\n raise ValueError(f'Deferred measurement for key={c.key} not found.')\n qs = measurement_qubits[c.key]\n if len(qs) == 1:\n control_values: Any = range(1, qs[0].dimension)\n else:\n all_values = itertools.product(*[range(q.dimension) for q in qs])\n anything_but_all_zeros = tuple(itertools.islice(all_values, 1, None))\n control_values = ops.SumOfProducts(anything_but_all_zeros)\n new_op = new_op.controlled_by(*qs, control_values=control_values)\n else:\n raise ValueError('Only KeyConditions are allowed.')\n return new_op\n return op\n\n circuit = transformer_primitives.map_operations_and_unroll(\n circuit=circuit,\n map_func=defer,\n tags_to_ignore=context.tags_to_ignore if context else (),\n raise_if_add_qubits=False,\n ).unfreeze()\n for k, qubits in measurement_qubits.items():\n circuit.append(ops.measure(*qubits, key=k))\n return circuit\n\n\n@transformer_api.transformer\ndef dephase_measurements(\n circuit: 'cirq.AbstractCircuit',\n *,\n context: Optional['cirq.TransformerContext'] = transformer_api.TransformerContext(deep=True),\n) -> 'cirq.Circuit':\n \"\"\"Changes all measurements to a dephase operation.\n\n This transformer is useful when using a density matrix simulator, when\n wishing to calculate the final density matrix of a circuit and not simulate\n the measurements themselves.\n\n Args:\n circuit: The circuit to transform. It will not be modified.\n context: `cirq.TransformerContext` storing common configurable options\n for transformers. The default has `deep=True` to ensure\n measurements at all levels are dephased.\n Returns:\n A copy of the circuit, with dephase operations in place of all\n measurements.\n Raises:\n ValueError: If the circuit contains classical controls. In this case,\n it is required to change these to quantum controls via\n `cirq.defer_measurements` first. 
Since deferral adds ancilla qubits\n to the circuit, this is not done automatically, to prevent\n surprises.\n \"\"\"\n\n def dephase(op: 'cirq.Operation', _) -> 'cirq.OP_TREE':\n gate = op.gate\n if isinstance(gate, ops.MeasurementGate):\n key = value.MeasurementKey.parse_serialized(gate.key)\n return ops.KrausChannel.from_channel(ops.phase_damp(1), key=key).on_each(op.qubits)\n elif isinstance(op, ops.ClassicallyControlledOperation):\n raise ValueError('Use cirq.defer_measurements first to remove classical controls.')\n return op\n\n ignored = () if context is None else context.tags_to_ignore\n return transformer_primitives.map_operations(\n circuit, dephase, deep=context.deep if context else True, tags_to_ignore=ignored\n ).unfreeze()\n\n\n@transformer_api.transformer\ndef drop_terminal_measurements(\n circuit: 'cirq.AbstractCircuit',\n *,\n context: Optional['cirq.TransformerContext'] = transformer_api.TransformerContext(deep=True),\n) -> 'cirq.Circuit':\n \"\"\"Removes terminal measurements from a circuit.\n\n This transformer is helpful when trying to capture the final state vector\n of a circuit with many terminal measurements, as simulating the circuit\n with those measurements in place would otherwise collapse the final state.\n\n Args:\n circuit: The circuit to transform. It will not be modified.\n context: `cirq.TransformerContext` storing common configurable options\n for transformers. The default has `deep=True`, as \"terminal\n measurements\" is ill-defined without inspecting subcircuits;\n passing a context with `deep=False` will return an error.\n Returns:\n A copy of the circuit, with identity or X gates in place of terminal\n measurements.\n Raises:\n ValueError: if the circuit contains non-terminal measurements, or if\n the provided context has`deep=False`.\n \"\"\"\n\n if context is None or not context.deep:\n raise ValueError(\n 'Context has `deep=False`, but `deep=True` is required to drop terminal measurements.'\n )\n\n if not circuit.are_all_measurements_terminal():\n raise ValueError('Circuit contains a non-terminal measurement.')\n\n def flip_inversion(op: 'cirq.Operation', _) -> 'cirq.OP_TREE':\n if isinstance(op.gate, ops.MeasurementGate):\n return [\n ops.X(q) if b else ops.I(q) for q, b in zip(op.qubits, op.gate.full_invert_mask())\n ]\n return op\n\n ignored = () if context is None else context.tags_to_ignore\n return transformer_primitives.map_operations(\n circuit, flip_inversion, deep=context.deep if context else True, tags_to_ignore=ignored\n ).unfreeze()\n", "path": "cirq-core/cirq/transformers/measurement_transformers.py"}]}
| 3,161 | 632 |
gh_patches_debug_14786
|
rasdani/github-patches
|
git_diff
|
celery__celery-3392
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
test_retry_kwargs_can_be_empty fails on pypy3
From https://travis-ci.org/celery/celery/jobs/151613800:
```
======================================================================
ERROR: test_retry_kwargs_can_be_empty (celery.tests.tasks.test_tasks.test_task_retries)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/travis/build/celery/celery/celery/tests/tasks/test_tasks.py", line 178, in test_retry_kwargs_can_be_empty
self.retry_task_mockapply.retry(args=[4, 4], kwargs=None)
File "/home/travis/build/celery/celery/celery/app/task.py", line 611, in retry
raise_with_context(exc or Retry('Task can be retried', None))
File "/home/travis/build/celery/celery/celery/utils/serialization.py", line 255, in raise_with_context
_raise_with_context(exc, exc_info[1])
File "<string>", line 1, in _raise_with_context
TypeError: exception causes must derive from BaseException, not NoneType
```
https://github.com/celery/celery/blob/5031d6f27862001d3e3bc5a2dacf1185c933f2c9/celery/tests/tasks/test_tasks.py#L169
--- END ISSUE ---
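The failure reduces to a language-level difference that can be reproduced outside Celery: `raise exc from cause` accepts a `None` cause only on Python 3.3+, while Python 3.2 (which the PyPy3 build on Travis targets) rejects it with exactly the `TypeError` shown in the traceback. A minimal sketch, independent of Celery:

```python
# Standalone illustration of the "raise ... from ..." behaviour the test trips over.
def raise_with_cause(exc, cause):
    # On Python >= 3.3 a cause of None simply suppresses the context;
    # on Python 3.2-era interpreters it is a TypeError.
    raise exc from cause

try:
    raise_with_cause(ValueError("boom"), None)
except ValueError:
    print("Python >= 3.3 semantics: a None cause is accepted")
except TypeError as err:
    print("Python 3.2 semantics:", err)
```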
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `celery/utils/serialization.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """Utilities for safely pickling exceptions."""
3 from __future__ import absolute_import, unicode_literals
4
5 import datetime
6 import numbers
7 import sys
8
9 from base64 import b64encode as base64encode, b64decode as base64decode
10 from functools import partial
11 from inspect import getmro
12 from itertools import takewhile
13
14 from kombu.utils.encoding import bytes_to_str, str_to_bytes
15
16 from celery.five import (
17 bytes_if_py2, python_2_unicode_compatible, items, reraise, string_t,
18 )
19
20 from .encoding import safe_repr
21
22 try:
23 import cPickle as pickle
24 except ImportError:
25 import pickle # noqa
26
27 PY3 = sys.version_info[0] >= 3
28
29 __all__ = [
30 'UnpickleableExceptionWrapper', 'subclass_exception',
31 'find_pickleable_exception', 'create_exception_cls',
32 'get_pickleable_exception', 'get_pickleable_etype',
33 'get_pickled_exception', 'strtobool',
34 ]
35
36 #: List of base classes we probably don't want to reduce to.
37 try:
38 unwanted_base_classes = (StandardError, Exception, BaseException, object)
39 except NameError: # pragma: no cover
40 unwanted_base_classes = (Exception, BaseException, object) # py3k
41
42
43 def subclass_exception(name, parent, module): # noqa
44 return type(bytes_if_py2(name), (parent,), {'__module__': module})
45
46
47 def find_pickleable_exception(exc, loads=pickle.loads,
48 dumps=pickle.dumps):
49 """With an exception instance, iterate over its super classes (by MRO)
50 and find the first super exception that's pickleable. It does
51 not go below :exc:`Exception` (i.e., it skips :exc:`Exception`,
52 :class:`BaseException` and :class:`object`). If that happens
53 you should use :exc:`UnpickleableException` instead.
54
55 Arguments:
56 exc (BaseException): An exception instance.
57
58 Returns:
59 Exception: Nearest pickleable parent exception class
60 (except :exc:`Exception` and parents), or if the exception is
61 pickleable it will return :const:`None`.
62 """
63 exc_args = getattr(exc, 'args', [])
64 for supercls in itermro(exc.__class__, unwanted_base_classes):
65 try:
66 superexc = supercls(*exc_args)
67 loads(dumps(superexc))
68 except:
69 pass
70 else:
71 return superexc
72
73
74 def itermro(cls, stop):
75 return takewhile(lambda sup: sup not in stop, getmro(cls))
76
77
78 def create_exception_cls(name, module, parent=None):
79 """Dynamically create an exception class."""
80 if not parent:
81 parent = Exception
82 return subclass_exception(name, parent, module)
83
84
85 @python_2_unicode_compatible
86 class UnpickleableExceptionWrapper(Exception):
87 """Wraps unpickleable exceptions.
88
89 Arguments:
90 exc_module (str): See :attr:`exc_module`.
91 exc_cls_name (str): See :attr:`exc_cls_name`.
92 exc_args (Tuple[Any, ...]): See :attr:`exc_args`.
93
94 Example:
95 >>> def pickle_it(raising_function):
96 ... try:
97 ... raising_function()
98 ... except Exception as e:
99 ... exc = UnpickleableExceptionWrapper(
100 ... e.__class__.__module__,
101 ... e.__class__.__name__,
102 ... e.args,
103 ... )
104 ... pickle.dumps(exc) # Works fine.
105 """
106
107 #: The module of the original exception.
108 exc_module = None
109
110 #: The name of the original exception class.
111 exc_cls_name = None
112
113 #: The arguments for the original exception.
114 exc_args = None
115
116 def __init__(self, exc_module, exc_cls_name, exc_args, text=None):
117 safe_exc_args = []
118 for arg in exc_args:
119 try:
120 pickle.dumps(arg)
121 safe_exc_args.append(arg)
122 except Exception:
123 safe_exc_args.append(safe_repr(arg))
124 self.exc_module = exc_module
125 self.exc_cls_name = exc_cls_name
126 self.exc_args = safe_exc_args
127 self.text = text
128 Exception.__init__(self, exc_module, exc_cls_name, safe_exc_args, text)
129
130 def restore(self):
131 return create_exception_cls(self.exc_cls_name,
132 self.exc_module)(*self.exc_args)
133
134 def __str__(self):
135 return self.text
136
137 @classmethod
138 def from_exception(cls, exc):
139 return cls(exc.__class__.__module__,
140 exc.__class__.__name__,
141 getattr(exc, 'args', []),
142 safe_repr(exc))
143
144
145 def get_pickleable_exception(exc):
146 """Make sure exception is pickleable."""
147 try:
148 pickle.loads(pickle.dumps(exc))
149 except Exception:
150 pass
151 else:
152 return exc
153 nearest = find_pickleable_exception(exc)
154 if nearest:
155 return nearest
156 return UnpickleableExceptionWrapper.from_exception(exc)
157
158
159 def get_pickleable_etype(cls, loads=pickle.loads, dumps=pickle.dumps):
160 try:
161 loads(dumps(cls))
162 except:
163 return Exception
164 else:
165 return cls
166
167
168 def get_pickled_exception(exc):
169 """Get original exception from exception pickled using
170 :meth:`get_pickleable_exception`."""
171 if isinstance(exc, UnpickleableExceptionWrapper):
172 return exc.restore()
173 return exc
174
175
176 def b64encode(s):
177 return bytes_to_str(base64encode(str_to_bytes(s)))
178
179
180 def b64decode(s):
181 return base64decode(str_to_bytes(s))
182
183
184 def strtobool(term, table={'false': False, 'no': False, '0': False,
185 'true': True, 'yes': True, '1': True,
186 'on': True, 'off': False}):
187 """Convert common terms for true/false to bool
188 (true/false/yes/no/on/off/1/0)."""
189 if isinstance(term, string_t):
190 try:
191 return table[term.lower()]
192 except KeyError:
193 raise TypeError('Cannot coerce {0!r} to type bool'.format(term))
194 return term
195
196
197 def jsonify(obj,
198 builtin_types=(numbers.Real, string_t), key=None,
199 keyfilter=None,
200 unknown_type_filter=None):
201 """Transforms object making it suitable for json serialization"""
202 from kombu.abstract import Object as KombuDictType
203 _jsonify = partial(jsonify, builtin_types=builtin_types, key=key,
204 keyfilter=keyfilter,
205 unknown_type_filter=unknown_type_filter)
206
207 if isinstance(obj, KombuDictType):
208 obj = obj.as_dict(recurse=True)
209
210 if obj is None or isinstance(obj, builtin_types):
211 return obj
212 elif isinstance(obj, (tuple, list)):
213 return [_jsonify(v) for v in obj]
214 elif isinstance(obj, dict):
215 return {
216 k: _jsonify(v, key=k) for k, v in items(obj)
217 if (keyfilter(k) if keyfilter else 1)
218 }
219 elif isinstance(obj, datetime.datetime):
220 # See "Date Time String Format" in the ECMA-262 specification.
221 r = obj.isoformat()
222 if obj.microsecond:
223 r = r[:23] + r[26:]
224 if r.endswith('+00:00'):
225 r = r[:-6] + 'Z'
226 return r
227 elif isinstance(obj, datetime.date):
228 return obj.isoformat()
229 elif isinstance(obj, datetime.time):
230 r = obj.isoformat()
231 if obj.microsecond:
232 r = r[:12]
233 return r
234 elif isinstance(obj, datetime.timedelta):
235 return str(obj)
236 else:
237 if unknown_type_filter is None:
238 raise ValueError(
239 'Unsupported type: {0!r} {1!r} (parent: {2})'.format(
240 type(obj), obj, key))
241 return unknown_type_filter(obj)
242
243
244 if PY3:
245 from vine.five import exec_
246 _raise_with_context = None # for flake8
247 exec_("""def _raise_with_context(exc, ctx): raise exc from ctx""")
248
249 def raise_with_context(exc):
250 exc_info = sys.exc_info()
251 if not exc_info:
252 raise exc
253 elif exc_info[1] is exc:
254 raise
255 _raise_with_context(exc, exc_info[1])
256 else:
257 def raise_with_context(exc):
258 exc_info = sys.exc_info()
259 if not exc_info:
260 raise exc
261 if exc_info[1] is exc:
262 raise
263 elif exc_info[2]:
264 reraise(type(exc), exc, exc_info[2])
265 raise exc
266
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/celery/utils/serialization.py b/celery/utils/serialization.py
--- a/celery/utils/serialization.py
+++ b/celery/utils/serialization.py
@@ -24,7 +24,7 @@
except ImportError:
import pickle # noqa
-PY3 = sys.version_info[0] >= 3
+PY33 = sys.version_info >= (3, 3)
__all__ = [
'UnpickleableExceptionWrapper', 'subclass_exception',
@@ -241,7 +241,9 @@
return unknown_type_filter(obj)
-if PY3:
+# Since PyPy 3 targets Python 3.2, 'raise exc from None' will
+# raise a TypeError so we need to look for Python 3.3 or newer
+if PY33:
from vine.five import exec_
_raise_with_context = None # for flake8
exec_("""def _raise_with_context(exc, ctx): raise exc from ctx""")
|
{"golden_diff": "diff --git a/celery/utils/serialization.py b/celery/utils/serialization.py\n--- a/celery/utils/serialization.py\n+++ b/celery/utils/serialization.py\n@@ -24,7 +24,7 @@\n except ImportError:\n import pickle # noqa\n \n-PY3 = sys.version_info[0] >= 3\n+PY33 = sys.version_info >= (3, 3)\n \n __all__ = [\n 'UnpickleableExceptionWrapper', 'subclass_exception',\n@@ -241,7 +241,9 @@\n return unknown_type_filter(obj)\n \n \n-if PY3:\n+# Since PyPy 3 targets Python 3.2, 'raise exc from None' will\n+# raise a TypeError so we need to look for Python 3.3 or newer\n+if PY33:\n from vine.five import exec_\n _raise_with_context = None # for flake8\n exec_(\"\"\"def _raise_with_context(exc, ctx): raise exc from ctx\"\"\")\n", "issue": "test_retry_kwargs_can_be_empty fails on pypy3\nFrom https://travis-ci.org/celery/celery/jobs/151613800:\n\n```\n======================================================================\nERROR: test_retry_kwargs_can_be_empty (celery.tests.tasks.test_tasks.test_task_retries)\n----------------------------------------------------------------------\nTraceback (most recent call last):\n File \"/home/travis/build/celery/celery/celery/tests/tasks/test_tasks.py\", line 178, in test_retry_kwargs_can_be_empty\n self.retry_task_mockapply.retry(args=[4, 4], kwargs=None)\n File \"/home/travis/build/celery/celery/celery/app/task.py\", line 611, in retry\n raise_with_context(exc or Retry('Task can be retried', None))\n File \"/home/travis/build/celery/celery/celery/utils/serialization.py\", line 255, in raise_with_context\n _raise_with_context(exc, exc_info[1])\n File \"<string>\", line 1, in _raise_with_context\nTypeError: exception causes must derive from BaseException, not NoneType\n```\n\nhttps://github.com/celery/celery/blob/5031d6f27862001d3e3bc5a2dacf1185c933f2c9/celery/tests/tasks/test_tasks.py#L169\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Utilities for safely pickling exceptions.\"\"\"\nfrom __future__ import absolute_import, unicode_literals\n\nimport datetime\nimport numbers\nimport sys\n\nfrom base64 import b64encode as base64encode, b64decode as base64decode\nfrom functools import partial\nfrom inspect import getmro\nfrom itertools import takewhile\n\nfrom kombu.utils.encoding import bytes_to_str, str_to_bytes\n\nfrom celery.five import (\n bytes_if_py2, python_2_unicode_compatible, items, reraise, string_t,\n)\n\nfrom .encoding import safe_repr\n\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle # noqa\n\nPY3 = sys.version_info[0] >= 3\n\n__all__ = [\n 'UnpickleableExceptionWrapper', 'subclass_exception',\n 'find_pickleable_exception', 'create_exception_cls',\n 'get_pickleable_exception', 'get_pickleable_etype',\n 'get_pickled_exception', 'strtobool',\n]\n\n#: List of base classes we probably don't want to reduce to.\ntry:\n unwanted_base_classes = (StandardError, Exception, BaseException, object)\nexcept NameError: # pragma: no cover\n unwanted_base_classes = (Exception, BaseException, object) # py3k\n\n\ndef subclass_exception(name, parent, module): # noqa\n return type(bytes_if_py2(name), (parent,), {'__module__': module})\n\n\ndef find_pickleable_exception(exc, loads=pickle.loads,\n dumps=pickle.dumps):\n \"\"\"With an exception instance, iterate over its super classes (by MRO)\n and find the first super exception that's pickleable. It does\n not go below :exc:`Exception` (i.e., it skips :exc:`Exception`,\n :class:`BaseException` and :class:`object`). 
If that happens\n you should use :exc:`UnpickleableException` instead.\n\n Arguments:\n exc (BaseException): An exception instance.\n\n Returns:\n Exception: Nearest pickleable parent exception class\n (except :exc:`Exception` and parents), or if the exception is\n pickleable it will return :const:`None`.\n \"\"\"\n exc_args = getattr(exc, 'args', [])\n for supercls in itermro(exc.__class__, unwanted_base_classes):\n try:\n superexc = supercls(*exc_args)\n loads(dumps(superexc))\n except:\n pass\n else:\n return superexc\n\n\ndef itermro(cls, stop):\n return takewhile(lambda sup: sup not in stop, getmro(cls))\n\n\ndef create_exception_cls(name, module, parent=None):\n \"\"\"Dynamically create an exception class.\"\"\"\n if not parent:\n parent = Exception\n return subclass_exception(name, parent, module)\n\n\n@python_2_unicode_compatible\nclass UnpickleableExceptionWrapper(Exception):\n \"\"\"Wraps unpickleable exceptions.\n\n Arguments:\n exc_module (str): See :attr:`exc_module`.\n exc_cls_name (str): See :attr:`exc_cls_name`.\n exc_args (Tuple[Any, ...]): See :attr:`exc_args`.\n\n Example:\n >>> def pickle_it(raising_function):\n ... try:\n ... raising_function()\n ... except Exception as e:\n ... exc = UnpickleableExceptionWrapper(\n ... e.__class__.__module__,\n ... e.__class__.__name__,\n ... e.args,\n ... )\n ... pickle.dumps(exc) # Works fine.\n \"\"\"\n\n #: The module of the original exception.\n exc_module = None\n\n #: The name of the original exception class.\n exc_cls_name = None\n\n #: The arguments for the original exception.\n exc_args = None\n\n def __init__(self, exc_module, exc_cls_name, exc_args, text=None):\n safe_exc_args = []\n for arg in exc_args:\n try:\n pickle.dumps(arg)\n safe_exc_args.append(arg)\n except Exception:\n safe_exc_args.append(safe_repr(arg))\n self.exc_module = exc_module\n self.exc_cls_name = exc_cls_name\n self.exc_args = safe_exc_args\n self.text = text\n Exception.__init__(self, exc_module, exc_cls_name, safe_exc_args, text)\n\n def restore(self):\n return create_exception_cls(self.exc_cls_name,\n self.exc_module)(*self.exc_args)\n\n def __str__(self):\n return self.text\n\n @classmethod\n def from_exception(cls, exc):\n return cls(exc.__class__.__module__,\n exc.__class__.__name__,\n getattr(exc, 'args', []),\n safe_repr(exc))\n\n\ndef get_pickleable_exception(exc):\n \"\"\"Make sure exception is pickleable.\"\"\"\n try:\n pickle.loads(pickle.dumps(exc))\n except Exception:\n pass\n else:\n return exc\n nearest = find_pickleable_exception(exc)\n if nearest:\n return nearest\n return UnpickleableExceptionWrapper.from_exception(exc)\n\n\ndef get_pickleable_etype(cls, loads=pickle.loads, dumps=pickle.dumps):\n try:\n loads(dumps(cls))\n except:\n return Exception\n else:\n return cls\n\n\ndef get_pickled_exception(exc):\n \"\"\"Get original exception from exception pickled using\n :meth:`get_pickleable_exception`.\"\"\"\n if isinstance(exc, UnpickleableExceptionWrapper):\n return exc.restore()\n return exc\n\n\ndef b64encode(s):\n return bytes_to_str(base64encode(str_to_bytes(s)))\n\n\ndef b64decode(s):\n return base64decode(str_to_bytes(s))\n\n\ndef strtobool(term, table={'false': False, 'no': False, '0': False,\n 'true': True, 'yes': True, '1': True,\n 'on': True, 'off': False}):\n \"\"\"Convert common terms for true/false to bool\n (true/false/yes/no/on/off/1/0).\"\"\"\n if isinstance(term, string_t):\n try:\n return table[term.lower()]\n except KeyError:\n raise TypeError('Cannot coerce {0!r} to type bool'.format(term))\n return term\n\n\ndef 
jsonify(obj,\n builtin_types=(numbers.Real, string_t), key=None,\n keyfilter=None,\n unknown_type_filter=None):\n \"\"\"Transforms object making it suitable for json serialization\"\"\"\n from kombu.abstract import Object as KombuDictType\n _jsonify = partial(jsonify, builtin_types=builtin_types, key=key,\n keyfilter=keyfilter,\n unknown_type_filter=unknown_type_filter)\n\n if isinstance(obj, KombuDictType):\n obj = obj.as_dict(recurse=True)\n\n if obj is None or isinstance(obj, builtin_types):\n return obj\n elif isinstance(obj, (tuple, list)):\n return [_jsonify(v) for v in obj]\n elif isinstance(obj, dict):\n return {\n k: _jsonify(v, key=k) for k, v in items(obj)\n if (keyfilter(k) if keyfilter else 1)\n }\n elif isinstance(obj, datetime.datetime):\n # See \"Date Time String Format\" in the ECMA-262 specification.\n r = obj.isoformat()\n if obj.microsecond:\n r = r[:23] + r[26:]\n if r.endswith('+00:00'):\n r = r[:-6] + 'Z'\n return r\n elif isinstance(obj, datetime.date):\n return obj.isoformat()\n elif isinstance(obj, datetime.time):\n r = obj.isoformat()\n if obj.microsecond:\n r = r[:12]\n return r\n elif isinstance(obj, datetime.timedelta):\n return str(obj)\n else:\n if unknown_type_filter is None:\n raise ValueError(\n 'Unsupported type: {0!r} {1!r} (parent: {2})'.format(\n type(obj), obj, key))\n return unknown_type_filter(obj)\n\n\nif PY3:\n from vine.five import exec_\n _raise_with_context = None # for flake8\n exec_(\"\"\"def _raise_with_context(exc, ctx): raise exc from ctx\"\"\")\n\n def raise_with_context(exc):\n exc_info = sys.exc_info()\n if not exc_info:\n raise exc\n elif exc_info[1] is exc:\n raise\n _raise_with_context(exc, exc_info[1])\nelse:\n def raise_with_context(exc):\n exc_info = sys.exc_info()\n if not exc_info:\n raise exc\n if exc_info[1] is exc:\n raise\n elif exc_info[2]:\n reraise(type(exc), exc, exc_info[2])\n raise exc\n", "path": "celery/utils/serialization.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Utilities for safely pickling exceptions.\"\"\"\nfrom __future__ import absolute_import, unicode_literals\n\nimport datetime\nimport numbers\nimport sys\n\nfrom base64 import b64encode as base64encode, b64decode as base64decode\nfrom functools import partial\nfrom inspect import getmro\nfrom itertools import takewhile\n\nfrom kombu.utils.encoding import bytes_to_str, str_to_bytes\n\nfrom celery.five import (\n bytes_if_py2, python_2_unicode_compatible, items, reraise, string_t,\n)\n\nfrom .encoding import safe_repr\n\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle # noqa\n\nPY33 = sys.version_info >= (3, 3)\n\n__all__ = [\n 'UnpickleableExceptionWrapper', 'subclass_exception',\n 'find_pickleable_exception', 'create_exception_cls',\n 'get_pickleable_exception', 'get_pickleable_etype',\n 'get_pickled_exception', 'strtobool',\n]\n\n#: List of base classes we probably don't want to reduce to.\ntry:\n unwanted_base_classes = (StandardError, Exception, BaseException, object)\nexcept NameError: # pragma: no cover\n unwanted_base_classes = (Exception, BaseException, object) # py3k\n\n\ndef subclass_exception(name, parent, module): # noqa\n return type(bytes_if_py2(name), (parent,), {'__module__': module})\n\n\ndef find_pickleable_exception(exc, loads=pickle.loads,\n dumps=pickle.dumps):\n \"\"\"With an exception instance, iterate over its super classes (by MRO)\n and find the first super exception that's pickleable. 
It does\n not go below :exc:`Exception` (i.e., it skips :exc:`Exception`,\n :class:`BaseException` and :class:`object`). If that happens\n you should use :exc:`UnpickleableException` instead.\n\n Arguments:\n exc (BaseException): An exception instance.\n\n Returns:\n Exception: Nearest pickleable parent exception class\n (except :exc:`Exception` and parents), or if the exception is\n pickleable it will return :const:`None`.\n \"\"\"\n exc_args = getattr(exc, 'args', [])\n for supercls in itermro(exc.__class__, unwanted_base_classes):\n try:\n superexc = supercls(*exc_args)\n loads(dumps(superexc))\n except:\n pass\n else:\n return superexc\n\n\ndef itermro(cls, stop):\n return takewhile(lambda sup: sup not in stop, getmro(cls))\n\n\ndef create_exception_cls(name, module, parent=None):\n \"\"\"Dynamically create an exception class.\"\"\"\n if not parent:\n parent = Exception\n return subclass_exception(name, parent, module)\n\n\n@python_2_unicode_compatible\nclass UnpickleableExceptionWrapper(Exception):\n \"\"\"Wraps unpickleable exceptions.\n\n Arguments:\n exc_module (str): See :attr:`exc_module`.\n exc_cls_name (str): See :attr:`exc_cls_name`.\n exc_args (Tuple[Any, ...]): See :attr:`exc_args`.\n\n Example:\n >>> def pickle_it(raising_function):\n ... try:\n ... raising_function()\n ... except Exception as e:\n ... exc = UnpickleableExceptionWrapper(\n ... e.__class__.__module__,\n ... e.__class__.__name__,\n ... e.args,\n ... )\n ... pickle.dumps(exc) # Works fine.\n \"\"\"\n\n #: The module of the original exception.\n exc_module = None\n\n #: The name of the original exception class.\n exc_cls_name = None\n\n #: The arguments for the original exception.\n exc_args = None\n\n def __init__(self, exc_module, exc_cls_name, exc_args, text=None):\n safe_exc_args = []\n for arg in exc_args:\n try:\n pickle.dumps(arg)\n safe_exc_args.append(arg)\n except Exception:\n safe_exc_args.append(safe_repr(arg))\n self.exc_module = exc_module\n self.exc_cls_name = exc_cls_name\n self.exc_args = safe_exc_args\n self.text = text\n Exception.__init__(self, exc_module, exc_cls_name, safe_exc_args, text)\n\n def restore(self):\n return create_exception_cls(self.exc_cls_name,\n self.exc_module)(*self.exc_args)\n\n def __str__(self):\n return self.text\n\n @classmethod\n def from_exception(cls, exc):\n return cls(exc.__class__.__module__,\n exc.__class__.__name__,\n getattr(exc, 'args', []),\n safe_repr(exc))\n\n\ndef get_pickleable_exception(exc):\n \"\"\"Make sure exception is pickleable.\"\"\"\n try:\n pickle.loads(pickle.dumps(exc))\n except Exception:\n pass\n else:\n return exc\n nearest = find_pickleable_exception(exc)\n if nearest:\n return nearest\n return UnpickleableExceptionWrapper.from_exception(exc)\n\n\ndef get_pickleable_etype(cls, loads=pickle.loads, dumps=pickle.dumps):\n try:\n loads(dumps(cls))\n except:\n return Exception\n else:\n return cls\n\n\ndef get_pickled_exception(exc):\n \"\"\"Get original exception from exception pickled using\n :meth:`get_pickleable_exception`.\"\"\"\n if isinstance(exc, UnpickleableExceptionWrapper):\n return exc.restore()\n return exc\n\n\ndef b64encode(s):\n return bytes_to_str(base64encode(str_to_bytes(s)))\n\n\ndef b64decode(s):\n return base64decode(str_to_bytes(s))\n\n\ndef strtobool(term, table={'false': False, 'no': False, '0': False,\n 'true': True, 'yes': True, '1': True,\n 'on': True, 'off': False}):\n \"\"\"Convert common terms for true/false to bool\n (true/false/yes/no/on/off/1/0).\"\"\"\n if isinstance(term, string_t):\n try:\n return 
table[term.lower()]\n except KeyError:\n raise TypeError('Cannot coerce {0!r} to type bool'.format(term))\n return term\n\n\ndef jsonify(obj,\n builtin_types=(numbers.Real, string_t), key=None,\n keyfilter=None,\n unknown_type_filter=None):\n \"\"\"Transforms object making it suitable for json serialization\"\"\"\n from kombu.abstract import Object as KombuDictType\n _jsonify = partial(jsonify, builtin_types=builtin_types, key=key,\n keyfilter=keyfilter,\n unknown_type_filter=unknown_type_filter)\n\n if isinstance(obj, KombuDictType):\n obj = obj.as_dict(recurse=True)\n\n if obj is None or isinstance(obj, builtin_types):\n return obj\n elif isinstance(obj, (tuple, list)):\n return [_jsonify(v) for v in obj]\n elif isinstance(obj, dict):\n return {\n k: _jsonify(v, key=k) for k, v in items(obj)\n if (keyfilter(k) if keyfilter else 1)\n }\n elif isinstance(obj, datetime.datetime):\n # See \"Date Time String Format\" in the ECMA-262 specification.\n r = obj.isoformat()\n if obj.microsecond:\n r = r[:23] + r[26:]\n if r.endswith('+00:00'):\n r = r[:-6] + 'Z'\n return r\n elif isinstance(obj, datetime.date):\n return obj.isoformat()\n elif isinstance(obj, datetime.time):\n r = obj.isoformat()\n if obj.microsecond:\n r = r[:12]\n return r\n elif isinstance(obj, datetime.timedelta):\n return str(obj)\n else:\n if unknown_type_filter is None:\n raise ValueError(\n 'Unsupported type: {0!r} {1!r} (parent: {2})'.format(\n type(obj), obj, key))\n return unknown_type_filter(obj)\n\n\n# Since PyPy 3 targets Python 3.2, 'raise exc from None' will\n# raise a TypeError so we need to look for Python 3.3 or newer\nif PY33:\n from vine.five import exec_\n _raise_with_context = None # for flake8\n exec_(\"\"\"def _raise_with_context(exc, ctx): raise exc from ctx\"\"\")\n\n def raise_with_context(exc):\n exc_info = sys.exc_info()\n if not exc_info:\n raise exc\n elif exc_info[1] is exc:\n raise\n _raise_with_context(exc, exc_info[1])\nelse:\n def raise_with_context(exc):\n exc_info = sys.exc_info()\n if not exc_info:\n raise exc\n if exc_info[1] is exc:\n raise\n elif exc_info[2]:\n reraise(type(exc), exc, exc_info[2])\n raise exc\n", "path": "celery/utils/serialization.py"}]}
| 3,166 | 220 |
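Editorial note on the truncated record above (celery/utils/serialization.py): the visible change swaps a `PY3` check for a `PY33` flag so that the `raise exc from ctx` syntax is only compiled on Python 3.3+. Below is a minimal sketch of that version-gating idea, assuming plain `exec` with a local namespace dict in place of celery's `vine.five.exec_` helper; it is an illustration, not part of the record.

```python
# Sketch only: gate syntax that some interpreters cannot handle behind a
# version check and compile it from a string at import time.
import sys

PY33 = sys.version_info >= (3, 3)

if PY33:
    _ns = {}
    # 'raise exc from ctx' is invalid on Python 2 and, per the record's own
    # comment, misbehaves on PyPy 3 targeting 3.2, so it is only compiled
    # from a string once the interpreter check passes.
    exec("def _raise_with_context(exc, ctx): raise exc from ctx", _ns)
    _raise_with_context = _ns["_raise_with_context"]
else:
    def _raise_with_context(exc, ctx):  # fallback: raise without chained context
        raise exc
```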
gh_patches_debug_13384
|
rasdani/github-patches
|
git_diff
|
great-expectations__great_expectations-5460
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use cleaner solution for non-truncating division in python 2
Prefer `from __future__ import division` to `1.*x/y`
--- END ISSUE ---
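Editorial aside (not part of the record): the issue above is a one-liner, so a brief sketch of the Python 2 behaviour it refers to may help; the variable names below are illustrative.

```python
# Under Python 2, int / int truncates, so code often forced float division
# with a multiplication trick such as:  ratio = 1. * hits / total
# The cleaner alternative the issue asks for is the future import below,
# which makes '/' true division module-wide (and is a no-op on Python 3).
from __future__ import division

hits, total = 3, 4
ratio = hits / total      # 0.75 with true division
quotient = hits // total  # 0 -- floor division is still available via '//'
print(ratio, quotient)
```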
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `great_expectations/expectations/metrics/query_metrics/query_table.py`
Content:
```
1 from typing import Any, Dict, List, Optional, Union
2
3 from great_expectations.core.metric_domain_types import MetricDomainTypes
4 from great_expectations.execution_engine import (
5 SparkDFExecutionEngine,
6 SqlAlchemyExecutionEngine,
7 )
8 from great_expectations.expectations.metrics.import_manager import (
9 pyspark_sql_DataFrame,
10 pyspark_sql_Row,
11 pyspark_sql_SparkSession,
12 sa,
13 sqlalchemy_engine_Engine,
14 sqlalchemy_engine_Row,
15 )
16 from great_expectations.expectations.metrics.metric_provider import metric_value
17 from great_expectations.expectations.metrics.query_metric_provider import (
18 QueryMetricProvider,
19 )
20
21
22 class QueryTable(QueryMetricProvider):
23 metric_name = "query.table"
24 value_keys = ("query",)
25
26 @metric_value(engine=SqlAlchemyExecutionEngine)
27 def _sqlalchemy(
28 cls,
29 execution_engine: SqlAlchemyExecutionEngine,
30 metric_domain_kwargs: dict,
31 metric_value_kwargs: dict,
32 metrics: Dict[str, Any],
33 runtime_configuration: dict,
34 ) -> List[sqlalchemy_engine_Row]:
35 query: Optional[str] = metric_value_kwargs.get(
36 "query"
37 ) or cls.default_kwarg_values.get("query")
38
39 selectable: Union[sa.sql.Selectable, str]
40 selectable, _, _ = execution_engine.get_compute_domain(
41 metric_domain_kwargs, domain_type=MetricDomainTypes.TABLE
42 )
43
44 if isinstance(selectable, sa.Table):
45 query = query.format(active_batch=selectable)
46 elif isinstance(
47 selectable, sa.sql.Subquery
48 ): # Specifying a runtime query in a RuntimeBatchRequest returns the active bacth as a Subquery; sectioning the active batch off w/ parentheses ensures flow of operations doesn't break
49 query = query.format(active_batch=f"({selectable})")
50 elif isinstance(
51 selectable, sa.sql.Select
52 ): # Specifying a row_condition returns the active batch as a Select object, requiring compilation & aliasing when formatting the parameterized query
53 query = query.format(
54 active_batch=f'({selectable.compile(compile_kwargs={"literal_binds": True})}) AS subselect',
55 )
56 else:
57 query = query.format(active_batch=f"({selectable})")
58
59 engine: sqlalchemy_engine_Engine = execution_engine.engine
60 result: List[sqlalchemy_engine_Row] = engine.execute(sa.text(query)).fetchall()
61
62 return result
63
64 @metric_value(engine=SparkDFExecutionEngine)
65 def _spark(
66 cls,
67 execution_engine: SparkDFExecutionEngine,
68 metric_domain_kwargs: dict,
69 metric_value_kwargs: dict,
70 metrics: Dict[str, Any],
71 runtime_configuration: dict,
72 ) -> List[pyspark_sql_Row]:
73 query: Optional[str] = metric_value_kwargs.get(
74 "query"
75 ) or cls.default_kwarg_values.get("query")
76
77 df: pyspark_sql_DataFrame
78 df, _, _ = execution_engine.get_compute_domain(
79 metric_domain_kwargs, domain_type=MetricDomainTypes.TABLE
80 )
81
82 df.createOrReplaceTempView("tmp_view")
83 query = query.format(active_batch="tmp_view")
84
85 engine: pyspark_sql_SparkSession = execution_engine.spark
86 result: List[pyspark_sql_Row] = engine.sql(query).collect()
87
88 return result
89
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/great_expectations/expectations/metrics/query_metrics/query_table.py b/great_expectations/expectations/metrics/query_metrics/query_table.py
--- a/great_expectations/expectations/metrics/query_metrics/query_table.py
+++ b/great_expectations/expectations/metrics/query_metrics/query_table.py
@@ -23,6 +23,7 @@
metric_name = "query.table"
value_keys = ("query",)
+ # <snippet>
@metric_value(engine=SqlAlchemyExecutionEngine)
def _sqlalchemy(
cls,
@@ -60,6 +61,7 @@
result: List[sqlalchemy_engine_Row] = engine.execute(sa.text(query)).fetchall()
return result
+ # </snippet>
@metric_value(engine=SparkDFExecutionEngine)
def _spark(
|
{"golden_diff": "diff --git a/great_expectations/expectations/metrics/query_metrics/query_table.py b/great_expectations/expectations/metrics/query_metrics/query_table.py\n--- a/great_expectations/expectations/metrics/query_metrics/query_table.py\n+++ b/great_expectations/expectations/metrics/query_metrics/query_table.py\n@@ -23,6 +23,7 @@\n metric_name = \"query.table\"\n value_keys = (\"query\",)\n \n+ # <snippet>\n @metric_value(engine=SqlAlchemyExecutionEngine)\n def _sqlalchemy(\n cls,\n@@ -60,6 +61,7 @@\n result: List[sqlalchemy_engine_Row] = engine.execute(sa.text(query)).fetchall()\n \n return result\n+ # </snippet>\n \n @metric_value(engine=SparkDFExecutionEngine)\n def _spark(\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "from typing import Any, Dict, List, Optional, Union\n\nfrom great_expectations.core.metric_domain_types import MetricDomainTypes\nfrom great_expectations.execution_engine import (\n SparkDFExecutionEngine,\n SqlAlchemyExecutionEngine,\n)\nfrom great_expectations.expectations.metrics.import_manager import (\n pyspark_sql_DataFrame,\n pyspark_sql_Row,\n pyspark_sql_SparkSession,\n sa,\n sqlalchemy_engine_Engine,\n sqlalchemy_engine_Row,\n)\nfrom great_expectations.expectations.metrics.metric_provider import metric_value\nfrom great_expectations.expectations.metrics.query_metric_provider import (\n QueryMetricProvider,\n)\n\n\nclass QueryTable(QueryMetricProvider):\n metric_name = \"query.table\"\n value_keys = (\"query\",)\n\n @metric_value(engine=SqlAlchemyExecutionEngine)\n def _sqlalchemy(\n cls,\n execution_engine: SqlAlchemyExecutionEngine,\n metric_domain_kwargs: dict,\n metric_value_kwargs: dict,\n metrics: Dict[str, Any],\n runtime_configuration: dict,\n ) -> List[sqlalchemy_engine_Row]:\n query: Optional[str] = metric_value_kwargs.get(\n \"query\"\n ) or cls.default_kwarg_values.get(\"query\")\n\n selectable: Union[sa.sql.Selectable, str]\n selectable, _, _ = execution_engine.get_compute_domain(\n metric_domain_kwargs, domain_type=MetricDomainTypes.TABLE\n )\n\n if isinstance(selectable, sa.Table):\n query = query.format(active_batch=selectable)\n elif isinstance(\n selectable, sa.sql.Subquery\n ): # Specifying a runtime query in a RuntimeBatchRequest returns the active bacth as a Subquery; sectioning the active batch off w/ parentheses ensures flow of operations doesn't break\n query = query.format(active_batch=f\"({selectable})\")\n elif isinstance(\n selectable, sa.sql.Select\n ): # Specifying a row_condition returns the active batch as a Select object, requiring compilation & aliasing when formatting the parameterized query\n query = query.format(\n active_batch=f'({selectable.compile(compile_kwargs={\"literal_binds\": True})}) AS subselect',\n )\n else:\n query = query.format(active_batch=f\"({selectable})\")\n\n engine: sqlalchemy_engine_Engine = execution_engine.engine\n result: List[sqlalchemy_engine_Row] = engine.execute(sa.text(query)).fetchall()\n\n return result\n\n @metric_value(engine=SparkDFExecutionEngine)\n def _spark(\n cls,\n execution_engine: SparkDFExecutionEngine,\n metric_domain_kwargs: dict,\n metric_value_kwargs: dict,\n metrics: Dict[str, Any],\n runtime_configuration: dict,\n ) -> List[pyspark_sql_Row]:\n query: Optional[str] = metric_value_kwargs.get(\n \"query\"\n ) or cls.default_kwarg_values.get(\"query\")\n\n df: pyspark_sql_DataFrame\n df, _, _ = execution_engine.get_compute_domain(\n metric_domain_kwargs, 
domain_type=MetricDomainTypes.TABLE\n )\n\n df.createOrReplaceTempView(\"tmp_view\")\n query = query.format(active_batch=\"tmp_view\")\n\n engine: pyspark_sql_SparkSession = execution_engine.spark\n result: List[pyspark_sql_Row] = engine.sql(query).collect()\n\n return result\n", "path": "great_expectations/expectations/metrics/query_metrics/query_table.py"}], "after_files": [{"content": "from typing import Any, Dict, List, Optional, Union\n\nfrom great_expectations.core.metric_domain_types import MetricDomainTypes\nfrom great_expectations.execution_engine import (\n SparkDFExecutionEngine,\n SqlAlchemyExecutionEngine,\n)\nfrom great_expectations.expectations.metrics.import_manager import (\n pyspark_sql_DataFrame,\n pyspark_sql_Row,\n pyspark_sql_SparkSession,\n sa,\n sqlalchemy_engine_Engine,\n sqlalchemy_engine_Row,\n)\nfrom great_expectations.expectations.metrics.metric_provider import metric_value\nfrom great_expectations.expectations.metrics.query_metric_provider import (\n QueryMetricProvider,\n)\n\n\nclass QueryTable(QueryMetricProvider):\n metric_name = \"query.table\"\n value_keys = (\"query\",)\n\n # <snippet>\n @metric_value(engine=SqlAlchemyExecutionEngine)\n def _sqlalchemy(\n cls,\n execution_engine: SqlAlchemyExecutionEngine,\n metric_domain_kwargs: dict,\n metric_value_kwargs: dict,\n metrics: Dict[str, Any],\n runtime_configuration: dict,\n ) -> List[sqlalchemy_engine_Row]:\n query: Optional[str] = metric_value_kwargs.get(\n \"query\"\n ) or cls.default_kwarg_values.get(\"query\")\n\n selectable: Union[sa.sql.Selectable, str]\n selectable, _, _ = execution_engine.get_compute_domain(\n metric_domain_kwargs, domain_type=MetricDomainTypes.TABLE\n )\n\n if isinstance(selectable, sa.Table):\n query = query.format(active_batch=selectable)\n elif isinstance(\n selectable, sa.sql.Subquery\n ): # Specifying a runtime query in a RuntimeBatchRequest returns the active bacth as a Subquery; sectioning the active batch off w/ parentheses ensures flow of operations doesn't break\n query = query.format(active_batch=f\"({selectable})\")\n elif isinstance(\n selectable, sa.sql.Select\n ): # Specifying a row_condition returns the active batch as a Select object, requiring compilation & aliasing when formatting the parameterized query\n query = query.format(\n active_batch=f'({selectable.compile(compile_kwargs={\"literal_binds\": True})}) AS subselect',\n )\n else:\n query = query.format(active_batch=f\"({selectable})\")\n\n engine: sqlalchemy_engine_Engine = execution_engine.engine\n result: List[sqlalchemy_engine_Row] = engine.execute(sa.text(query)).fetchall()\n\n return result\n # </snippet>\n\n @metric_value(engine=SparkDFExecutionEngine)\n def _spark(\n cls,\n execution_engine: SparkDFExecutionEngine,\n metric_domain_kwargs: dict,\n metric_value_kwargs: dict,\n metrics: Dict[str, Any],\n runtime_configuration: dict,\n ) -> List[pyspark_sql_Row]:\n query: Optional[str] = metric_value_kwargs.get(\n \"query\"\n ) or cls.default_kwarg_values.get(\"query\")\n\n df: pyspark_sql_DataFrame\n df, _, _ = execution_engine.get_compute_domain(\n metric_domain_kwargs, domain_type=MetricDomainTypes.TABLE\n )\n\n df.createOrReplaceTempView(\"tmp_view\")\n query = query.format(active_batch=\"tmp_view\")\n\n engine: pyspark_sql_SparkSession = execution_engine.spark\n result: List[pyspark_sql_Row] = engine.sql(query).collect()\n\n return result\n", "path": "great_expectations/expectations/metrics/query_metrics/query_table.py"}]}
| 1,163 | 183 |
gh_patches_debug_31612
|
rasdani/github-patches
|
git_diff
|
tough-dev-school__education-backend-855
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Send only one answer from each user into the lottery drum
Currently, if a user has submitted three answers to a homework assignment, all three answers go out to different students during cross-checking. This is bad: we should either send only the first answer, or collect all of a user's answers into a batch and send them to a single reviewer.
--- END ISSUE ---
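Editorial aside (not part of the record): the golden diff further down in this record resolves the issue by dispatching only each author's earliest answer. A hedged sketch of that queryset idea follows; the field names `author_id` and `created` come from the record, and note that `distinct()` with field names is a PostgreSQL-only Django feature.

```python
# Sketch mirroring the golden_diff below: reduce an Answer queryset to one
# row per author before handing answers out for cross-checking.
def unique_author_answers(answer_queryset):
    """Keep only each author's earliest answer (PostgreSQL distinct-on-field)."""
    return (
        answer_queryset
        .order_by('author_id', 'created')  # earliest answer first per author
        .distinct('author_id')             # then keep one row per author
    )
```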
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/homework/services/answer_crosscheck_dispatcher.py`
Content:
```
1 from typing import Optional
2
3 from django.db import transaction
4 from django.db.models import Count, Q, QuerySet
5
6 from homework.models import Answer, AnswerCrossCheck
7 from users.models import User
8
9
10 class AnswerCrossCheckDispatcher:
11 """Given a bunch of answers and users, create a cross-check record
12 for each of them, making sure each answer has a user to check
13 and number of answers if equal for each user
14 """
15 def __init__(self, answers: QuerySet[Answer], answers_per_user: int = 3):
16 self.answers = Answer.objects.filter(pk__in=[answer.pk for answer in answers])
17 self.users = User.objects.filter(pk__in=[answer.author_id for answer in answers]).order_by('?')
18 self.answers_per_user = answers_per_user
19
20 @transaction.atomic
21 def __call__(self) -> list[AnswerCrossCheck]:
22 crosschecks = list()
23 for user in self.users.iterator():
24 for _ in range(self.answers_per_user):
25 answer = self.get_answer_to_check(user)
26 if answer is not None:
27 crosschecks.append(
28 self.give_answer_to_user(answer, user),
29 )
30
31 return crosschecks
32
33 def get_answer_to_check(self, user: User) -> Optional[Answer]:
34 return self.get_answers_with_crosscheck_count() \
35 .annotate(already_checking=Count('answercrosscheck', filter=Q(answercrosscheck__checker_id=user.id))) \
36 .exclude(already_checking__gte=1) \
37 .exclude(author=user) \
38 .exclude(do_not_crosscheck=True) \
39 .order_by('crosscheck_count').first()
40
41 def give_answer_to_user(self, answer: Answer, user: User) -> AnswerCrossCheck:
42 return AnswerCrossCheck.objects.create(answer=answer, checker=user)
43
44 def get_answers_with_crosscheck_count(self) -> QuerySet[Answer]:
45 return self.answers.annotate(
46 crosscheck_count=Count('answercrosscheck', filter=Q(answercrosscheck__checker__in=self.users)),
47 )
48
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/homework/services/answer_crosscheck_dispatcher.py b/src/homework/services/answer_crosscheck_dispatcher.py
--- a/src/homework/services/answer_crosscheck_dispatcher.py
+++ b/src/homework/services/answer_crosscheck_dispatcher.py
@@ -9,11 +9,12 @@
class AnswerCrossCheckDispatcher:
"""Given a bunch of answers and users, create a cross-check record
- for each of them, making sure each answer has a user to check
- and number of answers if equal for each user
+ for each of them, making sure the first answer of each user has a user to
+ check and number of answers if equal for each user
"""
def __init__(self, answers: QuerySet[Answer], answers_per_user: int = 3):
self.answers = Answer.objects.filter(pk__in=[answer.pk for answer in answers])
+ self.unique_author_answers = self.answers.order_by('author_id', 'created').distinct('author_id')
self.users = User.objects.filter(pk__in=[answer.author_id for answer in answers]).order_by('?')
self.answers_per_user = answers_per_user
@@ -27,11 +28,11 @@
crosschecks.append(
self.give_answer_to_user(answer, user),
)
-
return crosschecks
def get_answer_to_check(self, user: User) -> Optional[Answer]:
return self.get_answers_with_crosscheck_count() \
+ .filter(id__in=self.unique_author_answers) \
.annotate(already_checking=Count('answercrosscheck', filter=Q(answercrosscheck__checker_id=user.id))) \
.exclude(already_checking__gte=1) \
.exclude(author=user) \
|
{"golden_diff": "diff --git a/src/homework/services/answer_crosscheck_dispatcher.py b/src/homework/services/answer_crosscheck_dispatcher.py\n--- a/src/homework/services/answer_crosscheck_dispatcher.py\n+++ b/src/homework/services/answer_crosscheck_dispatcher.py\n@@ -9,11 +9,12 @@\n \n class AnswerCrossCheckDispatcher:\n \"\"\"Given a bunch of answers and users, create a cross-check record\n- for each of them, making sure each answer has a user to check\n- and number of answers if equal for each user\n+ for each of them, making sure the first answer of each user has a user to\n+ check and number of answers if equal for each user\n \"\"\"\n def __init__(self, answers: QuerySet[Answer], answers_per_user: int = 3):\n self.answers = Answer.objects.filter(pk__in=[answer.pk for answer in answers])\n+ self.unique_author_answers = self.answers.order_by('author_id', 'created').distinct('author_id')\n self.users = User.objects.filter(pk__in=[answer.author_id for answer in answers]).order_by('?')\n self.answers_per_user = answers_per_user\n \n@@ -27,11 +28,11 @@\n crosschecks.append(\n self.give_answer_to_user(answer, user),\n )\n-\n return crosschecks\n \n def get_answer_to_check(self, user: User) -> Optional[Answer]:\n return self.get_answers_with_crosscheck_count() \\\n+ .filter(id__in=self.unique_author_answers) \\\n .annotate(already_checking=Count('answercrosscheck', filter=Q(answercrosscheck__checker_id=user.id))) \\\n .exclude(already_checking__gte=1) \\\n .exclude(author=user) \\\n", "issue": "\u0421\u043b\u0430\u0442\u044c \u0432 \u043b\u043e\u0442\u043e\u0442\u0440\u043e\u043d \u0442\u043e\u043b\u044c\u043a\u043e \u043e\u0434\u0438\u043d \u043e\u0442\u0432\u0435\u0442 \u043e\u0442 \u043e\u0434\u043d\u043e\u0433\u043e \u043f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044f\n\u0421\u0435\u0439\u0447\u0430\u0441, \u0435\u0441\u043b\u0438 \u043f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044c \u0434\u0430\u043b \u0442\u0440\u0438 \u043e\u0442\u0432\u0435\u0442\u0430 \u043d\u0430 \u0434\u043e\u043c\u0430\u0448\u043a\u0443, \u0442\u043e \u043f\u0440\u0438 \u043a\u0440\u043e\u0441\u0441-\u043f\u0440\u043e\u0432\u0435\u0440\u043a\u0435 \u0432\u0441\u0435 \u0442\u0440\u0438 \u043e\u0442\u0432\u0435\u0442\u0430 \u0443\u0439\u0434\u0443\u0442 \u0440\u0430\u0437\u043d\u044b\u043c \u0441\u0442\u0443\u0434\u0435\u043d\u0442\u0430\u043c. 
\u042d\u0442\u043e \u2014 \u043f\u043b\u043e\u0445\u043e, \u043d\u0443\u0436\u043d\u043e \u043b\u0438\u0431\u043e \u0441\u043b\u0430\u0442\u044c \u0442\u043e\u043b\u044c\u043a\u043e \u043f\u0435\u0440\u0432\u044b\u0439 \u043e\u0442\u0432\u0435\u0442, \u043b\u0438\u0431\u043e \u0441\u043e\u0431\u0438\u0440\u0430\u0442\u044c \u0432\u0441\u0435 \u043e\u0442\u0432\u0435\u0442\u044b \u0432 \u043f\u0430\u0447\u043a\u0443 \u0438 \u0441\u043b\u0430\u0442\u044c \u0438\u0445 \u043e\u0434\u043d\u043e\u043c\u0443 \u043f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044e.\n", "before_files": [{"content": "from typing import Optional\n\nfrom django.db import transaction\nfrom django.db.models import Count, Q, QuerySet\n\nfrom homework.models import Answer, AnswerCrossCheck\nfrom users.models import User\n\n\nclass AnswerCrossCheckDispatcher:\n \"\"\"Given a bunch of answers and users, create a cross-check record\n for each of them, making sure each answer has a user to check\n and number of answers if equal for each user\n \"\"\"\n def __init__(self, answers: QuerySet[Answer], answers_per_user: int = 3):\n self.answers = Answer.objects.filter(pk__in=[answer.pk for answer in answers])\n self.users = User.objects.filter(pk__in=[answer.author_id for answer in answers]).order_by('?')\n self.answers_per_user = answers_per_user\n\n @transaction.atomic\n def __call__(self) -> list[AnswerCrossCheck]:\n crosschecks = list()\n for user in self.users.iterator():\n for _ in range(self.answers_per_user):\n answer = self.get_answer_to_check(user)\n if answer is not None:\n crosschecks.append(\n self.give_answer_to_user(answer, user),\n )\n\n return crosschecks\n\n def get_answer_to_check(self, user: User) -> Optional[Answer]:\n return self.get_answers_with_crosscheck_count() \\\n .annotate(already_checking=Count('answercrosscheck', filter=Q(answercrosscheck__checker_id=user.id))) \\\n .exclude(already_checking__gte=1) \\\n .exclude(author=user) \\\n .exclude(do_not_crosscheck=True) \\\n .order_by('crosscheck_count').first()\n\n def give_answer_to_user(self, answer: Answer, user: User) -> AnswerCrossCheck:\n return AnswerCrossCheck.objects.create(answer=answer, checker=user)\n\n def get_answers_with_crosscheck_count(self) -> QuerySet[Answer]:\n return self.answers.annotate(\n crosscheck_count=Count('answercrosscheck', filter=Q(answercrosscheck__checker__in=self.users)),\n )\n", "path": "src/homework/services/answer_crosscheck_dispatcher.py"}], "after_files": [{"content": "from typing import Optional\n\nfrom django.db import transaction\nfrom django.db.models import Count, Q, QuerySet\n\nfrom homework.models import Answer, AnswerCrossCheck\nfrom users.models import User\n\n\nclass AnswerCrossCheckDispatcher:\n \"\"\"Given a bunch of answers and users, create a cross-check record\n for each of them, making sure the first answer of each user has a user to\n check and number of answers if equal for each user\n \"\"\"\n def __init__(self, answers: QuerySet[Answer], answers_per_user: int = 3):\n self.answers = Answer.objects.filter(pk__in=[answer.pk for answer in answers])\n self.unique_author_answers = self.answers.order_by('author_id', 'created').distinct('author_id')\n self.users = User.objects.filter(pk__in=[answer.author_id for answer in answers]).order_by('?')\n self.answers_per_user = answers_per_user\n\n @transaction.atomic\n def __call__(self) -> list[AnswerCrossCheck]:\n crosschecks = list()\n for user in self.users.iterator():\n for _ in range(self.answers_per_user):\n answer = 
self.get_answer_to_check(user)\n if answer is not None:\n crosschecks.append(\n self.give_answer_to_user(answer, user),\n )\n return crosschecks\n\n def get_answer_to_check(self, user: User) -> Optional[Answer]:\n return self.get_answers_with_crosscheck_count() \\\n .filter(id__in=self.unique_author_answers) \\\n .annotate(already_checking=Count('answercrosscheck', filter=Q(answercrosscheck__checker_id=user.id))) \\\n .exclude(already_checking__gte=1) \\\n .exclude(author=user) \\\n .exclude(do_not_crosscheck=True) \\\n .order_by('crosscheck_count').first()\n\n def give_answer_to_user(self, answer: Answer, user: User) -> AnswerCrossCheck:\n return AnswerCrossCheck.objects.create(answer=answer, checker=user)\n\n def get_answers_with_crosscheck_count(self) -> QuerySet[Answer]:\n return self.answers.annotate(\n crosscheck_count=Count('answercrosscheck', filter=Q(answercrosscheck__checker__in=self.users)),\n )\n", "path": "src/homework/services/answer_crosscheck_dispatcher.py"}]}
| 880 | 382 |
gh_patches_debug_9548
|
rasdani/github-patches
|
git_diff
|
cloud-custodian__cloud-custodian-3676
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
c7n-mailer-replay : Python3 - TypeError: write() argument must be str, not bytes
Using c7n-mailer-replay under Python3 gives the following trace:
```
Traceback (most recent call last):
File "HOME/.pyenv/versions/cloud-custodian-3.6/bin/c7n-mailer-replay", line 11, in <module>
load_entry_point('c7n-mailer', 'console_scripts', 'c7n-mailer-replay')()
File "HOME/CLOUD_CUSTODIAN/SRC/tools/c7n_mailer/c7n_mailer/replay.py", line 134, in main
json_dump_file=options.json_dump_file
File "HOME/CLOUD_CUSTODIAN/SRC/tools/c7n_mailer/c7n_mailer/replay.py", line 46, in __init__
fh.write(raw)
TypeError: write() argument must be str, not bytes
```
I had success with the following change:
```diff
diff --git a/tools/c7n_mailer/c7n_mailer/replay.py b/tools/c7n_mailer/c7n_mailer/replay.py
index b3f5456be..72f63332f 100644
--- a/tools/c7n_mailer/c7n_mailer/replay.py
+++ b/tools/c7n_mailer/c7n_mailer/replay.py
@@ -42,7 +42,7 @@ class MailerTester(object):
logger.debug('base64-decoding and zlib decompressing message')
raw = zlib.decompress(base64.b64decode(raw))
if json_dump_file is not None:
- with open(json_dump_file, 'w') as fh:
+ with open(json_dump_file, 'wb') as fh:
fh.write(raw)
self.data = json.loads(raw)
logger.debug('Loaded message JSON')
```
I believe it could be compatible with Python2 also, but it needs some testing.
--- END ISSUE ---
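Editorial aside (not part of the record): the traceback occurs because `zlib.decompress()` returns `bytes` on Python 3, which cannot be written to a file opened in text mode. A standalone sketch with illustrative file and payload names:

```python
# Minimal reproduction of the fix: write decompressed bytes in binary mode.
import base64
import zlib

encoded = base64.b64encode(zlib.compress(b'{"resources": []}'))
raw = zlib.decompress(base64.b64decode(encoded))  # bytes on Python 3

with open("message.json", "wb") as fh:  # 'w' (text mode) raises TypeError here
    fh.write(raw)
```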
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/c7n_mailer/c7n_mailer/replay.py`
Content:
```
1 """
2 Allow local testing of mailer and templates by replaying an SQS message.
3
4 MAILER_FILE input is a file containing the exact base64-encoded, gzipped
5 data that's enqueued to SQS via :py:meth:`c7n.actions.Notify.send_sqs`.
6
7 Alternatively, with -p|--plain specified, the file will be assumed to be
8 JSON data that can be loaded directly.
9 """
10 from __future__ import absolute_import, division, print_function, unicode_literals
11
12 import argparse
13 import boto3
14 import os
15 import logging
16 import zlib
17 import base64
18 import json
19
20 import jsonschema
21 from ruamel import yaml
22
23 from c7n_mailer.utils import setup_defaults
24 from c7n_mailer.cli import CONFIG_SCHEMA
25 from .email_delivery import EmailDelivery
26
27 logger = logging.getLogger(__name__)
28
29
30 class MailerTester(object):
31
32 def __init__(self, msg_file, config, msg_plain=False, json_dump_file=None):
33 if not os.path.exists(msg_file):
34 raise RuntimeError("File does not exist: %s" % msg_file)
35 logger.debug('Reading message from: %s', msg_file)
36 with open(msg_file, 'r') as fh:
37 raw = fh.read()
38 logger.debug('Read %d byte message', len(raw))
39 if msg_plain:
40 raw = raw.strip()
41 else:
42 logger.debug('base64-decoding and zlib decompressing message')
43 raw = zlib.decompress(base64.b64decode(raw))
44 if json_dump_file is not None:
45 with open(json_dump_file, 'w') as fh:
46 fh.write(raw)
47 self.data = json.loads(raw)
48 logger.debug('Loaded message JSON')
49 self.config = config
50 self.session = boto3.Session()
51
52 def run(self, dry_run=False, print_only=False):
53 emd = EmailDelivery(self.config, self.session, logger)
54 addrs_to_msgs = emd.get_to_addrs_email_messages_map(self.data)
55 logger.info('Would send email to: %s', addrs_to_msgs.keys())
56 if print_only:
57 mime = emd.get_mimetext_message(
58 self.data, self.data['resources'], ['[email protected]']
59 )
60 logger.info('Send mail with subject: "%s"', mime['Subject'])
61 print(mime.get_payload(None, True))
62 return
63 if dry_run:
64 for to_addrs, mimetext_msg in addrs_to_msgs.items():
65 print('-> SEND MESSAGE TO: %s' % '; '.join(to_addrs))
66 print(mimetext_msg.get_payload(None, True))
67 return
68 # else actually send the message...
69 for to_addrs, mimetext_msg in addrs_to_msgs.items():
70 logger.info('Actually sending mail to: %s', to_addrs)
71 emd.send_c7n_email(self.data, list(to_addrs), mimetext_msg)
72
73
74 def setup_parser():
75 parser = argparse.ArgumentParser('Test c7n-mailer templates and mail')
76 parser.add_argument('-c', '--config', required=True)
77 parser.add_argument('-d', '--dry-run', dest='dry_run', action='store_true',
78 default=False,
79 help='Log messages that would be sent, but do not send')
80 parser.add_argument('-T', '--template-print', dest='print_only',
81 action='store_true', default=False,
82 help='Just print rendered templates')
83 parser.add_argument('-t', '--templates', default=None, type=str,
84 help='message templates folder location')
85 parser.add_argument('-p', '--plain', dest='plain', action='store_true',
86 default=False,
87 help='Expect MESSAGE_FILE to be a plain string, '
88 'rather than the base64-encoded, gzipped SQS '
89 'message format')
90 parser.add_argument('-j', '--json-dump-file', dest='json_dump_file',
91 type=str, action='store', default=None,
92 help='If dump JSON of MESSAGE_FILE to this path; '
93 'useful to base64-decode and gunzip a message')
94 parser.add_argument('MESSAGE_FILE', type=str,
95 help='Path to SQS message dump/content file')
96 return parser
97
98
99 def session_factory(config):
100 return boto3.Session(
101 region_name=config['region'],
102 profile_name=config.get('profile'))
103
104
105 def main():
106 parser = setup_parser()
107 options = parser.parse_args()
108
109 module_dir = os.path.dirname(os.path.abspath(__file__))
110 default_templates = [
111 os.path.abspath(os.path.join(module_dir, 'msg-templates')),
112 os.path.abspath(os.path.join(module_dir, '..', 'msg-templates')),
113 os.path.abspath('.')
114 ]
115 templates = options.templates
116 if templates:
117 default_templates.append(
118 os.path.abspath(os.path.expanduser(os.path.expandvars(templates)))
119 )
120
121 log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
122 logging.basicConfig(level=logging.DEBUG, format=log_format)
123 logging.getLogger('botocore').setLevel(logging.WARNING)
124
125 with open(options.config) as fh:
126 config = yaml.load(fh.read(), Loader=yaml.SafeLoader)
127
128 jsonschema.validate(config, CONFIG_SCHEMA)
129 setup_defaults(config)
130 config['templates_folders'] = default_templates
131
132 tester = MailerTester(
133 options.MESSAGE_FILE, config, msg_plain=options.plain,
134 json_dump_file=options.json_dump_file
135 )
136 tester.run(options.dry_run, options.print_only)
137
138
139 if __name__ == '__main__':
140 main()
141
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/tools/c7n_mailer/c7n_mailer/replay.py b/tools/c7n_mailer/c7n_mailer/replay.py
--- a/tools/c7n_mailer/c7n_mailer/replay.py
+++ b/tools/c7n_mailer/c7n_mailer/replay.py
@@ -42,7 +42,7 @@
logger.debug('base64-decoding and zlib decompressing message')
raw = zlib.decompress(base64.b64decode(raw))
if json_dump_file is not None:
- with open(json_dump_file, 'w') as fh:
+ with open(json_dump_file, 'wb') as fh: # pragma: no cover
fh.write(raw)
self.data = json.loads(raw)
logger.debug('Loaded message JSON')
|
{"golden_diff": "diff --git a/tools/c7n_mailer/c7n_mailer/replay.py b/tools/c7n_mailer/c7n_mailer/replay.py\n--- a/tools/c7n_mailer/c7n_mailer/replay.py\n+++ b/tools/c7n_mailer/c7n_mailer/replay.py\n@@ -42,7 +42,7 @@\n logger.debug('base64-decoding and zlib decompressing message')\n raw = zlib.decompress(base64.b64decode(raw))\n if json_dump_file is not None:\n- with open(json_dump_file, 'w') as fh:\n+ with open(json_dump_file, 'wb') as fh: # pragma: no cover\n fh.write(raw)\n self.data = json.loads(raw)\n logger.debug('Loaded message JSON')\n", "issue": "c7n-mailer-replay : Python3 - TypeError: write() argument must be str, not bytes\nUsing c7n-mailer-replay under Python3 gives the following trace:\r\n```\r\nTraceback (most recent call last):\r\n File \"HOME/.pyenv/versions/cloud-custodian-3.6/bin/c7n-mailer-replay\", line 11, in <module>\r\n load_entry_point('c7n-mailer', 'console_scripts', 'c7n-mailer-replay')()\r\n File \"HOME/CLOUD_CUSTODIAN/SRC/tools/c7n_mailer/c7n_mailer/replay.py\", line 134, in main\r\n json_dump_file=options.json_dump_file\r\n File \"HOME/CLOUD_CUSTODIAN/SRC/tools/c7n_mailer/c7n_mailer/replay.py\", line 46, in __init__\r\n fh.write(raw)\r\nTypeError: write() argument must be str, not bytes\r\n```\r\n\r\nI had success with the following change:\r\n```diff\r\ndiff --git a/tools/c7n_mailer/c7n_mailer/replay.py b/tools/c7n_mailer/c7n_mailer/replay.py\r\nindex b3f5456be..72f63332f 100644\r\n--- a/tools/c7n_mailer/c7n_mailer/replay.py\r\n+++ b/tools/c7n_mailer/c7n_mailer/replay.py\r\n@@ -42,7 +42,7 @@ class MailerTester(object):\r\n logger.debug('base64-decoding and zlib decompressing message')\r\n raw = zlib.decompress(base64.b64decode(raw))\r\n if json_dump_file is not None:\r\n- with open(json_dump_file, 'w') as fh:\r\n+ with open(json_dump_file, 'wb') as fh:\r\n fh.write(raw)\r\n self.data = json.loads(raw)\r\n logger.debug('Loaded message JSON')\r\n```\r\n\r\nI believe it could be compatible with Python2 also, but it needs some testing.\n", "before_files": [{"content": "\"\"\"\nAllow local testing of mailer and templates by replaying an SQS message.\n\nMAILER_FILE input is a file containing the exact base64-encoded, gzipped\ndata that's enqueued to SQS via :py:meth:`c7n.actions.Notify.send_sqs`.\n\nAlternatively, with -p|--plain specified, the file will be assumed to be\nJSON data that can be loaded directly.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport argparse\nimport boto3\nimport os\nimport logging\nimport zlib\nimport base64\nimport json\n\nimport jsonschema\nfrom ruamel import yaml\n\nfrom c7n_mailer.utils import setup_defaults\nfrom c7n_mailer.cli import CONFIG_SCHEMA\nfrom .email_delivery import EmailDelivery\n\nlogger = logging.getLogger(__name__)\n\n\nclass MailerTester(object):\n\n def __init__(self, msg_file, config, msg_plain=False, json_dump_file=None):\n if not os.path.exists(msg_file):\n raise RuntimeError(\"File does not exist: %s\" % msg_file)\n logger.debug('Reading message from: %s', msg_file)\n with open(msg_file, 'r') as fh:\n raw = fh.read()\n logger.debug('Read %d byte message', len(raw))\n if msg_plain:\n raw = raw.strip()\n else:\n logger.debug('base64-decoding and zlib decompressing message')\n raw = zlib.decompress(base64.b64decode(raw))\n if json_dump_file is not None:\n with open(json_dump_file, 'w') as fh:\n fh.write(raw)\n self.data = json.loads(raw)\n logger.debug('Loaded message JSON')\n self.config = config\n self.session = boto3.Session()\n\n def run(self, dry_run=False, 
print_only=False):\n emd = EmailDelivery(self.config, self.session, logger)\n addrs_to_msgs = emd.get_to_addrs_email_messages_map(self.data)\n logger.info('Would send email to: %s', addrs_to_msgs.keys())\n if print_only:\n mime = emd.get_mimetext_message(\n self.data, self.data['resources'], ['[email protected]']\n )\n logger.info('Send mail with subject: \"%s\"', mime['Subject'])\n print(mime.get_payload(None, True))\n return\n if dry_run:\n for to_addrs, mimetext_msg in addrs_to_msgs.items():\n print('-> SEND MESSAGE TO: %s' % '; '.join(to_addrs))\n print(mimetext_msg.get_payload(None, True))\n return\n # else actually send the message...\n for to_addrs, mimetext_msg in addrs_to_msgs.items():\n logger.info('Actually sending mail to: %s', to_addrs)\n emd.send_c7n_email(self.data, list(to_addrs), mimetext_msg)\n\n\ndef setup_parser():\n parser = argparse.ArgumentParser('Test c7n-mailer templates and mail')\n parser.add_argument('-c', '--config', required=True)\n parser.add_argument('-d', '--dry-run', dest='dry_run', action='store_true',\n default=False,\n help='Log messages that would be sent, but do not send')\n parser.add_argument('-T', '--template-print', dest='print_only',\n action='store_true', default=False,\n help='Just print rendered templates')\n parser.add_argument('-t', '--templates', default=None, type=str,\n help='message templates folder location')\n parser.add_argument('-p', '--plain', dest='plain', action='store_true',\n default=False,\n help='Expect MESSAGE_FILE to be a plain string, '\n 'rather than the base64-encoded, gzipped SQS '\n 'message format')\n parser.add_argument('-j', '--json-dump-file', dest='json_dump_file',\n type=str, action='store', default=None,\n help='If dump JSON of MESSAGE_FILE to this path; '\n 'useful to base64-decode and gunzip a message')\n parser.add_argument('MESSAGE_FILE', type=str,\n help='Path to SQS message dump/content file')\n return parser\n\n\ndef session_factory(config):\n return boto3.Session(\n region_name=config['region'],\n profile_name=config.get('profile'))\n\n\ndef main():\n parser = setup_parser()\n options = parser.parse_args()\n\n module_dir = os.path.dirname(os.path.abspath(__file__))\n default_templates = [\n os.path.abspath(os.path.join(module_dir, 'msg-templates')),\n os.path.abspath(os.path.join(module_dir, '..', 'msg-templates')),\n os.path.abspath('.')\n ]\n templates = options.templates\n if templates:\n default_templates.append(\n os.path.abspath(os.path.expanduser(os.path.expandvars(templates)))\n )\n\n log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n logging.basicConfig(level=logging.DEBUG, format=log_format)\n logging.getLogger('botocore').setLevel(logging.WARNING)\n\n with open(options.config) as fh:\n config = yaml.load(fh.read(), Loader=yaml.SafeLoader)\n\n jsonschema.validate(config, CONFIG_SCHEMA)\n setup_defaults(config)\n config['templates_folders'] = default_templates\n\n tester = MailerTester(\n options.MESSAGE_FILE, config, msg_plain=options.plain,\n json_dump_file=options.json_dump_file\n )\n tester.run(options.dry_run, options.print_only)\n\n\nif __name__ == '__main__':\n main()\n", "path": "tools/c7n_mailer/c7n_mailer/replay.py"}], "after_files": [{"content": "\"\"\"\nAllow local testing of mailer and templates by replaying an SQS message.\n\nMAILER_FILE input is a file containing the exact base64-encoded, gzipped\ndata that's enqueued to SQS via :py:meth:`c7n.actions.Notify.send_sqs`.\n\nAlternatively, with -p|--plain specified, the file will be assumed to be\nJSON data that can 
be loaded directly.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport argparse\nimport boto3\nimport os\nimport logging\nimport zlib\nimport base64\nimport json\n\nimport jsonschema\nfrom ruamel import yaml\n\nfrom c7n_mailer.utils import setup_defaults\nfrom c7n_mailer.cli import CONFIG_SCHEMA\nfrom .email_delivery import EmailDelivery\n\nlogger = logging.getLogger(__name__)\n\n\nclass MailerTester(object):\n\n def __init__(self, msg_file, config, msg_plain=False, json_dump_file=None):\n if not os.path.exists(msg_file):\n raise RuntimeError(\"File does not exist: %s\" % msg_file)\n logger.debug('Reading message from: %s', msg_file)\n with open(msg_file, 'r') as fh:\n raw = fh.read()\n logger.debug('Read %d byte message', len(raw))\n if msg_plain:\n raw = raw.strip()\n else:\n logger.debug('base64-decoding and zlib decompressing message')\n raw = zlib.decompress(base64.b64decode(raw))\n if json_dump_file is not None:\n with open(json_dump_file, 'wb') as fh: # pragma: no cover\n fh.write(raw)\n self.data = json.loads(raw)\n logger.debug('Loaded message JSON')\n self.config = config\n self.session = boto3.Session()\n\n def run(self, dry_run=False, print_only=False):\n emd = EmailDelivery(self.config, self.session, logger)\n addrs_to_msgs = emd.get_to_addrs_email_messages_map(self.data)\n logger.info('Would send email to: %s', addrs_to_msgs.keys())\n if print_only:\n mime = emd.get_mimetext_message(\n self.data, self.data['resources'], ['[email protected]']\n )\n logger.info('Send mail with subject: \"%s\"', mime['Subject'])\n print(mime.get_payload(None, True))\n return\n if dry_run:\n for to_addrs, mimetext_msg in addrs_to_msgs.items():\n print('-> SEND MESSAGE TO: %s' % '; '.join(to_addrs))\n print(mimetext_msg.get_payload(None, True))\n return\n # else actually send the message...\n for to_addrs, mimetext_msg in addrs_to_msgs.items():\n logger.info('Actually sending mail to: %s', to_addrs)\n emd.send_c7n_email(self.data, list(to_addrs), mimetext_msg)\n\n\ndef setup_parser():\n parser = argparse.ArgumentParser('Test c7n-mailer templates and mail')\n parser.add_argument('-c', '--config', required=True)\n parser.add_argument('-d', '--dry-run', dest='dry_run', action='store_true',\n default=False,\n help='Log messages that would be sent, but do not send')\n parser.add_argument('-T', '--template-print', dest='print_only',\n action='store_true', default=False,\n help='Just print rendered templates')\n parser.add_argument('-t', '--templates', default=None, type=str,\n help='message templates folder location')\n parser.add_argument('-p', '--plain', dest='plain', action='store_true',\n default=False,\n help='Expect MESSAGE_FILE to be a plain string, '\n 'rather than the base64-encoded, gzipped SQS '\n 'message format')\n parser.add_argument('-j', '--json-dump-file', dest='json_dump_file',\n type=str, action='store', default=None,\n help='If dump JSON of MESSAGE_FILE to this path; '\n 'useful to base64-decode and gunzip a message')\n parser.add_argument('MESSAGE_FILE', type=str,\n help='Path to SQS message dump/content file')\n return parser\n\n\ndef session_factory(config):\n return boto3.Session(\n region_name=config['region'],\n profile_name=config.get('profile'))\n\n\ndef main():\n parser = setup_parser()\n options = parser.parse_args()\n\n module_dir = os.path.dirname(os.path.abspath(__file__))\n default_templates = [\n os.path.abspath(os.path.join(module_dir, 'msg-templates')),\n os.path.abspath(os.path.join(module_dir, '..', 
'msg-templates')),\n os.path.abspath('.')\n ]\n templates = options.templates\n if templates:\n default_templates.append(\n os.path.abspath(os.path.expanduser(os.path.expandvars(templates)))\n )\n\n log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n logging.basicConfig(level=logging.DEBUG, format=log_format)\n logging.getLogger('botocore').setLevel(logging.WARNING)\n\n with open(options.config) as fh:\n config = yaml.load(fh.read(), Loader=yaml.SafeLoader)\n\n jsonschema.validate(config, CONFIG_SCHEMA)\n setup_defaults(config)\n config['templates_folders'] = default_templates\n\n tester = MailerTester(\n options.MESSAGE_FILE, config, msg_plain=options.plain,\n json_dump_file=options.json_dump_file\n )\n tester.run(options.dry_run, options.print_only)\n\n\nif __name__ == '__main__':\n main()\n", "path": "tools/c7n_mailer/c7n_mailer/replay.py"}]}
| 2,208 | 176 |
gh_patches_debug_17240
|
rasdani/github-patches
|
git_diff
|
napari__napari-6139
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Removing comments from PR does not work
## 🐛 Bug
After merging, it looks like the action for removing comments does not work. 

I will be happy to fast-merge a potential bugfix without the standard 24-hour wait, since it needs to be merged in order to be tested. 
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/remove_html_comments_from_pr.py`
Content:
```
1 """
2 Edit pull request description to remove HTML comments
3
4 We might want to remove section with markdown task lists that are completely empty
5 """
6
7 import re
8 import sys
9 from os import environ
10
11 import requests
12
13
14 def remove_html_comments(text):
15 # Regular expression to remove HTML comments
16 # [^\S\r\n] is whitespace but not new line
17 html_comment_pattern = r"[^\S\r\n]*<!--(.*?)-->[^\S\r\n]*\n?"
18 return re.sub(html_comment_pattern, "", text, flags=re.DOTALL)
19
20
21 def edit_pull_request_description(repo, pull_request_number, access_token):
22 # GitHub API base URL
23 base_url = "https://api.github.com"
24
25 # Prepare the headers with the access token
26 headers = {"Authorization": f"token {access_token}"}
27
28 # Get the current pull request description
29 pr_url = f"{base_url}/repos/{repo}/pulls/{pull_request_number}"
30 response = requests.get(pr_url, headers=headers)
31 response.raise_for_status()
32 response_json = response.json()
33 current_description = response_json["body"]
34
35 # Remove HTML comments from the description
36 edited_description = remove_html_comments(current_description)
37 if edited_description == current_description:
38 print("No HTML comments found in the pull request description")
39 return
40
41 # Update the pull request description
42 update_pr_url = f"{base_url}/repos/{repo}/pulls/{pull_request_number}"
43 payload = {"body": edited_description}
44 response = requests.patch(update_pr_url, json=payload, headers=headers)
45 response.raise_for_status()
46
47 if response.status_code == 200:
48 print(
49 f"Pull request #{pull_request_number} description has been updated successfully!"
50 )
51 else:
52 print(
53 f"Failed to update pull request description. Status code: {response.status_code}"
54 )
55
56
57 if __name__ == "__main__":
58 # Replace with your repository and pull request number
59 # get cuurrent repository name from github actions
60 repository_name = environ.get("GITHUB_REPOSITORY")
61 if repository_name == "napari/napari":
62 sys.exit(0)
63
64 # get current PR number from github actions
65 github_ref = environ.get("GITHUB_REF")
66 refs, pull, number, merge = github_ref.split('/')
67 assert refs == 'refs'
68 assert pull == 'pull'
69 assert merge == 'merge'
70
71 # Replace with your GitHub access token
72 access_token = environ.get("GITHUB_TOKEN")
73
74 edit_pull_request_description(repository_name, number, access_token)
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/tools/remove_html_comments_from_pr.py b/tools/remove_html_comments_from_pr.py
--- a/tools/remove_html_comments_from_pr.py
+++ b/tools/remove_html_comments_from_pr.py
@@ -10,6 +10,8 @@
import requests
+REPO = 'napari/napari'
+
def remove_html_comments(text):
# Regular expression to remove HTML comments
@@ -55,10 +57,12 @@
if __name__ == "__main__":
+ print('Will inspect PR description to remove html comments.')
# Replace with your repository and pull request number
# get cuurrent repository name from github actions
repository_name = environ.get("GITHUB_REPOSITORY")
- if repository_name == "napari/napari":
+ if repository_name != REPO:
+ print('Not on main repo, aborting with success')
sys.exit(0)
# get current PR number from github actions
|
{"golden_diff": "diff --git a/tools/remove_html_comments_from_pr.py b/tools/remove_html_comments_from_pr.py\n--- a/tools/remove_html_comments_from_pr.py\n+++ b/tools/remove_html_comments_from_pr.py\n@@ -10,6 +10,8 @@\n \n import requests\n \n+REPO = 'napari/napari'\n+\n \n def remove_html_comments(text):\n # Regular expression to remove HTML comments\n@@ -55,10 +57,12 @@\n \n \n if __name__ == \"__main__\":\n+ print('Will inspect PR description to remove html comments.')\n # Replace with your repository and pull request number\n # get cuurrent repository name from github actions\n repository_name = environ.get(\"GITHUB_REPOSITORY\")\n- if repository_name == \"napari/napari\":\n+ if repository_name != REPO:\n+ print('Not on main repo, aborting with success')\n sys.exit(0)\n \n # get current PR number from github actions\n", "issue": "Removing comments from PR does not work\n## \ud83d\udc1b Bug\r\nAfter merging it looks like the action for removing comments does not work. \r\n\r\nI will be happy to fast merge potential bugfix without the standard 24 hours as it needs to be merged to test. \r\n\n", "before_files": [{"content": "\"\"\"\nEdit pull request description to remove HTML comments\n\nWe might want to remove section with markdown task lists that are completely empty\n\"\"\"\n\nimport re\nimport sys\nfrom os import environ\n\nimport requests\n\n\ndef remove_html_comments(text):\n # Regular expression to remove HTML comments\n # [^\\S\\r\\n] is whitespace but not new line\n html_comment_pattern = r\"[^\\S\\r\\n]*<!--(.*?)-->[^\\S\\r\\n]*\\n?\"\n return re.sub(html_comment_pattern, \"\", text, flags=re.DOTALL)\n\n\ndef edit_pull_request_description(repo, pull_request_number, access_token):\n # GitHub API base URL\n base_url = \"https://api.github.com\"\n\n # Prepare the headers with the access token\n headers = {\"Authorization\": f\"token {access_token}\"}\n\n # Get the current pull request description\n pr_url = f\"{base_url}/repos/{repo}/pulls/{pull_request_number}\"\n response = requests.get(pr_url, headers=headers)\n response.raise_for_status()\n response_json = response.json()\n current_description = response_json[\"body\"]\n\n # Remove HTML comments from the description\n edited_description = remove_html_comments(current_description)\n if edited_description == current_description:\n print(\"No HTML comments found in the pull request description\")\n return\n\n # Update the pull request description\n update_pr_url = f\"{base_url}/repos/{repo}/pulls/{pull_request_number}\"\n payload = {\"body\": edited_description}\n response = requests.patch(update_pr_url, json=payload, headers=headers)\n response.raise_for_status()\n\n if response.status_code == 200:\n print(\n f\"Pull request #{pull_request_number} description has been updated successfully!\"\n )\n else:\n print(\n f\"Failed to update pull request description. 
Status code: {response.status_code}\"\n )\n\n\nif __name__ == \"__main__\":\n # Replace with your repository and pull request number\n # get cuurrent repository name from github actions\n repository_name = environ.get(\"GITHUB_REPOSITORY\")\n if repository_name == \"napari/napari\":\n sys.exit(0)\n\n # get current PR number from github actions\n github_ref = environ.get(\"GITHUB_REF\")\n refs, pull, number, merge = github_ref.split('/')\n assert refs == 'refs'\n assert pull == 'pull'\n assert merge == 'merge'\n\n # Replace with your GitHub access token\n access_token = environ.get(\"GITHUB_TOKEN\")\n\n edit_pull_request_description(repository_name, number, access_token)\n", "path": "tools/remove_html_comments_from_pr.py"}], "after_files": [{"content": "\"\"\"\nEdit pull request description to remove HTML comments\n\nWe might want to remove section with markdown task lists that are completely empty\n\"\"\"\n\nimport re\nimport sys\nfrom os import environ\n\nimport requests\n\nREPO = 'napari/napari'\n\n\ndef remove_html_comments(text):\n # Regular expression to remove HTML comments\n # [^\\S\\r\\n] is whitespace but not new line\n html_comment_pattern = r\"[^\\S\\r\\n]*<!--(.*?)-->[^\\S\\r\\n]*\\n?\"\n return re.sub(html_comment_pattern, \"\", text, flags=re.DOTALL)\n\n\ndef edit_pull_request_description(repo, pull_request_number, access_token):\n # GitHub API base URL\n base_url = \"https://api.github.com\"\n\n # Prepare the headers with the access token\n headers = {\"Authorization\": f\"token {access_token}\"}\n\n # Get the current pull request description\n pr_url = f\"{base_url}/repos/{repo}/pulls/{pull_request_number}\"\n response = requests.get(pr_url, headers=headers)\n response.raise_for_status()\n response_json = response.json()\n current_description = response_json[\"body\"]\n\n # Remove HTML comments from the description\n edited_description = remove_html_comments(current_description)\n if edited_description == current_description:\n print(\"No HTML comments found in the pull request description\")\n return\n\n # Update the pull request description\n update_pr_url = f\"{base_url}/repos/{repo}/pulls/{pull_request_number}\"\n payload = {\"body\": edited_description}\n response = requests.patch(update_pr_url, json=payload, headers=headers)\n response.raise_for_status()\n\n if response.status_code == 200:\n print(\n f\"Pull request #{pull_request_number} description has been updated successfully!\"\n )\n else:\n print(\n f\"Failed to update pull request description. Status code: {response.status_code}\"\n )\n\n\nif __name__ == \"__main__\":\n print('Will inspect PR description to remove html comments.')\n # Replace with your repository and pull request number\n # get cuurrent repository name from github actions\n repository_name = environ.get(\"GITHUB_REPOSITORY\")\n if repository_name != REPO:\n print('Not on main repo, aborting with success')\n sys.exit(0)\n\n # get current PR number from github actions\n github_ref = environ.get(\"GITHUB_REF\")\n refs, pull, number, merge = github_ref.split('/')\n assert refs == 'refs'\n assert pull == 'pull'\n assert merge == 'merge'\n\n # Replace with your GitHub access token\n access_token = environ.get(\"GITHUB_TOKEN\")\n\n edit_pull_request_description(repository_name, number, access_token)\n", "path": "tools/remove_html_comments_from_pr.py"}]}
| 1,016 | 207 |
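Editorial note on the record above: the bug is an inverted guard — the script exited early precisely when it was running on `napari/napari`, so the main repository never had its PR descriptions cleaned. A sketch of the corrected guard in isolation, with the environment handling simplified for illustration:

```python
# Sketch of the fixed early-exit check from the golden_diff above.
import sys
from os import environ

REPO = 'napari/napari'

def exit_unless_main_repo():
    repository_name = environ.get("GITHUB_REPOSITORY")
    if repository_name != REPO:  # the buggy version tested '==', which
        print('Not on main repo, aborting with success')  # skipped the real repo
        sys.exit(0)
```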
gh_patches_debug_24889
|
rasdani/github-patches
|
git_diff
|
blaze__blaze-568
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Nested transformations don't work
Transformations containing other transformations are not currently possible. They'll be implemented in #568. Here's an example of something that will work after that PR:
``` python
d = {'timestamp': [1379613528, 1379620047], 'platform': ["Linux", "Windows"]}
df = DataFrame(d)
t = TableSymbol('t', discover(df))
t = transform(t, timestamp=t.timestamp.map(datetime.fromtimestamp,
schema='{timestamp: datetime}'))
expr = transform(t, date=t.timestamp.map(lambda x: x.date(),
schema='{date: datetime}'))
result = compute(expr, df)
df['timestamp'] = df.timestamp.map(datetime.fromtimestamp)
df['date'] = df.timestamp.map(lambda x: x.date())
assert str(result) == str(df)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `blaze/compute/pandas.py`
Content:
```
1 """
2
3 >>> from blaze.expr.table import TableSymbol
4 >>> from blaze.compute.pandas import compute
5
6 >>> accounts = TableSymbol('accounts', '{name: string, amount: int}')
7 >>> deadbeats = accounts[accounts['amount'] < 0]['name']
8
9 >>> from pandas import DataFrame
10 >>> data = [['Alice', 100], ['Bob', -50], ['Charlie', -20]]
11 >>> df = DataFrame(data, columns=['name', 'amount'])
12 >>> compute(deadbeats, df)
13 1 Bob
14 2 Charlie
15 Name: name, dtype: object
16 """
17 from __future__ import absolute_import, division, print_function
18
19 import pandas as pd
20 from pandas.core.generic import NDFrame
21 from pandas import DataFrame, Series
22 from pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy
23 import numpy as np
24 from collections import defaultdict
25
26 from ..api.into import into
27 from ..dispatch import dispatch
28 from ..expr import (Projection, Column, Sort, Head, ColumnWise, Selection,
29 Reduction, Distinct, Join, By, Summary, Label, ReLabel,
30 Map, Apply, Merge, Union, TableExpr)
31 from ..expr import UnaryOp, BinOp
32 from ..expr import TableSymbol, common_subexpression
33 from .core import compute, compute_one, base
34
35 __all__ = []
36
37
38 @dispatch(Projection, DataFrame)
39 def compute_one(t, df, **kwargs):
40 return df[list(t.columns)]
41
42
43 @dispatch(Column, (DataFrame, DataFrameGroupBy))
44 def compute_one(t, df, **kwargs):
45 return df[t.columns[0]]
46
47
48 @dispatch(Column, (Series, SeriesGroupBy))
49 def compute_one(_, s, **kwargs):
50 return s
51
52
53 @dispatch(ColumnWise, DataFrame)
54 def compute_one(t, df, **kwargs):
55 d = dict((t.child[c].scalar_symbol, df[c]) for c in t.child.columns)
56 return compute(t.expr, d)
57
58
59 @dispatch(ColumnWise, Series)
60 def compute_one(t, s, **kwargs):
61 return compute_one(t, s.to_frame(), **kwargs)
62
63
64 @dispatch(BinOp, Series, (Series, base))
65 def compute_one(t, lhs, rhs, **kwargs):
66 return t.op(lhs, rhs)
67
68
69 @dispatch(BinOp, (Series, base), Series)
70 def compute_one(t, lhs, rhs, **kwargs):
71 return t.op(lhs, rhs)
72
73
74 @dispatch(UnaryOp, NDFrame)
75 def compute_one(t, df, **kwargs):
76 f = getattr(t, 'op', getattr(np, t.symbol, None))
77 if f is None:
78 raise ValueError('%s is not a valid operation on %s objects' %
79 (t.symbol, type(df).__name__))
80 return f(df)
81
82
83 @dispatch(Selection, (Series, DataFrame))
84 def compute_one(t, df, **kwargs):
85 predicate = compute(t.predicate, {t.child: df})
86 return df[predicate]
87
88
89 @dispatch(TableSymbol, DataFrame)
90 def compute_one(t, df, **kwargs):
91 if not list(t.columns) == list(df.columns):
92 # TODO also check dtype
93 raise ValueError("Schema mismatch: \n\nTable:\n%s\n\nDataFrame:\n%s"
94 % (t, df))
95 return df
96
97
98 @dispatch(Join, DataFrame, DataFrame)
99 def compute_one(t, lhs, rhs, **kwargs):
100 """ Join two pandas data frames on arbitrary columns
101
102 The approach taken here could probably be improved.
103
104 To join on two columns we force each column to be the index of the
105 dataframe, perform the join, and then reset the index back to the left
106 side's original index.
107 """
108 result = pd.merge(lhs, rhs,
109 left_on=t.on_left, right_on=t.on_right,
110 how=t.how)
111 return result.reset_index()[t.columns]
112
113
114 @dispatch(TableSymbol, (DataFrameGroupBy, SeriesGroupBy))
115 def compute_one(t, gb, **kwargs):
116 return gb
117
118
119 @dispatch(Reduction, (DataFrame, DataFrameGroupBy))
120 def compute_one(t, df, **kwargs):
121 return getattr(df, t.symbol)()
122
123
124 @dispatch(Reduction, (SeriesGroupBy, Series))
125 def compute_one(t, s, **kwargs):
126 result = getattr(s, t.symbol)()
127
128 # pandas may return an int, numpy scalar or non scalar here so we need to
129 # program defensively so that things are JSON serializable
130 try:
131 return result.item()
132 except (AttributeError, ValueError):
133 return result
134
135
136 @dispatch(Distinct, DataFrame)
137 def compute_one(t, df, **kwargs):
138 return df.drop_duplicates()
139
140
141 @dispatch(Distinct, Series)
142 def compute_one(t, s, **kwargs):
143 s2 = Series(s.unique())
144 s2.name = s.name
145 return s2
146
147
148 def unpack(seq):
149 """ Unpack sequence of length one
150
151 >>> unpack([1, 2, 3])
152 [1, 2, 3]
153
154 >>> unpack([1])
155 1
156 """
157 seq = list(seq)
158 if len(seq) == 1:
159 seq = seq[0]
160 return seq
161
162
163 Grouper = Column, ColumnWise, Series, list
164
165
166 @dispatch(By, list, DataFrame)
167 def get_grouper(c, grouper, df):
168 return grouper
169
170
171 @dispatch(By, (Column, ColumnWise, Series), NDFrame)
172 def get_grouper(c, grouper, df):
173 return compute(grouper, {c.child: df})
174
175
176 @dispatch(By, Projection, NDFrame)
177 def get_grouper(c, grouper, df):
178 return grouper.columns
179
180
181 @dispatch(By, Reduction, Grouper, NDFrame)
182 def compute_by(t, r, g, df):
183 names = r.dshape[0].names
184 preapply = compute(r.child, {t.child: df})
185
186 # Pandas and Blaze column naming schemes differ
187 # Coerce DataFrame column names to match Blaze's names
188 preapply = preapply.copy()
189 if isinstance(preapply, Series):
190 preapply.name = names[0]
191 else:
192 preapply.columns = names
193 group_df = concat_nodup(df, preapply)
194
195 gb = group_df.groupby(g)
196 groups = gb[names[0] if t.apply.child.iscolumn else names]
197
198 return compute_one(r, groups) # do reduction
199
200
201 @dispatch(By, Summary, Grouper, NDFrame)
202 def compute_by(t, s, g, df):
203 names = s.names
204 preapply = DataFrame(dict(zip(names,
205 (compute(v.child, {t.child: df})
206 for v in s.values))))
207
208 df2 = concat_nodup(df, preapply)
209
210 groups = df2.groupby(g)
211
212 d = defaultdict(list)
213 for name, v in zip(names, s.values):
214 d[name].append(getattr(Series, v.symbol))
215
216 result = groups.agg(dict(d))
217
218 # Rearrange columns to match names order
219 result = result[sorted(result.columns, key=lambda t: names.index(t[0]))]
220 result.columns = t.apply.names # flatten down multiindex
221 return result
222
223
224 @dispatch(TableExpr, DataFrame)
225 def post_compute_by(t, df):
226 return df.reset_index(drop=True)
227
228
229 @dispatch((Summary, Reduction), DataFrame)
230 def post_compute_by(t, df):
231 return df.reset_index()
232
233
234 @dispatch(By, NDFrame)
235 def compute_one(t, df, **kwargs):
236 grouper = get_grouper(t, t.grouper, df)
237 result = compute_by(t, t.apply, grouper, df)
238 return post_compute_by(t.apply, into(DataFrame, result))
239
240
241 def concat_nodup(a, b):
242 """ Concatenate two dataframes/series without duplicately named columns
243
244
245 >>> df = DataFrame([[1, 'Alice', 100],
246 ... [2, 'Bob', -200],
247 ... [3, 'Charlie', 300]],
248 ... columns=['id','name', 'amount'])
249
250 >>> concat_nodup(df, df)
251 id name amount
252 0 1 Alice 100
253 1 2 Bob -200
254 2 3 Charlie 300
255
256
257 >>> concat_nodup(df.name, df.amount)
258 name amount
259 0 Alice 100
260 1 Bob -200
261 2 Charlie 300
262
263
264
265 >>> concat_nodup(df, df.amount + df.id)
266 id name amount 0
267 0 1 Alice 100 101
268 1 2 Bob -200 -198
269 2 3 Charlie 300 303
270 """
271
272 if isinstance(a, DataFrame) and isinstance(b, DataFrame):
273 return pd.concat([a, b[[c for c in b.columns if c not in a.columns]]],
274 axis=1)
275 if isinstance(a, DataFrame) and isinstance(b, Series):
276 if b.name not in a.columns:
277 return pd.concat([a, b], axis=1)
278 else:
279 return a
280 if isinstance(a, Series) and isinstance(b, DataFrame):
281 return pd.concat([a, b[[c for c in b.columns if c != a.name]]], axis=1)
282 if isinstance(a, Series) and isinstance(b, Series):
283 if a.name == b.name:
284 return a
285 else:
286 return pd.concat([a, b], axis=1)
287
288
289 @dispatch(Sort, DataFrame)
290 def compute_one(t, df, **kwargs):
291 return df.sort(t.key, ascending=t.ascending)
292
293
294 @dispatch(Sort, Series)
295 def compute_one(t, s, **kwargs):
296 return s.order(ascending=t.ascending)
297
298
299 @dispatch(Head, (Series, DataFrame))
300 def compute_one(t, df, **kwargs):
301 return df.head(t.n)
302
303
304 @dispatch(Label, DataFrame)
305 def compute_one(t, df, **kwargs):
306 return DataFrame(df, columns=[t.label])
307
308
309 @dispatch(Label, Series)
310 def compute_one(t, df, **kwargs):
311 return Series(df, name=t.label)
312
313
314 @dispatch(ReLabel, DataFrame)
315 def compute_one(t, df, **kwargs):
316 return df.rename(columns=dict(t.labels))
317
318
319 @dispatch(ReLabel, Series)
320 def compute_one(t, s, **kwargs):
321 labels = t.labels
322 if len(labels) > 1:
323 raise ValueError('You can only relabel a Series with a single name')
324 pair, = labels
325 _, replacement = pair
326 return Series(s, name=replacement)
327
328
329 @dispatch(Map, DataFrame)
330 def compute_one(t, df, **kwargs):
331 return df.apply(lambda tup: t.func(*tup), axis=1)
332
333
334 @dispatch(Map, Series)
335 def compute_one(t, df, **kwargs):
336 return df.map(t.func)
337
338
339 @dispatch(Apply, (Series, DataFrame))
340 def compute_one(t, df, **kwargs):
341 return t.func(df)
342
343
344 @dispatch(Merge, DataFrame)
345 def compute_one(t, df, **kwargs):
346 subexpression = common_subexpression(*t.children)
347 children = [compute(child, {subexpression: df}) for child in t.children]
348 return pd.concat(children, axis=1)
349
350
351 @dispatch(Union, DataFrame, tuple)
352 def compute_one(t, example, children, **kwargs):
353 return pd.concat(children, axis=0)
354
355
356 @dispatch(Summary, DataFrame)
357 def compute_one(expr, data, **kwargs):
358 return Series(dict(zip(expr.names, [compute(val, {expr.child: data})
359 for val in expr.values])))
360
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/blaze/compute/pandas.py b/blaze/compute/pandas.py
--- a/blaze/compute/pandas.py
+++ b/blaze/compute/pandas.py
@@ -22,6 +22,7 @@
from pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy
import numpy as np
from collections import defaultdict
+from toolz import merge as merge_dicts
from ..api.into import into
from ..dispatch import dispatch
@@ -333,7 +334,13 @@
@dispatch(Map, Series)
def compute_one(t, df, **kwargs):
- return df.map(t.func)
+ result = df.map(t.func)
+ try:
+ result.name = t.name
+ except NotImplementedError:
+ # We don't have a schema, but we should still be able to map
+ result.name = df.name
+ return result
@dispatch(Apply, (Series, DataFrame))
@@ -341,10 +348,11 @@
return t.func(df)
-@dispatch(Merge, DataFrame)
-def compute_one(t, df, **kwargs):
+@dispatch(Merge, NDFrame)
+def compute_one(t, df, scope=None, **kwargs):
subexpression = common_subexpression(*t.children)
- children = [compute(child, {subexpression: df}) for child in t.children]
+ scope = merge_dicts(scope or {}, {subexpression: df})
+ children = [compute(child, scope) for child in t.children]
return pd.concat(children, axis=1)
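With the two changes above — `Map` on a `Series` now keeps a column name, and `Merge` threads the caller's scope through to its children — the nested-transform example from the issue is the intended end state. As a sanity check of what that end state looks like, here is a minimal pandas-only sketch (no blaze involved) of the frame the patched `compute` should produce:

```python
# Expected result of the nested transform, expressed directly in pandas.
from datetime import datetime

from pandas import DataFrame

df = DataFrame({'timestamp': [1379613528, 1379620047],
                'platform': ["Linux", "Windows"]})

# inner transform: integer timestamps -> datetimes
df['timestamp'] = df.timestamp.map(datetime.fromtimestamp)
# outer transform: built on the column the inner transform just produced
df['date'] = df.timestamp.map(lambda x: x.date())

print(df)
```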
|
{"golden_diff": "diff --git a/blaze/compute/pandas.py b/blaze/compute/pandas.py\n--- a/blaze/compute/pandas.py\n+++ b/blaze/compute/pandas.py\n@@ -22,6 +22,7 @@\n from pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy\n import numpy as np\n from collections import defaultdict\n+from toolz import merge as merge_dicts\n \n from ..api.into import into\n from ..dispatch import dispatch\n@@ -333,7 +334,13 @@\n \n @dispatch(Map, Series)\n def compute_one(t, df, **kwargs):\n- return df.map(t.func)\n+ result = df.map(t.func)\n+ try:\n+ result.name = t.name\n+ except NotImplementedError:\n+ # We don't have a schema, but we should still be able to map\n+ result.name = df.name\n+ return result\n \n \n @dispatch(Apply, (Series, DataFrame))\n@@ -341,10 +348,11 @@\n return t.func(df)\n \n \n-@dispatch(Merge, DataFrame)\n-def compute_one(t, df, **kwargs):\n+@dispatch(Merge, NDFrame)\n+def compute_one(t, df, scope=None, **kwargs):\n subexpression = common_subexpression(*t.children)\n- children = [compute(child, {subexpression: df}) for child in t.children]\n+ scope = merge_dicts(scope or {}, {subexpression: df})\n+ children = [compute(child, scope) for child in t.children]\n return pd.concat(children, axis=1)\n", "issue": "Nested transformations don't work\nTransformations containing other transformations are not currently possible. They'll be implemented in #568. Here's an example of something that will work after that PR:\n\n``` python\nd = {'timestamp': [1379613528, 1379620047], 'platform': [\"Linux\", \"Windows\"]}\ndf = DataFrame(d)\nt = TableSymbol('t', discover(df))\nt = transform(t, timestamp=t.timestamp.map(datetime.fromtimestamp,\n schema='{timestamp: datetime}'))\nexpr = transform(t, date=t.timestamp.map(lambda x: x.date(),\n schema='{date: datetime}'))\nresult = compute(expr, df)\ndf['timestamp'] = df.timestamp.map(datetime.fromtimestamp)\ndf['date'] = df.timestamp.map(lambda x: x.date())\nassert str(result) == str(df)\n```\n\n", "before_files": [{"content": "\"\"\"\n\n>>> from blaze.expr.table import TableSymbol\n>>> from blaze.compute.pandas import compute\n\n>>> accounts = TableSymbol('accounts', '{name: string, amount: int}')\n>>> deadbeats = accounts[accounts['amount'] < 0]['name']\n\n>>> from pandas import DataFrame\n>>> data = [['Alice', 100], ['Bob', -50], ['Charlie', -20]]\n>>> df = DataFrame(data, columns=['name', 'amount'])\n>>> compute(deadbeats, df)\n1 Bob\n2 Charlie\nName: name, dtype: object\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nimport pandas as pd\nfrom pandas.core.generic import NDFrame\nfrom pandas import DataFrame, Series\nfrom pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy\nimport numpy as np\nfrom collections import defaultdict\n\nfrom ..api.into import into\nfrom ..dispatch import dispatch\nfrom ..expr import (Projection, Column, Sort, Head, ColumnWise, Selection,\n Reduction, Distinct, Join, By, Summary, Label, ReLabel,\n Map, Apply, Merge, Union, TableExpr)\nfrom ..expr import UnaryOp, BinOp\nfrom ..expr import TableSymbol, common_subexpression\nfrom .core import compute, compute_one, base\n\n__all__ = []\n\n\n@dispatch(Projection, DataFrame)\ndef compute_one(t, df, **kwargs):\n return df[list(t.columns)]\n\n\n@dispatch(Column, (DataFrame, DataFrameGroupBy))\ndef compute_one(t, df, **kwargs):\n return df[t.columns[0]]\n\n\n@dispatch(Column, (Series, SeriesGroupBy))\ndef compute_one(_, s, **kwargs):\n return s\n\n\n@dispatch(ColumnWise, DataFrame)\ndef compute_one(t, df, **kwargs):\n d = 
dict((t.child[c].scalar_symbol, df[c]) for c in t.child.columns)\n return compute(t.expr, d)\n\n\n@dispatch(ColumnWise, Series)\ndef compute_one(t, s, **kwargs):\n return compute_one(t, s.to_frame(), **kwargs)\n\n\n@dispatch(BinOp, Series, (Series, base))\ndef compute_one(t, lhs, rhs, **kwargs):\n return t.op(lhs, rhs)\n\n\n@dispatch(BinOp, (Series, base), Series)\ndef compute_one(t, lhs, rhs, **kwargs):\n return t.op(lhs, rhs)\n\n\n@dispatch(UnaryOp, NDFrame)\ndef compute_one(t, df, **kwargs):\n f = getattr(t, 'op', getattr(np, t.symbol, None))\n if f is None:\n raise ValueError('%s is not a valid operation on %s objects' %\n (t.symbol, type(df).__name__))\n return f(df)\n\n\n@dispatch(Selection, (Series, DataFrame))\ndef compute_one(t, df, **kwargs):\n predicate = compute(t.predicate, {t.child: df})\n return df[predicate]\n\n\n@dispatch(TableSymbol, DataFrame)\ndef compute_one(t, df, **kwargs):\n if not list(t.columns) == list(df.columns):\n # TODO also check dtype\n raise ValueError(\"Schema mismatch: \\n\\nTable:\\n%s\\n\\nDataFrame:\\n%s\"\n % (t, df))\n return df\n\n\n@dispatch(Join, DataFrame, DataFrame)\ndef compute_one(t, lhs, rhs, **kwargs):\n \"\"\" Join two pandas data frames on arbitrary columns\n\n The approach taken here could probably be improved.\n\n To join on two columns we force each column to be the index of the\n dataframe, perform the join, and then reset the index back to the left\n side's original index.\n \"\"\"\n result = pd.merge(lhs, rhs,\n left_on=t.on_left, right_on=t.on_right,\n how=t.how)\n return result.reset_index()[t.columns]\n\n\n@dispatch(TableSymbol, (DataFrameGroupBy, SeriesGroupBy))\ndef compute_one(t, gb, **kwargs):\n return gb\n\n\n@dispatch(Reduction, (DataFrame, DataFrameGroupBy))\ndef compute_one(t, df, **kwargs):\n return getattr(df, t.symbol)()\n\n\n@dispatch(Reduction, (SeriesGroupBy, Series))\ndef compute_one(t, s, **kwargs):\n result = getattr(s, t.symbol)()\n\n # pandas may return an int, numpy scalar or non scalar here so we need to\n # program defensively so that things are JSON serializable\n try:\n return result.item()\n except (AttributeError, ValueError):\n return result\n\n\n@dispatch(Distinct, DataFrame)\ndef compute_one(t, df, **kwargs):\n return df.drop_duplicates()\n\n\n@dispatch(Distinct, Series)\ndef compute_one(t, s, **kwargs):\n s2 = Series(s.unique())\n s2.name = s.name\n return s2\n\n\ndef unpack(seq):\n \"\"\" Unpack sequence of length one\n\n >>> unpack([1, 2, 3])\n [1, 2, 3]\n\n >>> unpack([1])\n 1\n \"\"\"\n seq = list(seq)\n if len(seq) == 1:\n seq = seq[0]\n return seq\n\n\nGrouper = Column, ColumnWise, Series, list\n\n\n@dispatch(By, list, DataFrame)\ndef get_grouper(c, grouper, df):\n return grouper\n\n\n@dispatch(By, (Column, ColumnWise, Series), NDFrame)\ndef get_grouper(c, grouper, df):\n return compute(grouper, {c.child: df})\n\n\n@dispatch(By, Projection, NDFrame)\ndef get_grouper(c, grouper, df):\n return grouper.columns\n\n\n@dispatch(By, Reduction, Grouper, NDFrame)\ndef compute_by(t, r, g, df):\n names = r.dshape[0].names\n preapply = compute(r.child, {t.child: df})\n\n # Pandas and Blaze column naming schemes differ\n # Coerce DataFrame column names to match Blaze's names\n preapply = preapply.copy()\n if isinstance(preapply, Series):\n preapply.name = names[0]\n else:\n preapply.columns = names\n group_df = concat_nodup(df, preapply)\n\n gb = group_df.groupby(g)\n groups = gb[names[0] if t.apply.child.iscolumn else names]\n\n return compute_one(r, groups) # do reduction\n\n\n@dispatch(By, Summary, 
Grouper, NDFrame)\ndef compute_by(t, s, g, df):\n names = s.names\n preapply = DataFrame(dict(zip(names,\n (compute(v.child, {t.child: df})\n for v in s.values))))\n\n df2 = concat_nodup(df, preapply)\n\n groups = df2.groupby(g)\n\n d = defaultdict(list)\n for name, v in zip(names, s.values):\n d[name].append(getattr(Series, v.symbol))\n\n result = groups.agg(dict(d))\n\n # Rearrange columns to match names order\n result = result[sorted(result.columns, key=lambda t: names.index(t[0]))]\n result.columns = t.apply.names # flatten down multiindex\n return result\n\n\n@dispatch(TableExpr, DataFrame)\ndef post_compute_by(t, df):\n return df.reset_index(drop=True)\n\n\n@dispatch((Summary, Reduction), DataFrame)\ndef post_compute_by(t, df):\n return df.reset_index()\n\n\n@dispatch(By, NDFrame)\ndef compute_one(t, df, **kwargs):\n grouper = get_grouper(t, t.grouper, df)\n result = compute_by(t, t.apply, grouper, df)\n return post_compute_by(t.apply, into(DataFrame, result))\n\n\ndef concat_nodup(a, b):\n \"\"\" Concatenate two dataframes/series without duplicately named columns\n\n\n >>> df = DataFrame([[1, 'Alice', 100],\n ... [2, 'Bob', -200],\n ... [3, 'Charlie', 300]],\n ... columns=['id','name', 'amount'])\n\n >>> concat_nodup(df, df)\n id name amount\n 0 1 Alice 100\n 1 2 Bob -200\n 2 3 Charlie 300\n\n\n >>> concat_nodup(df.name, df.amount)\n name amount\n 0 Alice 100\n 1 Bob -200\n 2 Charlie 300\n\n\n\n >>> concat_nodup(df, df.amount + df.id)\n id name amount 0\n 0 1 Alice 100 101\n 1 2 Bob -200 -198\n 2 3 Charlie 300 303\n \"\"\"\n\n if isinstance(a, DataFrame) and isinstance(b, DataFrame):\n return pd.concat([a, b[[c for c in b.columns if c not in a.columns]]],\n axis=1)\n if isinstance(a, DataFrame) and isinstance(b, Series):\n if b.name not in a.columns:\n return pd.concat([a, b], axis=1)\n else:\n return a\n if isinstance(a, Series) and isinstance(b, DataFrame):\n return pd.concat([a, b[[c for c in b.columns if c != a.name]]], axis=1)\n if isinstance(a, Series) and isinstance(b, Series):\n if a.name == b.name:\n return a\n else:\n return pd.concat([a, b], axis=1)\n\n\n@dispatch(Sort, DataFrame)\ndef compute_one(t, df, **kwargs):\n return df.sort(t.key, ascending=t.ascending)\n\n\n@dispatch(Sort, Series)\ndef compute_one(t, s, **kwargs):\n return s.order(ascending=t.ascending)\n\n\n@dispatch(Head, (Series, DataFrame))\ndef compute_one(t, df, **kwargs):\n return df.head(t.n)\n\n\n@dispatch(Label, DataFrame)\ndef compute_one(t, df, **kwargs):\n return DataFrame(df, columns=[t.label])\n\n\n@dispatch(Label, Series)\ndef compute_one(t, df, **kwargs):\n return Series(df, name=t.label)\n\n\n@dispatch(ReLabel, DataFrame)\ndef compute_one(t, df, **kwargs):\n return df.rename(columns=dict(t.labels))\n\n\n@dispatch(ReLabel, Series)\ndef compute_one(t, s, **kwargs):\n labels = t.labels\n if len(labels) > 1:\n raise ValueError('You can only relabel a Series with a single name')\n pair, = labels\n _, replacement = pair\n return Series(s, name=replacement)\n\n\n@dispatch(Map, DataFrame)\ndef compute_one(t, df, **kwargs):\n return df.apply(lambda tup: t.func(*tup), axis=1)\n\n\n@dispatch(Map, Series)\ndef compute_one(t, df, **kwargs):\n return df.map(t.func)\n\n\n@dispatch(Apply, (Series, DataFrame))\ndef compute_one(t, df, **kwargs):\n return t.func(df)\n\n\n@dispatch(Merge, DataFrame)\ndef compute_one(t, df, **kwargs):\n subexpression = common_subexpression(*t.children)\n children = [compute(child, {subexpression: df}) for child in t.children]\n return pd.concat(children, 
axis=1)\n\n\n@dispatch(Union, DataFrame, tuple)\ndef compute_one(t, example, children, **kwargs):\n return pd.concat(children, axis=0)\n\n\n@dispatch(Summary, DataFrame)\ndef compute_one(expr, data, **kwargs):\n return Series(dict(zip(expr.names, [compute(val, {expr.child: data})\n for val in expr.values])))\n", "path": "blaze/compute/pandas.py"}], "after_files": [{"content": "\"\"\"\n\n>>> from blaze.expr.table import TableSymbol\n>>> from blaze.compute.pandas import compute\n\n>>> accounts = TableSymbol('accounts', '{name: string, amount: int}')\n>>> deadbeats = accounts[accounts['amount'] < 0]['name']\n\n>>> from pandas import DataFrame\n>>> data = [['Alice', 100], ['Bob', -50], ['Charlie', -20]]\n>>> df = DataFrame(data, columns=['name', 'amount'])\n>>> compute(deadbeats, df)\n1 Bob\n2 Charlie\nName: name, dtype: object\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nimport pandas as pd\nfrom pandas.core.generic import NDFrame\nfrom pandas import DataFrame, Series\nfrom pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy\nimport numpy as np\nfrom collections import defaultdict\nfrom toolz import merge as merge_dicts\n\nfrom ..api.into import into\nfrom ..dispatch import dispatch\nfrom ..expr import (Projection, Column, Sort, Head, ColumnWise, Selection,\n Reduction, Distinct, Join, By, Summary, Label, ReLabel,\n Map, Apply, Merge, Union, TableExpr)\nfrom ..expr import UnaryOp, BinOp\nfrom ..expr import TableSymbol, common_subexpression\nfrom .core import compute, compute_one, base\n\n__all__ = []\n\n\n@dispatch(Projection, DataFrame)\ndef compute_one(t, df, **kwargs):\n return df[list(t.columns)]\n\n\n@dispatch(Column, (DataFrame, DataFrameGroupBy))\ndef compute_one(t, df, **kwargs):\n return df[t.columns[0]]\n\n\n@dispatch(Column, (Series, SeriesGroupBy))\ndef compute_one(_, s, **kwargs):\n return s\n\n\n@dispatch(ColumnWise, DataFrame)\ndef compute_one(t, df, **kwargs):\n d = dict((t.child[c].scalar_symbol, df[c]) for c in t.child.columns)\n return compute(t.expr, d)\n\n\n@dispatch(ColumnWise, Series)\ndef compute_one(t, s, **kwargs):\n return compute_one(t, s.to_frame(), **kwargs)\n\n\n@dispatch(BinOp, Series, (Series, base))\ndef compute_one(t, lhs, rhs, **kwargs):\n return t.op(lhs, rhs)\n\n\n@dispatch(BinOp, (Series, base), Series)\ndef compute_one(t, lhs, rhs, **kwargs):\n return t.op(lhs, rhs)\n\n\n@dispatch(UnaryOp, NDFrame)\ndef compute_one(t, df, **kwargs):\n f = getattr(t, 'op', getattr(np, t.symbol, None))\n if f is None:\n raise ValueError('%s is not a valid operation on %s objects' %\n (t.symbol, type(df).__name__))\n return f(df)\n\n\n@dispatch(Selection, (Series, DataFrame))\ndef compute_one(t, df, **kwargs):\n predicate = compute(t.predicate, {t.child: df})\n return df[predicate]\n\n\n@dispatch(TableSymbol, DataFrame)\ndef compute_one(t, df, **kwargs):\n if not list(t.columns) == list(df.columns):\n # TODO also check dtype\n raise ValueError(\"Schema mismatch: \\n\\nTable:\\n%s\\n\\nDataFrame:\\n%s\"\n % (t, df))\n return df\n\n\n@dispatch(Join, DataFrame, DataFrame)\ndef compute_one(t, lhs, rhs, **kwargs):\n \"\"\" Join two pandas data frames on arbitrary columns\n\n The approach taken here could probably be improved.\n\n To join on two columns we force each column to be the index of the\n dataframe, perform the join, and then reset the index back to the left\n side's original index.\n \"\"\"\n result = pd.merge(lhs, rhs,\n left_on=t.on_left, right_on=t.on_right,\n how=t.how)\n return 
result.reset_index()[t.columns]\n\n\n@dispatch(TableSymbol, (DataFrameGroupBy, SeriesGroupBy))\ndef compute_one(t, gb, **kwargs):\n return gb\n\n\n@dispatch(Reduction, (DataFrame, DataFrameGroupBy))\ndef compute_one(t, df, **kwargs):\n return getattr(df, t.symbol)()\n\n\n@dispatch(Reduction, (SeriesGroupBy, Series))\ndef compute_one(t, s, **kwargs):\n result = getattr(s, t.symbol)()\n\n # pandas may return an int, numpy scalar or non scalar here so we need to\n # program defensively so that things are JSON serializable\n try:\n return result.item()\n except (AttributeError, ValueError):\n return result\n\n\n@dispatch(Distinct, DataFrame)\ndef compute_one(t, df, **kwargs):\n return df.drop_duplicates()\n\n\n@dispatch(Distinct, Series)\ndef compute_one(t, s, **kwargs):\n s2 = Series(s.unique())\n s2.name = s.name\n return s2\n\n\ndef unpack(seq):\n \"\"\" Unpack sequence of length one\n\n >>> unpack([1, 2, 3])\n [1, 2, 3]\n\n >>> unpack([1])\n 1\n \"\"\"\n seq = list(seq)\n if len(seq) == 1:\n seq = seq[0]\n return seq\n\n\nGrouper = Column, ColumnWise, Series, list\n\n\n@dispatch(By, list, DataFrame)\ndef get_grouper(c, grouper, df):\n return grouper\n\n\n@dispatch(By, (Column, ColumnWise, Series), NDFrame)\ndef get_grouper(c, grouper, df):\n return compute(grouper, {c.child: df})\n\n\n@dispatch(By, Projection, NDFrame)\ndef get_grouper(c, grouper, df):\n return grouper.columns\n\n\n@dispatch(By, Reduction, Grouper, NDFrame)\ndef compute_by(t, r, g, df):\n names = r.dshape[0].names\n preapply = compute(r.child, {t.child: df})\n\n # Pandas and Blaze column naming schemes differ\n # Coerce DataFrame column names to match Blaze's names\n preapply = preapply.copy()\n if isinstance(preapply, Series):\n preapply.name = names[0]\n else:\n preapply.columns = names\n group_df = concat_nodup(df, preapply)\n\n gb = group_df.groupby(g)\n groups = gb[names[0] if t.apply.child.iscolumn else names]\n\n return compute_one(r, groups) # do reduction\n\n\n@dispatch(By, Summary, Grouper, NDFrame)\ndef compute_by(t, s, g, df):\n names = s.names\n preapply = DataFrame(dict(zip(names,\n (compute(v.child, {t.child: df})\n for v in s.values))))\n\n df2 = concat_nodup(df, preapply)\n\n groups = df2.groupby(g)\n\n d = defaultdict(list)\n for name, v in zip(names, s.values):\n d[name].append(getattr(Series, v.symbol))\n\n result = groups.agg(dict(d))\n\n # Rearrange columns to match names order\n result = result[sorted(result.columns, key=lambda t: names.index(t[0]))]\n result.columns = t.apply.names # flatten down multiindex\n return result\n\n\n@dispatch(TableExpr, DataFrame)\ndef post_compute_by(t, df):\n return df.reset_index(drop=True)\n\n\n@dispatch((Summary, Reduction), DataFrame)\ndef post_compute_by(t, df):\n return df.reset_index()\n\n\n@dispatch(By, NDFrame)\ndef compute_one(t, df, **kwargs):\n grouper = get_grouper(t, t.grouper, df)\n result = compute_by(t, t.apply, grouper, df)\n return post_compute_by(t.apply, into(DataFrame, result))\n\n\ndef concat_nodup(a, b):\n \"\"\" Concatenate two dataframes/series without duplicately named columns\n\n\n >>> df = DataFrame([[1, 'Alice', 100],\n ... [2, 'Bob', -200],\n ... [3, 'Charlie', 300]],\n ... 
columns=['id','name', 'amount'])\n\n >>> concat_nodup(df, df)\n id name amount\n 0 1 Alice 100\n 1 2 Bob -200\n 2 3 Charlie 300\n\n\n >>> concat_nodup(df.name, df.amount)\n name amount\n 0 Alice 100\n 1 Bob -200\n 2 Charlie 300\n\n\n\n >>> concat_nodup(df, df.amount + df.id)\n id name amount 0\n 0 1 Alice 100 101\n 1 2 Bob -200 -198\n 2 3 Charlie 300 303\n \"\"\"\n\n if isinstance(a, DataFrame) and isinstance(b, DataFrame):\n return pd.concat([a, b[[c for c in b.columns if c not in a.columns]]],\n axis=1)\n if isinstance(a, DataFrame) and isinstance(b, Series):\n if b.name not in a.columns:\n return pd.concat([a, b], axis=1)\n else:\n return a\n if isinstance(a, Series) and isinstance(b, DataFrame):\n return pd.concat([a, b[[c for c in b.columns if c != a.name]]], axis=1)\n if isinstance(a, Series) and isinstance(b, Series):\n if a.name == b.name:\n return a\n else:\n return pd.concat([a, b], axis=1)\n\n\n@dispatch(Sort, DataFrame)\ndef compute_one(t, df, **kwargs):\n return df.sort(t.key, ascending=t.ascending)\n\n\n@dispatch(Sort, Series)\ndef compute_one(t, s, **kwargs):\n return s.order(ascending=t.ascending)\n\n\n@dispatch(Head, (Series, DataFrame))\ndef compute_one(t, df, **kwargs):\n return df.head(t.n)\n\n\n@dispatch(Label, DataFrame)\ndef compute_one(t, df, **kwargs):\n return DataFrame(df, columns=[t.label])\n\n\n@dispatch(Label, Series)\ndef compute_one(t, df, **kwargs):\n return Series(df, name=t.label)\n\n\n@dispatch(ReLabel, DataFrame)\ndef compute_one(t, df, **kwargs):\n return df.rename(columns=dict(t.labels))\n\n\n@dispatch(ReLabel, Series)\ndef compute_one(t, s, **kwargs):\n labels = t.labels\n if len(labels) > 1:\n raise ValueError('You can only relabel a Series with a single name')\n pair, = labels\n _, replacement = pair\n return Series(s, name=replacement)\n\n\n@dispatch(Map, DataFrame)\ndef compute_one(t, df, **kwargs):\n return df.apply(lambda tup: t.func(*tup), axis=1)\n\n\n@dispatch(Map, Series)\ndef compute_one(t, df, **kwargs):\n result = df.map(t.func)\n try:\n result.name = t.name\n except NotImplementedError:\n # We don't have a schema, but we should still be able to map\n result.name = df.name\n return result\n\n\n@dispatch(Apply, (Series, DataFrame))\ndef compute_one(t, df, **kwargs):\n return t.func(df)\n\n\n@dispatch(Merge, NDFrame)\ndef compute_one(t, df, scope=None, **kwargs):\n subexpression = common_subexpression(*t.children)\n scope = merge_dicts(scope or {}, {subexpression: df})\n children = [compute(child, scope) for child in t.children]\n return pd.concat(children, axis=1)\n\n\n@dispatch(Union, DataFrame, tuple)\ndef compute_one(t, example, children, **kwargs):\n return pd.concat(children, axis=0)\n\n\n@dispatch(Summary, DataFrame)\ndef compute_one(expr, data, **kwargs):\n return Series(dict(zip(expr.names, [compute(val, {expr.child: data})\n for val in expr.values])))\n", "path": "blaze/compute/pandas.py"}]}
| 4,027 | 343 |
gh_patches_debug_18479
|
rasdani/github-patches
|
git_diff
|
doccano__doccano-1744
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
How could I reduce number of workers?
Could I reduce the number_of_workers?
---------
I run the doccano in my machine use this code.
```
doccano init
doccano create user ***
doccano web server --port ***
```
And then I got this log:
```
Booting worker with pid: 19
Booting worker with pid: 20
...
Booting worker with pid: 157
```
It runs lots of workers and takes up a lot of memory. So, can I change the number_of_workers variable? I saw the default is number_of_workers = `multiprocessing.cpu_count() * 2 + 1`. How could I change it?
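For reference, that default comes from `number_of_workers()` in `backend/cli.py` (shown below); a standard-library-only sketch of the same calculation shows why so many workers boot on a machine with many cores:

```python
# Same formula as number_of_workers() in backend/cli.py, standard library only.
import multiprocessing

workers = (multiprocessing.cpu_count() * 2) + 1
print(workers)  # e.g. 17 on an 8-core machine
```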
Your Environment
---------
* Operating System: Linux
* Python Version Used: Python38
* When you install doccano: 2021-11-30
* How did you install doccano (Heroku button etc): pip install doccano
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `backend/cli.py`
Content:
```
1 import argparse
2 import multiprocessing
3 import os
4 import platform
5 import sys
6 from pathlib import Path
7
8 import django
9 from django.core import management
10
11 from .config.celery import app
12
13 DOCCANO_HOME = os.path.expanduser(os.environ.get("DOCCANO_HOME", "~/doccano"))
14 Path(DOCCANO_HOME).mkdir(parents=True, exist_ok=True)
15 os.environ["STANDALONE"] = "True"
16 os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
17 os.environ.setdefault("DATABASE_URL", os.path.join(f"sqlite:///{DOCCANO_HOME}", "db.sqlite3"))
18 os.environ.setdefault("MEDIA_ROOT", os.path.join(DOCCANO_HOME, "media"))
19 base = os.path.abspath(os.path.dirname(__file__))
20 sys.path.append(base)
21 django.setup()
22 parser = argparse.ArgumentParser(description="doccano, text annotation for machine learning practitioners.")
23
24
25 def number_of_workers():
26 return (multiprocessing.cpu_count() * 2) + 1
27
28
29 def is_windows():
30 return platform.system() == "Windows"
31
32
33 def run_on_nix(args):
34 import gunicorn.app.base
35 import gunicorn.util
36
37 class StandaloneApplication(gunicorn.app.base.BaseApplication):
38 def __init__(self, options=None):
39 self.options = options or {}
40 super().__init__()
41
42 def load_config(self):
43 config = {
44 key: value for key, value in self.options.items() if key in self.cfg.settings and value is not None
45 }
46 for key, value in config.items():
47 self.cfg.set(key.lower(), value)
48
49 def load(self):
50 return gunicorn.util.import_app("config.wsgi")
51
52 options = {
53 "bind": "%s:%s" % ("0.0.0.0", args.port),
54 "workers": number_of_workers(),
55 "chdir": base,
56 "capture_output": True,
57 "loglevel": "debug",
58 }
59 StandaloneApplication(options).run()
60
61
62 def run_on_windows(args):
63 from waitress import serve
64
65 from config.wsgi import application
66
67 serve(application, port=args.port)
68
69
70 def command_db_init(args):
71 print("Setup Database.")
72 management.call_command("wait_for_db")
73 management.call_command("migrate")
74 management.call_command("create_roles")
75
76
77 def command_user_create(args):
78 print("Create admin user.")
79 management.call_command(
80 "create_admin", "--noinput", username=args.username, password=args.password, email=args.email
81 )
82
83
84 def command_migrate(args):
85 print("Start migration.")
86 management.call_command("migrate")
87
88
89 def command_run_webserver(args):
90 print(f"Starting server with port {args.port}.")
91 if is_windows():
92 run_on_windows(args)
93 else:
94 run_on_nix(args)
95
96
97 def command_run_task_queue(args):
98 print("Starting task queue.")
99 argv = [
100 "--app=config",
101 "--workdir={}".format(base),
102 "worker",
103 "--loglevel=info",
104 "--concurrency={}".format(args.concurrency),
105 ]
106 if is_windows():
107 argv.append("--pool=solo")
108 app.worker_main(argv=argv)
109
110
111 def command_help(args):
112 print(parser.parse_args([args.command, "--help"]))
113
114
115 def main():
116 # Create a command line parser.
117 subparsers = parser.add_subparsers()
118
119 # Create a parser for db initialization.
120 parser_init = subparsers.add_parser("init", help="see `init -h`")
121 parser_init.set_defaults(handler=command_db_init)
122
123 # Create a parser for migration.
124 parser_migration = subparsers.add_parser("migrate", help="Updates database schema.")
125 parser_migration.set_defaults(handler=command_migrate)
126
127 # Create a parser for user creation.
128 parser_create_user = subparsers.add_parser("createuser", help="see `createuser -h`")
129 parser_create_user.add_argument("--username", type=str, default="admin", help="admin username")
130 parser_create_user.add_argument("--password", type=str, default="password", help="admin password")
131 parser_create_user.add_argument("--email", type=str, default="[email protected]", help="admin email")
132 parser_create_user.set_defaults(handler=command_user_create)
133
134 # Create a parser for web server.
135 parser_server = subparsers.add_parser("webserver", help="see `webserver -h`")
136 parser_server.add_argument("--port", type=int, default=8000, help="port number")
137 parser_server.set_defaults(handler=command_run_webserver)
138
139 # Create a parser for task queue.
140 parser_queue = subparsers.add_parser("task", help="see `task -h`")
141 parser_queue.add_argument("--concurrency", type=int, default=2, help="concurrency")
142 parser_queue.set_defaults(handler=command_run_task_queue)
143
144 # Create a parser for help.
145 parser_help = subparsers.add_parser("help", help="see `help -h`")
146 parser_help.add_argument("command", help="command name which help is shown")
147 parser_help.set_defaults(handler=command_help)
148
149 # Dispatch handler.
150 args = parser.parse_args()
151 if hasattr(args, "handler"):
152 args.handler(args)
153 else:
154 # If specified unknown command, show help.
155 parser.print_help()
156
157
158 if __name__ == "__main__":
159 main()
160
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/backend/cli.py b/backend/cli.py
--- a/backend/cli.py
+++ b/backend/cli.py
@@ -51,7 +51,7 @@
options = {
"bind": "%s:%s" % ("0.0.0.0", args.port),
- "workers": number_of_workers(),
+ "workers": args.workers,
"chdir": base,
"capture_output": True,
"loglevel": "debug",
@@ -134,6 +134,7 @@
# Create a parser for web server.
parser_server = subparsers.add_parser("webserver", help="see `webserver -h`")
parser_server.add_argument("--port", type=int, default=8000, help="port number")
+ parser_server.add_argument("--workers", type=int, default=number_of_workers(), help="the number of workers")
parser_server.set_defaults(handler=command_run_webserver)
# Create a parser for task queue.
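With the `--workers` flag added above, the worker count can be capped from the command line — e.g. `doccano webserver --port 8000 --workers 2`, assuming the installed entry point is named `doccano` as in the issue — while the default remains `number_of_workers()`, so existing behaviour is unchanged. A small argparse-only sketch of the plumbing the diff relies on:

```python
# Illustrative only: how the new flag would reach the gunicorn "workers" option.
import argparse

parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
server = subparsers.add_parser("webserver")
server.add_argument("--port", type=int, default=8000)
server.add_argument("--workers", type=int, default=9)  # the real default is number_of_workers()

args = parser.parse_args(["webserver", "--workers", "2"])
print(args.workers)  # 2 -> passed through as options["workers"] in run_on_nix()
```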
|
{"golden_diff": "diff --git a/backend/cli.py b/backend/cli.py\n--- a/backend/cli.py\n+++ b/backend/cli.py\n@@ -51,7 +51,7 @@\n \n options = {\n \"bind\": \"%s:%s\" % (\"0.0.0.0\", args.port),\n- \"workers\": number_of_workers(),\n+ \"workers\": args.workers,\n \"chdir\": base,\n \"capture_output\": True,\n \"loglevel\": \"debug\",\n@@ -134,6 +134,7 @@\n # Create a parser for web server.\n parser_server = subparsers.add_parser(\"webserver\", help=\"see `webserver -h`\")\n parser_server.add_argument(\"--port\", type=int, default=8000, help=\"port number\")\n+ parser_server.add_argument(\"--workers\", type=int, default=number_of_workers(), help=\"the number of workers\")\n parser_server.set_defaults(handler=command_run_webserver)\n \n # Create a parser for task queue.\n", "issue": "How could I reduce number of workers?\nCould I reduce the number_of_workers?\r\n---------\r\nI run the doccano in my machine use this code.\r\n```\r\ndoccano init\r\ndoccano create user ***\r\ndoccano web server --port ***\r\n```\r\nAnd then I got this log: \r\n```\r\nBooting worker with pid: 19\r\nBooting worker with pid: 20\r\n...\r\nBooting worker with pid: 157\r\n```\r\nIt run lots of worker and it took up a lot of memory. So, can I change the number_of_worker varlible. I saw the default number_of_worker= ``` multiprocessing.cpu_count()*2+1 ```. How could I change it?\r\n\r\n\r\nYour Environment\r\n---------\r\n* Operating System: Linux\r\n* Python Version Used: Python38\r\n* When you install doccano: 2021-11-30\r\n* How did you install doccano (Heroku button etc): pip install doccano\r\n\n", "before_files": [{"content": "import argparse\nimport multiprocessing\nimport os\nimport platform\nimport sys\nfrom pathlib import Path\n\nimport django\nfrom django.core import management\n\nfrom .config.celery import app\n\nDOCCANO_HOME = os.path.expanduser(os.environ.get(\"DOCCANO_HOME\", \"~/doccano\"))\nPath(DOCCANO_HOME).mkdir(parents=True, exist_ok=True)\nos.environ[\"STANDALONE\"] = \"True\"\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"config.settings.production\")\nos.environ.setdefault(\"DATABASE_URL\", os.path.join(f\"sqlite:///{DOCCANO_HOME}\", \"db.sqlite3\"))\nos.environ.setdefault(\"MEDIA_ROOT\", os.path.join(DOCCANO_HOME, \"media\"))\nbase = os.path.abspath(os.path.dirname(__file__))\nsys.path.append(base)\ndjango.setup()\nparser = argparse.ArgumentParser(description=\"doccano, text annotation for machine learning practitioners.\")\n\n\ndef number_of_workers():\n return (multiprocessing.cpu_count() * 2) + 1\n\n\ndef is_windows():\n return platform.system() == \"Windows\"\n\n\ndef run_on_nix(args):\n import gunicorn.app.base\n import gunicorn.util\n\n class StandaloneApplication(gunicorn.app.base.BaseApplication):\n def __init__(self, options=None):\n self.options = options or {}\n super().__init__()\n\n def load_config(self):\n config = {\n key: value for key, value in self.options.items() if key in self.cfg.settings and value is not None\n }\n for key, value in config.items():\n self.cfg.set(key.lower(), value)\n\n def load(self):\n return gunicorn.util.import_app(\"config.wsgi\")\n\n options = {\n \"bind\": \"%s:%s\" % (\"0.0.0.0\", args.port),\n \"workers\": number_of_workers(),\n \"chdir\": base,\n \"capture_output\": True,\n \"loglevel\": \"debug\",\n }\n StandaloneApplication(options).run()\n\n\ndef run_on_windows(args):\n from waitress import serve\n\n from config.wsgi import application\n\n serve(application, port=args.port)\n\n\ndef command_db_init(args):\n print(\"Setup Database.\")\n 
management.call_command(\"wait_for_db\")\n management.call_command(\"migrate\")\n management.call_command(\"create_roles\")\n\n\ndef command_user_create(args):\n print(\"Create admin user.\")\n management.call_command(\n \"create_admin\", \"--noinput\", username=args.username, password=args.password, email=args.email\n )\n\n\ndef command_migrate(args):\n print(\"Start migration.\")\n management.call_command(\"migrate\")\n\n\ndef command_run_webserver(args):\n print(f\"Starting server with port {args.port}.\")\n if is_windows():\n run_on_windows(args)\n else:\n run_on_nix(args)\n\n\ndef command_run_task_queue(args):\n print(\"Starting task queue.\")\n argv = [\n \"--app=config\",\n \"--workdir={}\".format(base),\n \"worker\",\n \"--loglevel=info\",\n \"--concurrency={}\".format(args.concurrency),\n ]\n if is_windows():\n argv.append(\"--pool=solo\")\n app.worker_main(argv=argv)\n\n\ndef command_help(args):\n print(parser.parse_args([args.command, \"--help\"]))\n\n\ndef main():\n # Create a command line parser.\n subparsers = parser.add_subparsers()\n\n # Create a parser for db initialization.\n parser_init = subparsers.add_parser(\"init\", help=\"see `init -h`\")\n parser_init.set_defaults(handler=command_db_init)\n\n # Create a parser for migration.\n parser_migration = subparsers.add_parser(\"migrate\", help=\"Updates database schema.\")\n parser_migration.set_defaults(handler=command_migrate)\n\n # Create a parser for user creation.\n parser_create_user = subparsers.add_parser(\"createuser\", help=\"see `createuser -h`\")\n parser_create_user.add_argument(\"--username\", type=str, default=\"admin\", help=\"admin username\")\n parser_create_user.add_argument(\"--password\", type=str, default=\"password\", help=\"admin password\")\n parser_create_user.add_argument(\"--email\", type=str, default=\"[email protected]\", help=\"admin email\")\n parser_create_user.set_defaults(handler=command_user_create)\n\n # Create a parser for web server.\n parser_server = subparsers.add_parser(\"webserver\", help=\"see `webserver -h`\")\n parser_server.add_argument(\"--port\", type=int, default=8000, help=\"port number\")\n parser_server.set_defaults(handler=command_run_webserver)\n\n # Create a parser for task queue.\n parser_queue = subparsers.add_parser(\"task\", help=\"see `task -h`\")\n parser_queue.add_argument(\"--concurrency\", type=int, default=2, help=\"concurrency\")\n parser_queue.set_defaults(handler=command_run_task_queue)\n\n # Create a parser for help.\n parser_help = subparsers.add_parser(\"help\", help=\"see `help -h`\")\n parser_help.add_argument(\"command\", help=\"command name which help is shown\")\n parser_help.set_defaults(handler=command_help)\n\n # Dispatch handler.\n args = parser.parse_args()\n if hasattr(args, \"handler\"):\n args.handler(args)\n else:\n # If specified unknown command, show help.\n parser.print_help()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "backend/cli.py"}], "after_files": [{"content": "import argparse\nimport multiprocessing\nimport os\nimport platform\nimport sys\nfrom pathlib import Path\n\nimport django\nfrom django.core import management\n\nfrom .config.celery import app\n\nDOCCANO_HOME = os.path.expanduser(os.environ.get(\"DOCCANO_HOME\", \"~/doccano\"))\nPath(DOCCANO_HOME).mkdir(parents=True, exist_ok=True)\nos.environ[\"STANDALONE\"] = \"True\"\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"config.settings.production\")\nos.environ.setdefault(\"DATABASE_URL\", os.path.join(f\"sqlite:///{DOCCANO_HOME}\", 
\"db.sqlite3\"))\nos.environ.setdefault(\"MEDIA_ROOT\", os.path.join(DOCCANO_HOME, \"media\"))\nbase = os.path.abspath(os.path.dirname(__file__))\nsys.path.append(base)\ndjango.setup()\nparser = argparse.ArgumentParser(description=\"doccano, text annotation for machine learning practitioners.\")\n\n\ndef number_of_workers():\n return (multiprocessing.cpu_count() * 2) + 1\n\n\ndef is_windows():\n return platform.system() == \"Windows\"\n\n\ndef run_on_nix(args):\n import gunicorn.app.base\n import gunicorn.util\n\n class StandaloneApplication(gunicorn.app.base.BaseApplication):\n def __init__(self, options=None):\n self.options = options or {}\n super().__init__()\n\n def load_config(self):\n config = {\n key: value for key, value in self.options.items() if key in self.cfg.settings and value is not None\n }\n for key, value in config.items():\n self.cfg.set(key.lower(), value)\n\n def load(self):\n return gunicorn.util.import_app(\"config.wsgi\")\n\n options = {\n \"bind\": \"%s:%s\" % (\"0.0.0.0\", args.port),\n \"workers\": args.workers,\n \"chdir\": base,\n \"capture_output\": True,\n \"loglevel\": \"debug\",\n }\n StandaloneApplication(options).run()\n\n\ndef run_on_windows(args):\n from waitress import serve\n\n from config.wsgi import application\n\n serve(application, port=args.port)\n\n\ndef command_db_init(args):\n print(\"Setup Database.\")\n management.call_command(\"wait_for_db\")\n management.call_command(\"migrate\")\n management.call_command(\"create_roles\")\n\n\ndef command_user_create(args):\n print(\"Create admin user.\")\n management.call_command(\n \"create_admin\", \"--noinput\", username=args.username, password=args.password, email=args.email\n )\n\n\ndef command_migrate(args):\n print(\"Start migration.\")\n management.call_command(\"migrate\")\n\n\ndef command_run_webserver(args):\n print(f\"Starting server with port {args.port}.\")\n if is_windows():\n run_on_windows(args)\n else:\n run_on_nix(args)\n\n\ndef command_run_task_queue(args):\n print(\"Starting task queue.\")\n argv = [\n \"--app=config\",\n \"--workdir={}\".format(base),\n \"worker\",\n \"--loglevel=info\",\n \"--concurrency={}\".format(args.concurrency),\n ]\n if is_windows():\n argv.append(\"--pool=solo\")\n app.worker_main(argv=argv)\n\n\ndef command_help(args):\n print(parser.parse_args([args.command, \"--help\"]))\n\n\ndef main():\n # Create a command line parser.\n subparsers = parser.add_subparsers()\n\n # Create a parser for db initialization.\n parser_init = subparsers.add_parser(\"init\", help=\"see `init -h`\")\n parser_init.set_defaults(handler=command_db_init)\n\n # Create a parser for migration.\n parser_migration = subparsers.add_parser(\"migrate\", help=\"Updates database schema.\")\n parser_migration.set_defaults(handler=command_migrate)\n\n # Create a parser for user creation.\n parser_create_user = subparsers.add_parser(\"createuser\", help=\"see `createuser -h`\")\n parser_create_user.add_argument(\"--username\", type=str, default=\"admin\", help=\"admin username\")\n parser_create_user.add_argument(\"--password\", type=str, default=\"password\", help=\"admin password\")\n parser_create_user.add_argument(\"--email\", type=str, default=\"[email protected]\", help=\"admin email\")\n parser_create_user.set_defaults(handler=command_user_create)\n\n # Create a parser for web server.\n parser_server = subparsers.add_parser(\"webserver\", help=\"see `webserver -h`\")\n parser_server.add_argument(\"--port\", type=int, default=8000, help=\"port number\")\n 
parser_server.add_argument(\"--workers\", type=int, default=number_of_workers(), help=\"the number of workers\")\n parser_server.set_defaults(handler=command_run_webserver)\n\n # Create a parser for task queue.\n parser_queue = subparsers.add_parser(\"task\", help=\"see `task -h`\")\n parser_queue.add_argument(\"--concurrency\", type=int, default=2, help=\"concurrency\")\n parser_queue.set_defaults(handler=command_run_task_queue)\n\n # Create a parser for help.\n parser_help = subparsers.add_parser(\"help\", help=\"see `help -h`\")\n parser_help.add_argument(\"command\", help=\"command name which help is shown\")\n parser_help.set_defaults(handler=command_help)\n\n # Dispatch handler.\n args = parser.parse_args()\n if hasattr(args, \"handler\"):\n args.handler(args)\n else:\n # If specified unknown command, show help.\n parser.print_help()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "backend/cli.py"}]}
| 1,990 | 218 |
gh_patches_debug_14081
|
rasdani/github-patches
|
git_diff
|
pypi__warehouse-439
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Double check conditional HTTP implementation
The conditional HTTP implementation doesn't check the status code of the response at all. Determine whether it should, and if it should, update it to do the right thing.
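Concretely, the tween shown below fingerprints whatever single-chunk body it is handed, so nothing stops an ETag from being minted for an error page or for the response to a non-GET request. A rough sketch of the concern (hashlib only, not the actual pyramid machinery):

```python
# The worry in miniature: an ETag computed for a body regardless of status or method.
import hashlib

status_code = 404
body = b"<h1>Not Found</h1>"
etag = hashlib.md5(body).hexdigest()  # response.md5_etag() does the equivalent fingerprinting
print(status_code, etag)
```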
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `warehouse/cache/http.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import collections.abc
14 import functools
15
16 from pyramid.tweens import EXCVIEW
17
18
19 def _add_vary_callback(*varies):
20 def inner(request, response):
21 vary = set(response.vary if response.vary is not None else [])
22 vary |= set(varies)
23 response.vary = vary
24 return inner
25
26
27 def add_vary(*varies):
28 def inner(view):
29 @functools.wraps(view)
30 def wrapped(context, request):
31 request.add_response_callback(_add_vary_callback(*varies))
32 return view(context, request)
33 return wrapped
34 return inner
35
36
37 def cache_control(seconds, public=True):
38 def inner(view):
39 @functools.wraps(view)
40 def wrapped(context, request):
41 response = view(context, request)
42
43 if not request.registry.settings.get(
44 "pyramid.prevent_http_cache", False):
45 if seconds:
46 if public:
47 response.cache_control.public = True
48 else:
49 response.cache_control.private = True
50
51 response.cache_control.max_age = seconds
52 else:
53 response.cache_control.no_cache = True
54 response.cache_control.no_store = True
55 response.cache_control.must_revalidate = True
56
57 return response
58 return wrapped
59 return inner
60
61
62 def conditional_http_tween_factory(handler, registry):
63 def conditional_http_tween(request):
64 response = handler(request)
65
66 # If the Last-Modified header has been set, we want to enable the
67 # conditional response processing.
68 if response.last_modified is not None:
69 response.conditional_response = True
70
71 # We want to only enable the conditional machinery if either we
72 # were given an explicit ETag header by the view or we have a
73 # buffered response and can generate the ETag header ourself.
74 if response.etag is not None:
75 response.conditional_response = True
76 elif (isinstance(response.app_iter, collections.abc.Sequence) and
77 len(response.app_iter) == 1):
78 response.conditional_response = True
79 response.md5_etag()
80
81 return response
82 return conditional_http_tween
83
84
85 def includeme(config):
86 config.add_tween(
87 "warehouse.cache.http.conditional_http_tween_factory",
88 under=EXCVIEW,
89 )
90
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/warehouse/cache/http.py b/warehouse/cache/http.py
--- a/warehouse/cache/http.py
+++ b/warehouse/cache/http.py
@@ -75,8 +75,13 @@
response.conditional_response = True
elif (isinstance(response.app_iter, collections.abc.Sequence) and
len(response.app_iter) == 1):
- response.conditional_response = True
- response.md5_etag()
+ # We can only reasonably implement automatic ETags on 200 responses
+ # to GET or HEAD requests. The subtles of doing it in other cases
+ # are too hard to get right.
+ if (request.method in {"GET", "HEAD"} and
+ response.status_code == 200):
+ response.conditional_response = True
+ response.md5_etag()
return response
return conditional_http_tween
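The guard above narrows automatic ETag generation without touching the explicit-ETag or Last-Modified paths. A hypothetical truth table of when the patched tween ends up enabling conditional responses (plain Python; the function below is a model for illustration, not a warehouse API):

```python
# Hypothetical model of the patched logic above; names are illustrative only.
def is_conditional(method, status_code, buffered=True, explicit_etag=False, last_modified=False):
    if last_modified or explicit_etag:
        return True
    return buffered and method in {"GET", "HEAD"} and status_code == 200

assert is_conditional("GET", 200) is True
assert is_conditional("HEAD", 200) is True
assert is_conditional("GET", 404) is False       # error pages no longer auto-fingerprinted
assert is_conditional("POST", 200) is False      # unsafe methods are skipped
assert is_conditional("GET", 500, explicit_etag=True) is True  # explicit ETags still honoured
```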
|
{"golden_diff": "diff --git a/warehouse/cache/http.py b/warehouse/cache/http.py\n--- a/warehouse/cache/http.py\n+++ b/warehouse/cache/http.py\n@@ -75,8 +75,13 @@\n response.conditional_response = True\n elif (isinstance(response.app_iter, collections.abc.Sequence) and\n len(response.app_iter) == 1):\n- response.conditional_response = True\n- response.md5_etag()\n+ # We can only reasonably implement automatic ETags on 200 responses\n+ # to GET or HEAD requests. The subtles of doing it in other cases\n+ # are too hard to get right.\n+ if (request.method in {\"GET\", \"HEAD\"} and\n+ response.status_code == 200):\n+ response.conditional_response = True\n+ response.md5_etag()\n \n return response\n return conditional_http_tween\n", "issue": "Double check conditional HTTP implementation\nThe conditional HTTP implementation doesn't check the status code of the response at all. Determine if it should, and if it should update it to do the right thing.\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections.abc\nimport functools\n\nfrom pyramid.tweens import EXCVIEW\n\n\ndef _add_vary_callback(*varies):\n def inner(request, response):\n vary = set(response.vary if response.vary is not None else [])\n vary |= set(varies)\n response.vary = vary\n return inner\n\n\ndef add_vary(*varies):\n def inner(view):\n @functools.wraps(view)\n def wrapped(context, request):\n request.add_response_callback(_add_vary_callback(*varies))\n return view(context, request)\n return wrapped\n return inner\n\n\ndef cache_control(seconds, public=True):\n def inner(view):\n @functools.wraps(view)\n def wrapped(context, request):\n response = view(context, request)\n\n if not request.registry.settings.get(\n \"pyramid.prevent_http_cache\", False):\n if seconds:\n if public:\n response.cache_control.public = True\n else:\n response.cache_control.private = True\n\n response.cache_control.max_age = seconds\n else:\n response.cache_control.no_cache = True\n response.cache_control.no_store = True\n response.cache_control.must_revalidate = True\n\n return response\n return wrapped\n return inner\n\n\ndef conditional_http_tween_factory(handler, registry):\n def conditional_http_tween(request):\n response = handler(request)\n\n # If the Last-Modified header has been set, we want to enable the\n # conditional response processing.\n if response.last_modified is not None:\n response.conditional_response = True\n\n # We want to only enable the conditional machinery if either we\n # were given an explicit ETag header by the view or we have a\n # buffered response and can generate the ETag header ourself.\n if response.etag is not None:\n response.conditional_response = True\n elif (isinstance(response.app_iter, collections.abc.Sequence) and\n len(response.app_iter) == 1):\n response.conditional_response = True\n response.md5_etag()\n\n return response\n return conditional_http_tween\n\n\ndef includeme(config):\n config.add_tween(\n \"warehouse.cache.http.conditional_http_tween_factory\",\n 
under=EXCVIEW,\n )\n", "path": "warehouse/cache/http.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections.abc\nimport functools\n\nfrom pyramid.tweens import EXCVIEW\n\n\ndef _add_vary_callback(*varies):\n def inner(request, response):\n vary = set(response.vary if response.vary is not None else [])\n vary |= set(varies)\n response.vary = vary\n return inner\n\n\ndef add_vary(*varies):\n def inner(view):\n @functools.wraps(view)\n def wrapped(context, request):\n request.add_response_callback(_add_vary_callback(*varies))\n return view(context, request)\n return wrapped\n return inner\n\n\ndef cache_control(seconds, public=True):\n def inner(view):\n @functools.wraps(view)\n def wrapped(context, request):\n response = view(context, request)\n\n if not request.registry.settings.get(\n \"pyramid.prevent_http_cache\", False):\n if seconds:\n if public:\n response.cache_control.public = True\n else:\n response.cache_control.private = True\n\n response.cache_control.max_age = seconds\n else:\n response.cache_control.no_cache = True\n response.cache_control.no_store = True\n response.cache_control.must_revalidate = True\n\n return response\n return wrapped\n return inner\n\n\ndef conditional_http_tween_factory(handler, registry):\n def conditional_http_tween(request):\n response = handler(request)\n\n # If the Last-Modified header has been set, we want to enable the\n # conditional response processing.\n if response.last_modified is not None:\n response.conditional_response = True\n\n # We want to only enable the conditional machinery if either we\n # were given an explicit ETag header by the view or we have a\n # buffered response and can generate the ETag header ourself.\n if response.etag is not None:\n response.conditional_response = True\n elif (isinstance(response.app_iter, collections.abc.Sequence) and\n len(response.app_iter) == 1):\n # We can only reasonably implement automatic ETags on 200 responses\n # to GET or HEAD requests. The subtles of doing it in other cases\n # are too hard to get right.\n if (request.method in {\"GET\", \"HEAD\"} and\n response.status_code == 200):\n response.conditional_response = True\n response.md5_etag()\n\n return response\n return conditional_http_tween\n\n\ndef includeme(config):\n config.add_tween(\n \"warehouse.cache.http.conditional_http_tween_factory\",\n under=EXCVIEW,\n )\n", "path": "warehouse/cache/http.py"}]}
| 1,076 | 198 |
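As an aside, the gating rule this record's patch introduces for automatic ETags (buffered single-chunk body, GET or HEAD method, 200 status) can be exercised in isolation. The sketch below uses hypothetical stand-in request/response classes, not Pyramid's or WebOb's real objects, purely to show which combinations enable the automatic ETag.

```python
from collections.abc import Sequence


class StubRequest:
    def __init__(self, method):
        self.method = method


class StubResponse:
    def __init__(self, status_code, app_iter):
        self.status_code = status_code
        self.app_iter = app_iter


def should_auto_etag(request, response):
    # Mirrors the patched condition: only buffered 200 responses to
    # GET/HEAD requests get an automatically generated ETag.
    buffered = isinstance(response.app_iter, Sequence) and len(response.app_iter) == 1
    return (buffered
            and request.method in {"GET", "HEAD"}
            and response.status_code == 200)


print(should_auto_etag(StubRequest("GET"), StubResponse(200, [b"body"])))    # True
print(should_auto_etag(StubRequest("POST"), StubResponse(200, [b"body"])))   # False
print(should_auto_etag(StubRequest("GET"), StubResponse(404, [b"body"])))    # False
print(should_auto_etag(StubRequest("GET"), StubResponse(200, iter([b""]))))  # False (streaming body)
```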
gh_patches_debug_17467
|
rasdani/github-patches
|
git_diff
|
inventree__InvenTree-1662
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add purchase price to Part Stock table
Add column `purchase_price` to "Part Stock" table and make it sortable.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `InvenTree/stock/serializers.py`
Content:
```
1 """
2 JSON serializers for Stock app
3 """
4
5 from rest_framework import serializers
6
7 from .models import StockItem, StockLocation
8 from .models import StockItemTracking
9 from .models import StockItemAttachment
10 from .models import StockItemTestResult
11
12 from django.db.models.functions import Coalesce
13
14 from django.db.models import Case, When, Value
15 from django.db.models import BooleanField
16 from django.db.models import Q
17
18 from sql_util.utils import SubquerySum, SubqueryCount
19
20 from decimal import Decimal
21
22 from datetime import datetime, timedelta
23
24 import common.models
25 from company.serializers import SupplierPartSerializer
26 from part.serializers import PartBriefSerializer
27 from InvenTree.serializers import UserSerializerBrief, InvenTreeModelSerializer
28 from InvenTree.serializers import InvenTreeAttachmentSerializerField
29
30
31 class LocationBriefSerializer(InvenTreeModelSerializer):
32 """
33 Provides a brief serializer for a StockLocation object
34 """
35
36 class Meta:
37 model = StockLocation
38 fields = [
39 'pk',
40 'name',
41 'pathstring',
42 ]
43
44
45 class StockItemSerializerBrief(InvenTreeModelSerializer):
46 """ Brief serializers for a StockItem """
47
48 location_name = serializers.CharField(source='location', read_only=True)
49 part_name = serializers.CharField(source='part.full_name', read_only=True)
50 quantity = serializers.FloatField()
51
52 class Meta:
53 model = StockItem
54 fields = [
55 'pk',
56 'uid',
57 'part',
58 'part_name',
59 'supplier_part',
60 'location',
61 'location_name',
62 'quantity',
63 ]
64
65
66 class StockItemSerializer(InvenTreeModelSerializer):
67 """ Serializer for a StockItem:
68
69 - Includes serialization for the linked part
70 - Includes serialization for the item location
71 """
72
73 @staticmethod
74 def prefetch_queryset(queryset):
75 """
76 Prefetch related database tables,
77 to reduce database hits.
78 """
79
80 return queryset.prefetch_related(
81 'belongs_to',
82 'build',
83 'customer',
84 'sales_order',
85 'supplier_part',
86 'supplier_part__supplier',
87 'supplier_part__manufacturer_part__manufacturer',
88 'allocations',
89 'sales_order_allocations',
90 'location',
91 'part',
92 'tracking_info',
93 )
94
95 @staticmethod
96 def annotate_queryset(queryset):
97 """
98 Add some extra annotations to the queryset,
99 performing database queries as efficiently as possible.
100 """
101
102 # Annotate the queryset with the total allocated to sales orders
103 queryset = queryset.annotate(
104 allocated=Coalesce(
105 SubquerySum('sales_order_allocations__quantity'), Decimal(0)
106 ) + Coalesce(
107 SubquerySum('allocations__quantity'), Decimal(0)
108 )
109 )
110
111 # Annotate the queryset with the number of tracking items
112 queryset = queryset.annotate(
113 tracking_items=SubqueryCount('tracking_info')
114 )
115
116 # Add flag to indicate if the StockItem has expired
117 queryset = queryset.annotate(
118 expired=Case(
119 When(
120 StockItem.EXPIRED_FILTER, then=Value(True, output_field=BooleanField()),
121 ),
122 default=Value(False, output_field=BooleanField())
123 )
124 )
125
126 # Add flag to indicate if the StockItem is stale
127 stale_days = common.models.InvenTreeSetting.get_setting('STOCK_STALE_DAYS')
128 stale_date = datetime.now().date() + timedelta(days=stale_days)
129 stale_filter = StockItem.IN_STOCK_FILTER & ~Q(expiry_date=None) & Q(expiry_date__lt=stale_date)
130
131 queryset = queryset.annotate(
132 stale=Case(
133 When(
134 stale_filter, then=Value(True, output_field=BooleanField()),
135 ),
136 default=Value(False, output_field=BooleanField()),
137 )
138 )
139
140 return queryset
141
142 status_text = serializers.CharField(source='get_status_display', read_only=True)
143
144 supplier_part_detail = SupplierPartSerializer(source='supplier_part', many=False, read_only=True)
145
146 part_detail = PartBriefSerializer(source='part', many=False, read_only=True)
147
148 location_detail = LocationBriefSerializer(source='location', many=False, read_only=True)
149
150 tracking_items = serializers.IntegerField(source='tracking_info_count', read_only=True, required=False)
151
152 quantity = serializers.FloatField()
153
154 allocated = serializers.FloatField(source='allocation_count', required=False)
155
156 expired = serializers.BooleanField(required=False, read_only=True)
157
158 stale = serializers.BooleanField(required=False, read_only=True)
159
160 serial = serializers.CharField(required=False)
161
162 required_tests = serializers.IntegerField(source='required_test_count', read_only=True, required=False)
163
164 def __init__(self, *args, **kwargs):
165
166 part_detail = kwargs.pop('part_detail', False)
167 location_detail = kwargs.pop('location_detail', False)
168 supplier_part_detail = kwargs.pop('supplier_part_detail', False)
169 test_detail = kwargs.pop('test_detail', False)
170
171 super(StockItemSerializer, self).__init__(*args, **kwargs)
172
173 if part_detail is not True:
174 self.fields.pop('part_detail')
175
176 if location_detail is not True:
177 self.fields.pop('location_detail')
178
179 if supplier_part_detail is not True:
180 self.fields.pop('supplier_part_detail')
181
182 if test_detail is not True:
183 self.fields.pop('required_tests')
184
185 class Meta:
186 model = StockItem
187 fields = [
188 'allocated',
189 'batch',
190 'belongs_to',
191 'build',
192 'customer',
193 'expired',
194 'expiry_date',
195 'in_stock',
196 'is_building',
197 'link',
198 'location',
199 'location_detail',
200 'notes',
201 'packaging',
202 'part',
203 'part_detail',
204 'pk',
205 'quantity',
206 'required_tests',
207 'sales_order',
208 'serial',
209 'stale',
210 'status',
211 'status_text',
212 'stocktake_date',
213 'supplier_part',
214 'supplier_part_detail',
215 'tracking_items',
216 'uid',
217 'updated',
218 ]
219
220 """ These fields are read-only in this context.
221 They can be updated by accessing the appropriate API endpoints
222 """
223 read_only_fields = [
224 'allocated',
225 'stocktake_date',
226 'stocktake_user',
227 'updated',
228 'in_stock'
229 ]
230
231
232 class StockQuantitySerializer(InvenTreeModelSerializer):
233
234 class Meta:
235 model = StockItem
236 fields = ('quantity',)
237
238
239 class LocationSerializer(InvenTreeModelSerializer):
240 """ Detailed information about a stock location
241 """
242
243 url = serializers.CharField(source='get_absolute_url', read_only=True)
244
245 items = serializers.IntegerField(source='item_count', read_only=True)
246
247 class Meta:
248 model = StockLocation
249 fields = [
250 'pk',
251 'url',
252 'name',
253 'description',
254 'parent',
255 'pathstring',
256 'items',
257 ]
258
259
260 class StockItemAttachmentSerializer(InvenTreeModelSerializer):
261 """ Serializer for StockItemAttachment model """
262
263 def __init__(self, *args, **kwargs):
264 user_detail = kwargs.pop('user_detail', False)
265
266 super().__init__(*args, **kwargs)
267
268 if user_detail is not True:
269 self.fields.pop('user_detail')
270
271 user_detail = UserSerializerBrief(source='user', read_only=True)
272
273 attachment = InvenTreeAttachmentSerializerField(required=True)
274
275 class Meta:
276 model = StockItemAttachment
277
278 fields = [
279 'pk',
280 'stock_item',
281 'attachment',
282 'comment',
283 'upload_date',
284 'user',
285 'user_detail',
286 ]
287
288 read_only_fields = [
289 'upload_date',
290 'user',
291 'user_detail'
292 ]
293
294
295 class StockItemTestResultSerializer(InvenTreeModelSerializer):
296 """ Serializer for the StockItemTestResult model """
297
298 user_detail = UserSerializerBrief(source='user', read_only=True)
299
300 key = serializers.CharField(read_only=True)
301
302 attachment = InvenTreeAttachmentSerializerField(required=False)
303
304 def __init__(self, *args, **kwargs):
305 user_detail = kwargs.pop('user_detail', False)
306
307 super().__init__(*args, **kwargs)
308
309 if user_detail is not True:
310 self.fields.pop('user_detail')
311
312 class Meta:
313 model = StockItemTestResult
314
315 fields = [
316 'pk',
317 'stock_item',
318 'key',
319 'test',
320 'result',
321 'value',
322 'attachment',
323 'notes',
324 'user',
325 'user_detail',
326 'date'
327 ]
328
329 read_only_fields = [
330 'pk',
331 'user',
332 'date',
333 ]
334
335
336 class StockTrackingSerializer(InvenTreeModelSerializer):
337 """ Serializer for StockItemTracking model """
338
339 def __init__(self, *args, **kwargs):
340
341 item_detail = kwargs.pop('item_detail', False)
342 user_detail = kwargs.pop('user_detail', False)
343
344 super().__init__(*args, **kwargs)
345
346 if item_detail is not True:
347 self.fields.pop('item_detail')
348
349 if user_detail is not True:
350 self.fields.pop('user_detail')
351
352 label = serializers.CharField(read_only=True)
353
354 item_detail = StockItemSerializerBrief(source='item', many=False, read_only=True)
355
356 user_detail = UserSerializerBrief(source='user', many=False, read_only=True)
357
358 deltas = serializers.JSONField(read_only=True)
359
360 class Meta:
361 model = StockItemTracking
362 fields = [
363 'pk',
364 'item',
365 'item_detail',
366 'date',
367 'deltas',
368 'label',
369 'notes',
370 'tracking_type',
371 'user',
372 'user_detail',
373 ]
374
375 read_only_fields = [
376 'date',
377 'user',
378 'label',
379 'tracking_type',
380 ]
381
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/InvenTree/stock/serializers.py b/InvenTree/stock/serializers.py
--- a/InvenTree/stock/serializers.py
+++ b/InvenTree/stock/serializers.py
@@ -161,6 +161,13 @@
required_tests = serializers.IntegerField(source='required_test_count', read_only=True, required=False)
+ purchase_price = serializers.SerializerMethodField()
+
+ def get_purchase_price(self, obj):
+ """ Return purchase_price (Money field) as string (includes currency) """
+
+ return str(obj.purchase_price) if obj.purchase_price else '-'
+
def __init__(self, *args, **kwargs):
part_detail = kwargs.pop('part_detail', False)
@@ -215,6 +222,7 @@
'tracking_items',
'uid',
'updated',
+ 'purchase_price',
]
""" These fields are read-only in this context.
|
{"golden_diff": "diff --git a/InvenTree/stock/serializers.py b/InvenTree/stock/serializers.py\n--- a/InvenTree/stock/serializers.py\n+++ b/InvenTree/stock/serializers.py\n@@ -161,6 +161,13 @@\n \n required_tests = serializers.IntegerField(source='required_test_count', read_only=True, required=False)\n \n+ purchase_price = serializers.SerializerMethodField()\n+\n+ def get_purchase_price(self, obj):\n+ \"\"\" Return purchase_price (Money field) as string (includes currency) \"\"\"\n+\n+ return str(obj.purchase_price) if obj.purchase_price else '-'\n+\n def __init__(self, *args, **kwargs):\n \n part_detail = kwargs.pop('part_detail', False)\n@@ -215,6 +222,7 @@\n 'tracking_items',\n 'uid',\n 'updated',\n+ 'purchase_price',\n ]\n \n \"\"\" These fields are read-only in this context.\n", "issue": "Add purchase price to Part Stock table\nAdd column `purchase_price` to \"Part Stock\" table and make it sortable.\n", "before_files": [{"content": "\"\"\"\nJSON serializers for Stock app\n\"\"\"\n\nfrom rest_framework import serializers\n\nfrom .models import StockItem, StockLocation\nfrom .models import StockItemTracking\nfrom .models import StockItemAttachment\nfrom .models import StockItemTestResult\n\nfrom django.db.models.functions import Coalesce\n\nfrom django.db.models import Case, When, Value\nfrom django.db.models import BooleanField\nfrom django.db.models import Q\n\nfrom sql_util.utils import SubquerySum, SubqueryCount\n\nfrom decimal import Decimal\n\nfrom datetime import datetime, timedelta\n\nimport common.models\nfrom company.serializers import SupplierPartSerializer\nfrom part.serializers import PartBriefSerializer\nfrom InvenTree.serializers import UserSerializerBrief, InvenTreeModelSerializer\nfrom InvenTree.serializers import InvenTreeAttachmentSerializerField\n\n\nclass LocationBriefSerializer(InvenTreeModelSerializer):\n \"\"\"\n Provides a brief serializer for a StockLocation object\n \"\"\"\n\n class Meta:\n model = StockLocation\n fields = [\n 'pk',\n 'name',\n 'pathstring',\n ]\n\n\nclass StockItemSerializerBrief(InvenTreeModelSerializer):\n \"\"\" Brief serializers for a StockItem \"\"\"\n\n location_name = serializers.CharField(source='location', read_only=True)\n part_name = serializers.CharField(source='part.full_name', read_only=True)\n quantity = serializers.FloatField()\n\n class Meta:\n model = StockItem\n fields = [\n 'pk',\n 'uid',\n 'part',\n 'part_name',\n 'supplier_part',\n 'location',\n 'location_name',\n 'quantity',\n ]\n\n\nclass StockItemSerializer(InvenTreeModelSerializer):\n \"\"\" Serializer for a StockItem:\n\n - Includes serialization for the linked part\n - Includes serialization for the item location\n \"\"\"\n\n @staticmethod\n def prefetch_queryset(queryset):\n \"\"\"\n Prefetch related database tables,\n to reduce database hits.\n \"\"\"\n\n return queryset.prefetch_related(\n 'belongs_to',\n 'build',\n 'customer',\n 'sales_order',\n 'supplier_part',\n 'supplier_part__supplier',\n 'supplier_part__manufacturer_part__manufacturer',\n 'allocations',\n 'sales_order_allocations',\n 'location',\n 'part',\n 'tracking_info',\n )\n\n @staticmethod\n def annotate_queryset(queryset):\n \"\"\"\n Add some extra annotations to the queryset,\n performing database queries as efficiently as possible.\n \"\"\"\n\n # Annotate the queryset with the total allocated to sales orders\n queryset = queryset.annotate(\n allocated=Coalesce(\n SubquerySum('sales_order_allocations__quantity'), Decimal(0)\n ) + Coalesce(\n SubquerySum('allocations__quantity'), Decimal(0)\n )\n 
)\n\n # Annotate the queryset with the number of tracking items\n queryset = queryset.annotate(\n tracking_items=SubqueryCount('tracking_info')\n )\n\n # Add flag to indicate if the StockItem has expired\n queryset = queryset.annotate(\n expired=Case(\n When(\n StockItem.EXPIRED_FILTER, then=Value(True, output_field=BooleanField()),\n ),\n default=Value(False, output_field=BooleanField())\n )\n )\n\n # Add flag to indicate if the StockItem is stale\n stale_days = common.models.InvenTreeSetting.get_setting('STOCK_STALE_DAYS')\n stale_date = datetime.now().date() + timedelta(days=stale_days)\n stale_filter = StockItem.IN_STOCK_FILTER & ~Q(expiry_date=None) & Q(expiry_date__lt=stale_date)\n\n queryset = queryset.annotate(\n stale=Case(\n When(\n stale_filter, then=Value(True, output_field=BooleanField()),\n ),\n default=Value(False, output_field=BooleanField()),\n )\n )\n\n return queryset\n\n status_text = serializers.CharField(source='get_status_display', read_only=True)\n\n supplier_part_detail = SupplierPartSerializer(source='supplier_part', many=False, read_only=True)\n\n part_detail = PartBriefSerializer(source='part', many=False, read_only=True)\n\n location_detail = LocationBriefSerializer(source='location', many=False, read_only=True)\n\n tracking_items = serializers.IntegerField(source='tracking_info_count', read_only=True, required=False)\n\n quantity = serializers.FloatField()\n\n allocated = serializers.FloatField(source='allocation_count', required=False)\n\n expired = serializers.BooleanField(required=False, read_only=True)\n\n stale = serializers.BooleanField(required=False, read_only=True)\n\n serial = serializers.CharField(required=False)\n\n required_tests = serializers.IntegerField(source='required_test_count', read_only=True, required=False)\n\n def __init__(self, *args, **kwargs):\n\n part_detail = kwargs.pop('part_detail', False)\n location_detail = kwargs.pop('location_detail', False)\n supplier_part_detail = kwargs.pop('supplier_part_detail', False)\n test_detail = kwargs.pop('test_detail', False)\n\n super(StockItemSerializer, self).__init__(*args, **kwargs)\n\n if part_detail is not True:\n self.fields.pop('part_detail')\n\n if location_detail is not True:\n self.fields.pop('location_detail')\n\n if supplier_part_detail is not True:\n self.fields.pop('supplier_part_detail')\n\n if test_detail is not True:\n self.fields.pop('required_tests')\n\n class Meta:\n model = StockItem\n fields = [\n 'allocated',\n 'batch',\n 'belongs_to',\n 'build',\n 'customer',\n 'expired',\n 'expiry_date',\n 'in_stock',\n 'is_building',\n 'link',\n 'location',\n 'location_detail',\n 'notes',\n 'packaging',\n 'part',\n 'part_detail',\n 'pk',\n 'quantity',\n 'required_tests',\n 'sales_order',\n 'serial',\n 'stale',\n 'status',\n 'status_text',\n 'stocktake_date',\n 'supplier_part',\n 'supplier_part_detail',\n 'tracking_items',\n 'uid',\n 'updated',\n ]\n\n \"\"\" These fields are read-only in this context.\n They can be updated by accessing the appropriate API endpoints\n \"\"\"\n read_only_fields = [\n 'allocated',\n 'stocktake_date',\n 'stocktake_user',\n 'updated',\n 'in_stock'\n ]\n\n\nclass StockQuantitySerializer(InvenTreeModelSerializer):\n\n class Meta:\n model = StockItem\n fields = ('quantity',)\n\n\nclass LocationSerializer(InvenTreeModelSerializer):\n \"\"\" Detailed information about a stock location\n \"\"\"\n\n url = serializers.CharField(source='get_absolute_url', read_only=True)\n\n items = serializers.IntegerField(source='item_count', read_only=True)\n\n class Meta:\n 
model = StockLocation\n fields = [\n 'pk',\n 'url',\n 'name',\n 'description',\n 'parent',\n 'pathstring',\n 'items',\n ]\n\n\nclass StockItemAttachmentSerializer(InvenTreeModelSerializer):\n \"\"\" Serializer for StockItemAttachment model \"\"\"\n\n def __init__(self, *args, **kwargs):\n user_detail = kwargs.pop('user_detail', False)\n\n super().__init__(*args, **kwargs)\n\n if user_detail is not True:\n self.fields.pop('user_detail')\n\n user_detail = UserSerializerBrief(source='user', read_only=True)\n\n attachment = InvenTreeAttachmentSerializerField(required=True)\n\n class Meta:\n model = StockItemAttachment\n\n fields = [\n 'pk',\n 'stock_item',\n 'attachment',\n 'comment',\n 'upload_date',\n 'user',\n 'user_detail',\n ]\n\n read_only_fields = [\n 'upload_date',\n 'user',\n 'user_detail'\n ]\n\n\nclass StockItemTestResultSerializer(InvenTreeModelSerializer):\n \"\"\" Serializer for the StockItemTestResult model \"\"\"\n\n user_detail = UserSerializerBrief(source='user', read_only=True)\n\n key = serializers.CharField(read_only=True)\n\n attachment = InvenTreeAttachmentSerializerField(required=False)\n\n def __init__(self, *args, **kwargs):\n user_detail = kwargs.pop('user_detail', False)\n\n super().__init__(*args, **kwargs)\n\n if user_detail is not True:\n self.fields.pop('user_detail')\n\n class Meta:\n model = StockItemTestResult\n\n fields = [\n 'pk',\n 'stock_item',\n 'key',\n 'test',\n 'result',\n 'value',\n 'attachment',\n 'notes',\n 'user',\n 'user_detail',\n 'date'\n ]\n\n read_only_fields = [\n 'pk',\n 'user',\n 'date',\n ]\n\n\nclass StockTrackingSerializer(InvenTreeModelSerializer):\n \"\"\" Serializer for StockItemTracking model \"\"\"\n\n def __init__(self, *args, **kwargs):\n\n item_detail = kwargs.pop('item_detail', False)\n user_detail = kwargs.pop('user_detail', False)\n\n super().__init__(*args, **kwargs)\n\n if item_detail is not True:\n self.fields.pop('item_detail')\n\n if user_detail is not True:\n self.fields.pop('user_detail')\n\n label = serializers.CharField(read_only=True)\n\n item_detail = StockItemSerializerBrief(source='item', many=False, read_only=True)\n\n user_detail = UserSerializerBrief(source='user', many=False, read_only=True)\n\n deltas = serializers.JSONField(read_only=True)\n\n class Meta:\n model = StockItemTracking\n fields = [\n 'pk',\n 'item',\n 'item_detail',\n 'date',\n 'deltas',\n 'label',\n 'notes',\n 'tracking_type',\n 'user',\n 'user_detail',\n ]\n\n read_only_fields = [\n 'date',\n 'user',\n 'label',\n 'tracking_type',\n ]\n", "path": "InvenTree/stock/serializers.py"}], "after_files": [{"content": "\"\"\"\nJSON serializers for Stock app\n\"\"\"\n\nfrom rest_framework import serializers\n\nfrom .models import StockItem, StockLocation\nfrom .models import StockItemTracking\nfrom .models import StockItemAttachment\nfrom .models import StockItemTestResult\n\nfrom django.db.models.functions import Coalesce\n\nfrom django.db.models import Case, When, Value\nfrom django.db.models import BooleanField\nfrom django.db.models import Q\n\nfrom sql_util.utils import SubquerySum, SubqueryCount\n\nfrom decimal import Decimal\n\nfrom datetime import datetime, timedelta\n\nimport common.models\nfrom company.serializers import SupplierPartSerializer\nfrom part.serializers import PartBriefSerializer\nfrom InvenTree.serializers import UserSerializerBrief, InvenTreeModelSerializer\nfrom InvenTree.serializers import InvenTreeAttachmentSerializerField\n\n\nclass LocationBriefSerializer(InvenTreeModelSerializer):\n \"\"\"\n Provides a brief serializer 
for a StockLocation object\n \"\"\"\n\n class Meta:\n model = StockLocation\n fields = [\n 'pk',\n 'name',\n 'pathstring',\n ]\n\n\nclass StockItemSerializerBrief(InvenTreeModelSerializer):\n \"\"\" Brief serializers for a StockItem \"\"\"\n\n location_name = serializers.CharField(source='location', read_only=True)\n part_name = serializers.CharField(source='part.full_name', read_only=True)\n quantity = serializers.FloatField()\n\n class Meta:\n model = StockItem\n fields = [\n 'pk',\n 'uid',\n 'part',\n 'part_name',\n 'supplier_part',\n 'location',\n 'location_name',\n 'quantity',\n ]\n\n\nclass StockItemSerializer(InvenTreeModelSerializer):\n \"\"\" Serializer for a StockItem:\n\n - Includes serialization for the linked part\n - Includes serialization for the item location\n \"\"\"\n\n @staticmethod\n def prefetch_queryset(queryset):\n \"\"\"\n Prefetch related database tables,\n to reduce database hits.\n \"\"\"\n\n return queryset.prefetch_related(\n 'belongs_to',\n 'build',\n 'customer',\n 'sales_order',\n 'supplier_part',\n 'supplier_part__supplier',\n 'supplier_part__manufacturer_part__manufacturer',\n 'allocations',\n 'sales_order_allocations',\n 'location',\n 'part',\n 'tracking_info',\n )\n\n @staticmethod\n def annotate_queryset(queryset):\n \"\"\"\n Add some extra annotations to the queryset,\n performing database queries as efficiently as possible.\n \"\"\"\n\n # Annotate the queryset with the total allocated to sales orders\n queryset = queryset.annotate(\n allocated=Coalesce(\n SubquerySum('sales_order_allocations__quantity'), Decimal(0)\n ) + Coalesce(\n SubquerySum('allocations__quantity'), Decimal(0)\n )\n )\n\n # Annotate the queryset with the number of tracking items\n queryset = queryset.annotate(\n tracking_items=SubqueryCount('tracking_info')\n )\n\n # Add flag to indicate if the StockItem has expired\n queryset = queryset.annotate(\n expired=Case(\n When(\n StockItem.EXPIRED_FILTER, then=Value(True, output_field=BooleanField()),\n ),\n default=Value(False, output_field=BooleanField())\n )\n )\n\n # Add flag to indicate if the StockItem is stale\n stale_days = common.models.InvenTreeSetting.get_setting('STOCK_STALE_DAYS')\n stale_date = datetime.now().date() + timedelta(days=stale_days)\n stale_filter = StockItem.IN_STOCK_FILTER & ~Q(expiry_date=None) & Q(expiry_date__lt=stale_date)\n\n queryset = queryset.annotate(\n stale=Case(\n When(\n stale_filter, then=Value(True, output_field=BooleanField()),\n ),\n default=Value(False, output_field=BooleanField()),\n )\n )\n\n return queryset\n\n status_text = serializers.CharField(source='get_status_display', read_only=True)\n\n supplier_part_detail = SupplierPartSerializer(source='supplier_part', many=False, read_only=True)\n\n part_detail = PartBriefSerializer(source='part', many=False, read_only=True)\n\n location_detail = LocationBriefSerializer(source='location', many=False, read_only=True)\n\n tracking_items = serializers.IntegerField(source='tracking_info_count', read_only=True, required=False)\n\n quantity = serializers.FloatField()\n\n allocated = serializers.FloatField(source='allocation_count', required=False)\n\n expired = serializers.BooleanField(required=False, read_only=True)\n\n stale = serializers.BooleanField(required=False, read_only=True)\n\n serial = serializers.CharField(required=False)\n\n required_tests = serializers.IntegerField(source='required_test_count', read_only=True, required=False)\n\n purchase_price = serializers.SerializerMethodField()\n\n def get_purchase_price(self, obj):\n \"\"\" Return 
purchase_price (Money field) as string (includes currency) \"\"\"\n\n return str(obj.purchase_price) if obj.purchase_price else '-'\n\n def __init__(self, *args, **kwargs):\n\n part_detail = kwargs.pop('part_detail', False)\n location_detail = kwargs.pop('location_detail', False)\n supplier_part_detail = kwargs.pop('supplier_part_detail', False)\n test_detail = kwargs.pop('test_detail', False)\n\n super(StockItemSerializer, self).__init__(*args, **kwargs)\n\n if part_detail is not True:\n self.fields.pop('part_detail')\n\n if location_detail is not True:\n self.fields.pop('location_detail')\n\n if supplier_part_detail is not True:\n self.fields.pop('supplier_part_detail')\n\n if test_detail is not True:\n self.fields.pop('required_tests')\n\n class Meta:\n model = StockItem\n fields = [\n 'allocated',\n 'batch',\n 'belongs_to',\n 'build',\n 'customer',\n 'expired',\n 'expiry_date',\n 'in_stock',\n 'is_building',\n 'link',\n 'location',\n 'location_detail',\n 'notes',\n 'packaging',\n 'part',\n 'part_detail',\n 'pk',\n 'quantity',\n 'required_tests',\n 'sales_order',\n 'serial',\n 'stale',\n 'status',\n 'status_text',\n 'stocktake_date',\n 'supplier_part',\n 'supplier_part_detail',\n 'tracking_items',\n 'uid',\n 'updated',\n 'purchase_price',\n ]\n\n \"\"\" These fields are read-only in this context.\n They can be updated by accessing the appropriate API endpoints\n \"\"\"\n read_only_fields = [\n 'allocated',\n 'stocktake_date',\n 'stocktake_user',\n 'updated',\n 'in_stock'\n ]\n\n\nclass StockQuantitySerializer(InvenTreeModelSerializer):\n\n class Meta:\n model = StockItem\n fields = ('quantity',)\n\n\nclass LocationSerializer(InvenTreeModelSerializer):\n \"\"\" Detailed information about a stock location\n \"\"\"\n\n url = serializers.CharField(source='get_absolute_url', read_only=True)\n\n items = serializers.IntegerField(source='item_count', read_only=True)\n\n class Meta:\n model = StockLocation\n fields = [\n 'pk',\n 'url',\n 'name',\n 'description',\n 'parent',\n 'pathstring',\n 'items',\n ]\n\n\nclass StockItemAttachmentSerializer(InvenTreeModelSerializer):\n \"\"\" Serializer for StockItemAttachment model \"\"\"\n\n def __init__(self, *args, **kwargs):\n user_detail = kwargs.pop('user_detail', False)\n\n super().__init__(*args, **kwargs)\n\n if user_detail is not True:\n self.fields.pop('user_detail')\n\n user_detail = UserSerializerBrief(source='user', read_only=True)\n\n attachment = InvenTreeAttachmentSerializerField(required=True)\n\n class Meta:\n model = StockItemAttachment\n\n fields = [\n 'pk',\n 'stock_item',\n 'attachment',\n 'comment',\n 'upload_date',\n 'user',\n 'user_detail',\n ]\n\n read_only_fields = [\n 'upload_date',\n 'user',\n 'user_detail'\n ]\n\n\nclass StockItemTestResultSerializer(InvenTreeModelSerializer):\n \"\"\" Serializer for the StockItemTestResult model \"\"\"\n\n user_detail = UserSerializerBrief(source='user', read_only=True)\n\n key = serializers.CharField(read_only=True)\n\n attachment = InvenTreeAttachmentSerializerField(required=False)\n\n def __init__(self, *args, **kwargs):\n user_detail = kwargs.pop('user_detail', False)\n\n super().__init__(*args, **kwargs)\n\n if user_detail is not True:\n self.fields.pop('user_detail')\n\n class Meta:\n model = StockItemTestResult\n\n fields = [\n 'pk',\n 'stock_item',\n 'key',\n 'test',\n 'result',\n 'value',\n 'attachment',\n 'notes',\n 'user',\n 'user_detail',\n 'date'\n ]\n\n read_only_fields = [\n 'pk',\n 'user',\n 'date',\n ]\n\n\nclass StockTrackingSerializer(InvenTreeModelSerializer):\n \"\"\" 
Serializer for StockItemTracking model \"\"\"\n\n def __init__(self, *args, **kwargs):\n\n item_detail = kwargs.pop('item_detail', False)\n user_detail = kwargs.pop('user_detail', False)\n\n super().__init__(*args, **kwargs)\n\n if item_detail is not True:\n self.fields.pop('item_detail')\n\n if user_detail is not True:\n self.fields.pop('user_detail')\n\n label = serializers.CharField(read_only=True)\n\n item_detail = StockItemSerializerBrief(source='item', many=False, read_only=True)\n\n user_detail = UserSerializerBrief(source='user', many=False, read_only=True)\n\n deltas = serializers.JSONField(read_only=True)\n\n class Meta:\n model = StockItemTracking\n fields = [\n 'pk',\n 'item',\n 'item_detail',\n 'date',\n 'deltas',\n 'label',\n 'notes',\n 'tracking_type',\n 'user',\n 'user_detail',\n ]\n\n read_only_fields = [\n 'date',\n 'user',\n 'label',\n 'tracking_type',\n ]\n", "path": "InvenTree/stock/serializers.py"}]}
| 3,436 | 216 |
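The fix in this record exposes the price through a DRF `SerializerMethodField`; the formatting rule it applies can be sketched on its own. The `Money` class below is a stand-in for a currency-aware value, not django-money's real implementation.

```python
class Money:
    """Stand-in for a currency-aware price value (amount plus currency code)."""

    def __init__(self, amount, currency):
        self.amount = amount
        self.currency = currency

    def __str__(self):
        return f"{self.amount} {self.currency}"


def format_purchase_price(purchase_price):
    # Same rule as get_purchase_price() in the patch: stringify the Money
    # value (the string includes the currency), or fall back to "-" when unset.
    return str(purchase_price) if purchase_price else "-"


print(format_purchase_price(Money("1.25", "USD")))  # 1.25 USD
print(format_purchase_price(None))                  # -
```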
gh_patches_debug_35947
|
rasdani/github-patches
|
git_diff
|
opsdroid__opsdroid-1099
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Invert constraints
I can imagine situations where it would be useful to be able to invert constraints so that they do the opposite of what they are designed to do.
```python
from opsdroid.skill import Skill
from opsdroid.matchers import match_regex
from opsdroid.constraints import constrain_users
class MySkill(Skill):
@match_regex(r'hi')
@constrain_users(['alice', 'bob'], invert=True)
async def hello(self, message):
"""Says 'Hey' to anyone EXCEPT 'alice' and 'bob'."""
await message.respond('Hey')
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opsdroid/constraints.py`
Content:
```
1 """Decorator functions to use when creating skill modules.
2
3 These decorators are for specifying when a skill should not be called despite
4 having a matcher which matches the current message.
5 """
6
7 import logging
8
9 from opsdroid.helper import add_skill_attributes
10
11
12 _LOGGER = logging.getLogger(__name__)
13
14
15 def constrain_rooms(rooms):
16 """Return room constraint decorator."""
17
18 def constraint_decorator(func):
19 """Add room constraint to skill."""
20
21 def constraint_callback(message, rooms=rooms):
22 """Check if the room is correct."""
23 return message.target in rooms
24
25 func = add_skill_attributes(func)
26 func.constraints.append(constraint_callback)
27 return func
28
29 return constraint_decorator
30
31
32 def constrain_users(users):
33 """Return user constraint decorator."""
34
35 def constraint_decorator(func):
36 """Add user constraint to skill."""
37
38 def constraint_callback(message, users=users):
39 """Check if the user is correct."""
40 return message.user in users
41
42 func = add_skill_attributes(func)
43 func.constraints.append(constraint_callback)
44 return func
45
46 return constraint_decorator
47
48
49 def constrain_connectors(connectors):
50 """Return connector constraint decorator."""
51
52 def constraint_decorator(func):
53 """Add connectors constraint to skill."""
54
55 def constraint_callback(message, connectors=connectors):
56 """Check if the connectors is correct."""
57 return message.connector and (message.connector.name in connectors)
58
59 func = add_skill_attributes(func)
60 func.constraints.append(constraint_callback)
61 return func
62
63 return constraint_decorator
64
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/opsdroid/constraints.py b/opsdroid/constraints.py
--- a/opsdroid/constraints.py
+++ b/opsdroid/constraints.py
@@ -5,6 +5,7 @@
"""
import logging
+from functools import wraps
from opsdroid.helper import add_skill_attributes
@@ -12,7 +13,17 @@
_LOGGER = logging.getLogger(__name__)
-def constrain_rooms(rooms):
+def invert_wrapper(func):
+ """Inverts the result of a function."""
+
+ @wraps(func)
+ def inverted_func(*args, **kwargs):
+ return not func(*args, **kwargs)
+
+ return inverted_func
+
+
+def constrain_rooms(rooms, invert=False):
"""Return room constraint decorator."""
def constraint_decorator(func):
@@ -23,13 +34,15 @@
return message.target in rooms
func = add_skill_attributes(func)
+ if invert:
+ constraint_callback = invert_wrapper(constraint_callback)
func.constraints.append(constraint_callback)
return func
return constraint_decorator
-def constrain_users(users):
+def constrain_users(users, invert=False):
"""Return user constraint decorator."""
def constraint_decorator(func):
@@ -40,13 +53,15 @@
return message.user in users
func = add_skill_attributes(func)
+ if invert:
+ constraint_callback = invert_wrapper(constraint_callback)
func.constraints.append(constraint_callback)
return func
return constraint_decorator
-def constrain_connectors(connectors):
+def constrain_connectors(connectors, invert=False):
"""Return connector constraint decorator."""
def constraint_decorator(func):
@@ -57,6 +72,8 @@
return message.connector and (message.connector.name in connectors)
func = add_skill_attributes(func)
+ if invert:
+ constraint_callback = invert_wrapper(constraint_callback)
func.constraints.append(constraint_callback)
return func
|
{"golden_diff": "diff --git a/opsdroid/constraints.py b/opsdroid/constraints.py\n--- a/opsdroid/constraints.py\n+++ b/opsdroid/constraints.py\n@@ -5,6 +5,7 @@\n \"\"\"\n \n import logging\n+from functools import wraps\n \n from opsdroid.helper import add_skill_attributes\n \n@@ -12,7 +13,17 @@\n _LOGGER = logging.getLogger(__name__)\n \n \n-def constrain_rooms(rooms):\n+def invert_wrapper(func):\n+ \"\"\"Inverts the result of a function.\"\"\"\n+\n+ @wraps(func)\n+ def inverted_func(*args, **kwargs):\n+ return not func(*args, **kwargs)\n+\n+ return inverted_func\n+\n+\n+def constrain_rooms(rooms, invert=False):\n \"\"\"Return room constraint decorator.\"\"\"\n \n def constraint_decorator(func):\n@@ -23,13 +34,15 @@\n return message.target in rooms\n \n func = add_skill_attributes(func)\n+ if invert:\n+ constraint_callback = invert_wrapper(constraint_callback)\n func.constraints.append(constraint_callback)\n return func\n \n return constraint_decorator\n \n \n-def constrain_users(users):\n+def constrain_users(users, invert=False):\n \"\"\"Return user constraint decorator.\"\"\"\n \n def constraint_decorator(func):\n@@ -40,13 +53,15 @@\n return message.user in users\n \n func = add_skill_attributes(func)\n+ if invert:\n+ constraint_callback = invert_wrapper(constraint_callback)\n func.constraints.append(constraint_callback)\n return func\n \n return constraint_decorator\n \n \n-def constrain_connectors(connectors):\n+def constrain_connectors(connectors, invert=False):\n \"\"\"Return connector constraint decorator.\"\"\"\n \n def constraint_decorator(func):\n@@ -57,6 +72,8 @@\n return message.connector and (message.connector.name in connectors)\n \n func = add_skill_attributes(func)\n+ if invert:\n+ constraint_callback = invert_wrapper(constraint_callback)\n func.constraints.append(constraint_callback)\n return func\n", "issue": "Invert constraints\nI can imagine situations where it would be useful to be able to invert constraints so that they do the opposite of what they are designed to do.\r\n\r\n```python\r\nfrom opsdroid.skill import Skill\r\nfrom opsdroid.matchers import match_regex\r\nfrom opsdroid.constraints import constrain_users\r\n\r\nclass MySkill(Skill):\r\n\r\n @match_regex(r'hi')\r\n @constrain_users(['alice', 'bob'], invert=True)\r\n async def hello(self, message):\r\n \"\"\"Says 'Hey' to anyone EXCEPT 'alice' and 'bob'.\"\"\"\r\n await message.respond('Hey')\r\n```\n", "before_files": [{"content": "\"\"\"Decorator functions to use when creating skill modules.\n\nThese decorators are for specifying when a skill should not be called despite\nhaving a matcher which matches the current message.\n\"\"\"\n\nimport logging\n\nfrom opsdroid.helper import add_skill_attributes\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\ndef constrain_rooms(rooms):\n \"\"\"Return room constraint decorator.\"\"\"\n\n def constraint_decorator(func):\n \"\"\"Add room constraint to skill.\"\"\"\n\n def constraint_callback(message, rooms=rooms):\n \"\"\"Check if the room is correct.\"\"\"\n return message.target in rooms\n\n func = add_skill_attributes(func)\n func.constraints.append(constraint_callback)\n return func\n\n return constraint_decorator\n\n\ndef constrain_users(users):\n \"\"\"Return user constraint decorator.\"\"\"\n\n def constraint_decorator(func):\n \"\"\"Add user constraint to skill.\"\"\"\n\n def constraint_callback(message, users=users):\n \"\"\"Check if the user is correct.\"\"\"\n return message.user in users\n\n func = add_skill_attributes(func)\n 
func.constraints.append(constraint_callback)\n return func\n\n return constraint_decorator\n\n\ndef constrain_connectors(connectors):\n \"\"\"Return connector constraint decorator.\"\"\"\n\n def constraint_decorator(func):\n \"\"\"Add connectors constraint to skill.\"\"\"\n\n def constraint_callback(message, connectors=connectors):\n \"\"\"Check if the connectors is correct.\"\"\"\n return message.connector and (message.connector.name in connectors)\n\n func = add_skill_attributes(func)\n func.constraints.append(constraint_callback)\n return func\n\n return constraint_decorator\n", "path": "opsdroid/constraints.py"}], "after_files": [{"content": "\"\"\"Decorator functions to use when creating skill modules.\n\nThese decorators are for specifying when a skill should not be called despite\nhaving a matcher which matches the current message.\n\"\"\"\n\nimport logging\nfrom functools import wraps\n\nfrom opsdroid.helper import add_skill_attributes\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\ndef invert_wrapper(func):\n \"\"\"Inverts the result of a function.\"\"\"\n\n @wraps(func)\n def inverted_func(*args, **kwargs):\n return not func(*args, **kwargs)\n\n return inverted_func\n\n\ndef constrain_rooms(rooms, invert=False):\n \"\"\"Return room constraint decorator.\"\"\"\n\n def constraint_decorator(func):\n \"\"\"Add room constraint to skill.\"\"\"\n\n def constraint_callback(message, rooms=rooms):\n \"\"\"Check if the room is correct.\"\"\"\n return message.target in rooms\n\n func = add_skill_attributes(func)\n if invert:\n constraint_callback = invert_wrapper(constraint_callback)\n func.constraints.append(constraint_callback)\n return func\n\n return constraint_decorator\n\n\ndef constrain_users(users, invert=False):\n \"\"\"Return user constraint decorator.\"\"\"\n\n def constraint_decorator(func):\n \"\"\"Add user constraint to skill.\"\"\"\n\n def constraint_callback(message, users=users):\n \"\"\"Check if the user is correct.\"\"\"\n return message.user in users\n\n func = add_skill_attributes(func)\n if invert:\n constraint_callback = invert_wrapper(constraint_callback)\n func.constraints.append(constraint_callback)\n return func\n\n return constraint_decorator\n\n\ndef constrain_connectors(connectors, invert=False):\n \"\"\"Return connector constraint decorator.\"\"\"\n\n def constraint_decorator(func):\n \"\"\"Add connectors constraint to skill.\"\"\"\n\n def constraint_callback(message, connectors=connectors):\n \"\"\"Check if the connectors is correct.\"\"\"\n return message.connector and (message.connector.name in connectors)\n\n func = add_skill_attributes(func)\n if invert:\n constraint_callback = invert_wrapper(constraint_callback)\n func.constraints.append(constraint_callback)\n return func\n\n return constraint_decorator\n", "path": "opsdroid/constraints.py"}]}
| 825 | 426 |
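The `invert_wrapper` helper added in this record is a generic predicate inverter. A self-contained sketch of the same pattern, applied to a plain user check rather than an opsdroid constraint callback, so it runs without opsdroid installed:

```python
from functools import wraps


def invert_wrapper(func):
    """Return a wrapper that negates the boolean result of func."""
    @wraps(func)
    def inverted_func(*args, **kwargs):
        return not func(*args, **kwargs)
    return inverted_func


def is_listed_user(user, users=("alice", "bob")):
    return user in users


is_unlisted_user = invert_wrapper(is_listed_user)

print(is_listed_user("alice"), is_unlisted_user("alice"))  # True False
print(is_listed_user("carol"), is_unlisted_user("carol"))  # False True
print(is_unlisted_user.__name__)  # is_listed_user (metadata preserved by functools.wraps)
```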
gh_patches_debug_13732
|
rasdani/github-patches
|
git_diff
|
pytorch__pytorch-3113
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
print(torch.DoubleTensor(1)) # ZeroDivisionError: float division by zero
on print(torch.DoubleTensor(1)) I got ZeroDivisionError: float division by zero
but torch.DoubleTensor(0)) or torch.DoubleTensor(2)) work just fine.
I work in Jupyter notebook pytorch 0.1.12
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torch/_tensor_str.py`
Content:
```
1 import math
2 import torch
3 from functools import reduce
4 from ._utils import _range
5
6
7 class __PrinterOptions(object):
8 precision = 4
9 threshold = 1000
10 edgeitems = 3
11 linewidth = 80
12
13
14 PRINT_OPTS = __PrinterOptions()
15 SCALE_FORMAT = '{:.5e} *\n'
16
17
18 # We could use **kwargs, but this will give better docs
19 def set_printoptions(
20 precision=None,
21 threshold=None,
22 edgeitems=None,
23 linewidth=None,
24 profile=None,
25 ):
26 """Set options for printing. Items shamelessly taken from Numpy
27
28 Args:
29 precision: Number of digits of precision for floating point output
30 (default 8).
31 threshold: Total number of array elements which trigger summarization
32 rather than full repr (default 1000).
33 edgeitems: Number of array items in summary at beginning and end of
34 each dimension (default 3).
35 linewidth: The number of characters per line for the purpose of
36 inserting line breaks (default 80). Thresholded matricies will
37 ignore this parameter.
38 profile: Sane defaults for pretty printing. Can override with any of
39 the above options. (default, short, full)
40 """
41 if profile is not None:
42 if profile == "default":
43 PRINT_OPTS.precision = 4
44 PRINT_OPTS.threshold = 1000
45 PRINT_OPTS.edgeitems = 3
46 PRINT_OPTS.linewidth = 80
47 elif profile == "short":
48 PRINT_OPTS.precision = 2
49 PRINT_OPTS.threshold = 1000
50 PRINT_OPTS.edgeitems = 2
51 PRINT_OPTS.linewidth = 80
52 elif profile == "full":
53 PRINT_OPTS.precision = 4
54 PRINT_OPTS.threshold = float('inf')
55 PRINT_OPTS.edgeitems = 3
56 PRINT_OPTS.linewidth = 80
57
58 if precision is not None:
59 PRINT_OPTS.precision = precision
60 if threshold is not None:
61 PRINT_OPTS.threshold = threshold
62 if edgeitems is not None:
63 PRINT_OPTS.edgeitems = edgeitems
64 if linewidth is not None:
65 PRINT_OPTS.linewidth = linewidth
66
67
68 def _number_format(tensor, min_sz=-1):
69 min_sz = max(min_sz, 2)
70 tensor = torch.DoubleTensor(tensor.size()).copy_(tensor).abs_().view(tensor.nelement())
71
72 pos_inf_mask = tensor.eq(float('inf'))
73 neg_inf_mask = tensor.eq(float('-inf'))
74 nan_mask = tensor.ne(tensor)
75 invalid_value_mask = pos_inf_mask + neg_inf_mask + nan_mask
76 if invalid_value_mask.all():
77 example_value = 0
78 else:
79 example_value = tensor[invalid_value_mask.eq(0)][0]
80 tensor[invalid_value_mask] = example_value
81 if invalid_value_mask.any():
82 min_sz = max(min_sz, 3)
83
84 int_mode = True
85 # TODO: use fmod?
86 for value in tensor:
87 if value != math.ceil(value):
88 int_mode = False
89 break
90
91 exp_min = tensor.min()
92 if exp_min != 0:
93 exp_min = math.floor(math.log10(exp_min)) + 1
94 else:
95 exp_min = 1
96 exp_max = tensor.max()
97 if exp_max != 0:
98 exp_max = math.floor(math.log10(exp_max)) + 1
99 else:
100 exp_max = 1
101
102 scale = 1
103 exp_max = int(exp_max)
104 prec = PRINT_OPTS.precision
105 if int_mode:
106 if exp_max > prec + 1:
107 format = '{{:11.{}e}}'.format(prec)
108 sz = max(min_sz, 7 + prec)
109 else:
110 sz = max(min_sz, exp_max + 1)
111 format = '{:' + str(sz) + '.0f}'
112 else:
113 if exp_max - exp_min > prec:
114 sz = 7 + prec
115 if abs(exp_max) > 99 or abs(exp_min) > 99:
116 sz = sz + 1
117 sz = max(min_sz, sz)
118 format = '{{:{}.{}e}}'.format(sz, prec)
119 else:
120 if exp_max > prec + 1 or exp_max < 0:
121 sz = max(min_sz, 7)
122 scale = math.pow(10, exp_max - 1)
123 else:
124 if exp_max == 0:
125 sz = 7
126 else:
127 sz = exp_max + 6
128 sz = max(min_sz, sz)
129 format = '{{:{}.{}f}}'.format(sz, prec)
130 return format, scale, sz
131
132
133 def _tensor_str(self):
134 n = PRINT_OPTS.edgeitems
135 has_hdots = self.size()[-1] > 2 * n
136 has_vdots = self.size()[-2] > 2 * n
137 print_full_mat = not has_hdots and not has_vdots
138 formatter = _number_format(self, min_sz=3 if not print_full_mat else 0)
139 print_dots = self.numel() >= PRINT_OPTS.threshold
140
141 dim_sz = max(2, max(len(str(x)) for x in self.size()))
142 dim_fmt = "{:^" + str(dim_sz) + "}"
143 dot_fmt = u"{:^" + str(dim_sz + 1) + "}"
144
145 counter_dim = self.ndimension() - 2
146 counter = torch.LongStorage(counter_dim).fill_(0)
147 counter[counter.size() - 1] = -1
148 finished = False
149 strt = ''
150 while True:
151 nrestarted = [False for i in counter]
152 nskipped = [False for i in counter]
153 for i in _range(counter_dim - 1, -1, -1):
154 counter[i] += 1
155 if print_dots and counter[i] == n and self.size(i) > 2 * n:
156 counter[i] = self.size(i) - n
157 nskipped[i] = True
158 if counter[i] == self.size(i):
159 if i == 0:
160 finished = True
161 counter[i] = 0
162 nrestarted[i] = True
163 else:
164 break
165 if finished:
166 break
167 elif print_dots:
168 if any(nskipped):
169 for hdot in nskipped:
170 strt += dot_fmt.format('...') if hdot \
171 else dot_fmt.format('')
172 strt += '\n'
173 if any(nrestarted):
174 strt += ' '
175 for vdot in nrestarted:
176 strt += dot_fmt.format(u'\u22EE' if vdot else '')
177 strt += '\n'
178 if strt != '':
179 strt += '\n'
180 strt += '({},.,.) = \n'.format(
181 ','.join(dim_fmt.format(i) for i in counter))
182 submatrix = reduce(lambda t, i: t.select(0, i), counter, self)
183 strt += _matrix_str(submatrix, ' ', formatter, print_dots)
184 return strt
185
186
187 def __repr_row(row, indent, fmt, scale, sz, truncate=None):
188 if truncate is not None:
189 dotfmt = " {:^5} "
190 return (indent +
191 ' '.join(fmt.format(val / scale) for val in row[:truncate]) +
192 dotfmt.format('...') +
193 ' '.join(fmt.format(val / scale) for val in row[-truncate:]) +
194 '\n')
195 else:
196 return indent + ' '.join(fmt.format(val / scale) for val in row) + '\n'
197
198
199 def _matrix_str(self, indent='', formatter=None, force_truncate=False):
200 n = PRINT_OPTS.edgeitems
201 has_hdots = self.size(1) > 2 * n
202 has_vdots = self.size(0) > 2 * n
203 print_full_mat = not has_hdots and not has_vdots
204
205 if formatter is None:
206 fmt, scale, sz = _number_format(self,
207 min_sz=5 if not print_full_mat else 0)
208 else:
209 fmt, scale, sz = formatter
210 nColumnPerLine = int(math.floor((PRINT_OPTS.linewidth - len(indent)) / (sz + 1)))
211 strt = ''
212 firstColumn = 0
213
214 if not force_truncate and \
215 (self.numel() < PRINT_OPTS.threshold or print_full_mat):
216 while firstColumn < self.size(1):
217 lastColumn = min(firstColumn + nColumnPerLine - 1, self.size(1) - 1)
218 if nColumnPerLine < self.size(1):
219 strt += '\n' if firstColumn != 1 else ''
220 strt += 'Columns {} to {} \n{}'.format(
221 firstColumn, lastColumn, indent)
222 if scale != 1:
223 strt += SCALE_FORMAT.format(scale)
224 for l in _range(self.size(0)):
225 strt += indent + (' ' if scale != 1 else '')
226 row_slice = self[l, firstColumn:lastColumn + 1]
227 strt += ' '.join(fmt.format(val / scale) for val in row_slice)
228 strt += '\n'
229 firstColumn = lastColumn + 1
230 else:
231 if scale != 1:
232 strt += SCALE_FORMAT.format(scale)
233 if has_vdots and has_hdots:
234 vdotfmt = "{:^" + str((sz + 1) * n - 1) + "}"
235 ddotfmt = u"{:^5}"
236 for row in self[:n]:
237 strt += __repr_row(row, indent, fmt, scale, sz, n)
238 strt += indent + ' '.join([vdotfmt.format('...'),
239 ddotfmt.format(u'\u22F1'),
240 vdotfmt.format('...')]) + "\n"
241 for row in self[-n:]:
242 strt += __repr_row(row, indent, fmt, scale, sz, n)
243 elif not has_vdots and has_hdots:
244 for row in self:
245 strt += __repr_row(row, indent, fmt, scale, sz, n)
246 elif has_vdots and not has_hdots:
247 vdotfmt = u"{:^" + \
248 str(len(__repr_row(self[0], '', fmt, scale, sz))) + \
249 "}\n"
250 for row in self[:n]:
251 strt += __repr_row(row, indent, fmt, scale, sz)
252 strt += vdotfmt.format(u'\u22EE')
253 for row in self[-n:]:
254 strt += __repr_row(row, indent, fmt, scale, sz)
255 else:
256 for row in self:
257 strt += __repr_row(row, indent, fmt, scale, sz)
258 return strt
259
260
261 def _vector_str(self):
262 fmt, scale, sz = _number_format(self)
263 strt = ''
264 ident = ''
265 n = PRINT_OPTS.edgeitems
266 dotfmt = u"{:^" + str(sz) + "}\n"
267 if scale != 1:
268 strt += SCALE_FORMAT.format(scale)
269 ident = ' '
270 if self.numel() < PRINT_OPTS.threshold:
271 return (strt +
272 '\n'.join(ident + fmt.format(val / scale) for val in self) +
273 '\n')
274 else:
275 return (strt +
276 '\n'.join(ident + fmt.format(val / scale) for val in self[:n]) +
277 '\n' + (ident + dotfmt.format(u"\u22EE")) +
278 '\n'.join(ident + fmt.format(val / scale) for val in self[-n:]) +
279 '\n')
280
281
282 def _str(self):
283 if self.ndimension() == 0:
284 return '[{} with no dimension]\n'.format(torch.typename(self))
285 elif self.ndimension() == 1:
286 strt = _vector_str(self)
287 elif self.ndimension() == 2:
288 strt = _matrix_str(self)
289 else:
290 strt = _tensor_str(self)
291
292 size_str = 'x'.join(str(size) for size in self.size())
293 device_str = '' if not self.is_cuda else \
294 ' (GPU {})'.format(self.get_device())
295 strt += '[{} of size {}{}]\n'.format(torch.typename(self),
296 size_str, device_str)
297 return '\n' + strt
298
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/torch/_tensor_str.py b/torch/_tensor_str.py
--- a/torch/_tensor_str.py
+++ b/torch/_tensor_str.py
@@ -2,6 +2,10 @@
import torch
from functools import reduce
from ._utils import _range
+from sys import float_info
+
+
+__MIN_LOG_SCALE = math.ceil(math.log(float_info.min * float_info.epsilon, 10))
class __PrinterOptions(object):
@@ -119,7 +123,7 @@
else:
if exp_max > prec + 1 or exp_max < 0:
sz = max(min_sz, 7)
- scale = math.pow(10, exp_max - 1)
+ scale = math.pow(10, max(exp_max - 1, __MIN_LOG_SCALE))
else:
if exp_max == 0:
sz = 7
|
{"golden_diff": "diff --git a/torch/_tensor_str.py b/torch/_tensor_str.py\n--- a/torch/_tensor_str.py\n+++ b/torch/_tensor_str.py\n@@ -2,6 +2,10 @@\n import torch\n from functools import reduce\n from ._utils import _range\n+from sys import float_info\n+\n+\n+__MIN_LOG_SCALE = math.ceil(math.log(float_info.min * float_info.epsilon, 10))\n \n \n class __PrinterOptions(object):\n@@ -119,7 +123,7 @@\n else:\n if exp_max > prec + 1 or exp_max < 0:\n sz = max(min_sz, 7)\n- scale = math.pow(10, exp_max - 1)\n+ scale = math.pow(10, max(exp_max - 1, __MIN_LOG_SCALE))\n else:\n if exp_max == 0:\n sz = 7\n", "issue": "print(torch.DoubleTensor(1)) # ZeroDivisionError: float division by zero\non print(torch.DoubleTensor(1)) I got ZeroDivisionError: float division by zero\r\nbut torch.DoubleTensor(0)) or torch.DoubleTensor(2)) work just fine.\r\nI work in Jupyter notebook pytorch 0.1.12\n", "before_files": [{"content": "import math\nimport torch\nfrom functools import reduce\nfrom ._utils import _range\n\n\nclass __PrinterOptions(object):\n precision = 4\n threshold = 1000\n edgeitems = 3\n linewidth = 80\n\n\nPRINT_OPTS = __PrinterOptions()\nSCALE_FORMAT = '{:.5e} *\\n'\n\n\n# We could use **kwargs, but this will give better docs\ndef set_printoptions(\n precision=None,\n threshold=None,\n edgeitems=None,\n linewidth=None,\n profile=None,\n):\n \"\"\"Set options for printing. Items shamelessly taken from Numpy\n\n Args:\n precision: Number of digits of precision for floating point output\n (default 8).\n threshold: Total number of array elements which trigger summarization\n rather than full repr (default 1000).\n edgeitems: Number of array items in summary at beginning and end of\n each dimension (default 3).\n linewidth: The number of characters per line for the purpose of\n inserting line breaks (default 80). Thresholded matricies will\n ignore this parameter.\n profile: Sane defaults for pretty printing. Can override with any of\n the above options. 
(default, short, full)\n \"\"\"\n if profile is not None:\n if profile == \"default\":\n PRINT_OPTS.precision = 4\n PRINT_OPTS.threshold = 1000\n PRINT_OPTS.edgeitems = 3\n PRINT_OPTS.linewidth = 80\n elif profile == \"short\":\n PRINT_OPTS.precision = 2\n PRINT_OPTS.threshold = 1000\n PRINT_OPTS.edgeitems = 2\n PRINT_OPTS.linewidth = 80\n elif profile == \"full\":\n PRINT_OPTS.precision = 4\n PRINT_OPTS.threshold = float('inf')\n PRINT_OPTS.edgeitems = 3\n PRINT_OPTS.linewidth = 80\n\n if precision is not None:\n PRINT_OPTS.precision = precision\n if threshold is not None:\n PRINT_OPTS.threshold = threshold\n if edgeitems is not None:\n PRINT_OPTS.edgeitems = edgeitems\n if linewidth is not None:\n PRINT_OPTS.linewidth = linewidth\n\n\ndef _number_format(tensor, min_sz=-1):\n min_sz = max(min_sz, 2)\n tensor = torch.DoubleTensor(tensor.size()).copy_(tensor).abs_().view(tensor.nelement())\n\n pos_inf_mask = tensor.eq(float('inf'))\n neg_inf_mask = tensor.eq(float('-inf'))\n nan_mask = tensor.ne(tensor)\n invalid_value_mask = pos_inf_mask + neg_inf_mask + nan_mask\n if invalid_value_mask.all():\n example_value = 0\n else:\n example_value = tensor[invalid_value_mask.eq(0)][0]\n tensor[invalid_value_mask] = example_value\n if invalid_value_mask.any():\n min_sz = max(min_sz, 3)\n\n int_mode = True\n # TODO: use fmod?\n for value in tensor:\n if value != math.ceil(value):\n int_mode = False\n break\n\n exp_min = tensor.min()\n if exp_min != 0:\n exp_min = math.floor(math.log10(exp_min)) + 1\n else:\n exp_min = 1\n exp_max = tensor.max()\n if exp_max != 0:\n exp_max = math.floor(math.log10(exp_max)) + 1\n else:\n exp_max = 1\n\n scale = 1\n exp_max = int(exp_max)\n prec = PRINT_OPTS.precision\n if int_mode:\n if exp_max > prec + 1:\n format = '{{:11.{}e}}'.format(prec)\n sz = max(min_sz, 7 + prec)\n else:\n sz = max(min_sz, exp_max + 1)\n format = '{:' + str(sz) + '.0f}'\n else:\n if exp_max - exp_min > prec:\n sz = 7 + prec\n if abs(exp_max) > 99 or abs(exp_min) > 99:\n sz = sz + 1\n sz = max(min_sz, sz)\n format = '{{:{}.{}e}}'.format(sz, prec)\n else:\n if exp_max > prec + 1 or exp_max < 0:\n sz = max(min_sz, 7)\n scale = math.pow(10, exp_max - 1)\n else:\n if exp_max == 0:\n sz = 7\n else:\n sz = exp_max + 6\n sz = max(min_sz, sz)\n format = '{{:{}.{}f}}'.format(sz, prec)\n return format, scale, sz\n\n\ndef _tensor_str(self):\n n = PRINT_OPTS.edgeitems\n has_hdots = self.size()[-1] > 2 * n\n has_vdots = self.size()[-2] > 2 * n\n print_full_mat = not has_hdots and not has_vdots\n formatter = _number_format(self, min_sz=3 if not print_full_mat else 0)\n print_dots = self.numel() >= PRINT_OPTS.threshold\n\n dim_sz = max(2, max(len(str(x)) for x in self.size()))\n dim_fmt = \"{:^\" + str(dim_sz) + \"}\"\n dot_fmt = u\"{:^\" + str(dim_sz + 1) + \"}\"\n\n counter_dim = self.ndimension() - 2\n counter = torch.LongStorage(counter_dim).fill_(0)\n counter[counter.size() - 1] = -1\n finished = False\n strt = ''\n while True:\n nrestarted = [False for i in counter]\n nskipped = [False for i in counter]\n for i in _range(counter_dim - 1, -1, -1):\n counter[i] += 1\n if print_dots and counter[i] == n and self.size(i) > 2 * n:\n counter[i] = self.size(i) - n\n nskipped[i] = True\n if counter[i] == self.size(i):\n if i == 0:\n finished = True\n counter[i] = 0\n nrestarted[i] = True\n else:\n break\n if finished:\n break\n elif print_dots:\n if any(nskipped):\n for hdot in nskipped:\n strt += dot_fmt.format('...') if hdot \\\n else dot_fmt.format('')\n strt += '\\n'\n if any(nrestarted):\n strt += ' '\n for 
vdot in nrestarted:\n strt += dot_fmt.format(u'\\u22EE' if vdot else '')\n strt += '\\n'\n if strt != '':\n strt += '\\n'\n strt += '({},.,.) = \\n'.format(\n ','.join(dim_fmt.format(i) for i in counter))\n submatrix = reduce(lambda t, i: t.select(0, i), counter, self)\n strt += _matrix_str(submatrix, ' ', formatter, print_dots)\n return strt\n\n\ndef __repr_row(row, indent, fmt, scale, sz, truncate=None):\n if truncate is not None:\n dotfmt = \" {:^5} \"\n return (indent +\n ' '.join(fmt.format(val / scale) for val in row[:truncate]) +\n dotfmt.format('...') +\n ' '.join(fmt.format(val / scale) for val in row[-truncate:]) +\n '\\n')\n else:\n return indent + ' '.join(fmt.format(val / scale) for val in row) + '\\n'\n\n\ndef _matrix_str(self, indent='', formatter=None, force_truncate=False):\n n = PRINT_OPTS.edgeitems\n has_hdots = self.size(1) > 2 * n\n has_vdots = self.size(0) > 2 * n\n print_full_mat = not has_hdots and not has_vdots\n\n if formatter is None:\n fmt, scale, sz = _number_format(self,\n min_sz=5 if not print_full_mat else 0)\n else:\n fmt, scale, sz = formatter\n nColumnPerLine = int(math.floor((PRINT_OPTS.linewidth - len(indent)) / (sz + 1)))\n strt = ''\n firstColumn = 0\n\n if not force_truncate and \\\n (self.numel() < PRINT_OPTS.threshold or print_full_mat):\n while firstColumn < self.size(1):\n lastColumn = min(firstColumn + nColumnPerLine - 1, self.size(1) - 1)\n if nColumnPerLine < self.size(1):\n strt += '\\n' if firstColumn != 1 else ''\n strt += 'Columns {} to {} \\n{}'.format(\n firstColumn, lastColumn, indent)\n if scale != 1:\n strt += SCALE_FORMAT.format(scale)\n for l in _range(self.size(0)):\n strt += indent + (' ' if scale != 1 else '')\n row_slice = self[l, firstColumn:lastColumn + 1]\n strt += ' '.join(fmt.format(val / scale) for val in row_slice)\n strt += '\\n'\n firstColumn = lastColumn + 1\n else:\n if scale != 1:\n strt += SCALE_FORMAT.format(scale)\n if has_vdots and has_hdots:\n vdotfmt = \"{:^\" + str((sz + 1) * n - 1) + \"}\"\n ddotfmt = u\"{:^5}\"\n for row in self[:n]:\n strt += __repr_row(row, indent, fmt, scale, sz, n)\n strt += indent + ' '.join([vdotfmt.format('...'),\n ddotfmt.format(u'\\u22F1'),\n vdotfmt.format('...')]) + \"\\n\"\n for row in self[-n:]:\n strt += __repr_row(row, indent, fmt, scale, sz, n)\n elif not has_vdots and has_hdots:\n for row in self:\n strt += __repr_row(row, indent, fmt, scale, sz, n)\n elif has_vdots and not has_hdots:\n vdotfmt = u\"{:^\" + \\\n str(len(__repr_row(self[0], '', fmt, scale, sz))) + \\\n \"}\\n\"\n for row in self[:n]:\n strt += __repr_row(row, indent, fmt, scale, sz)\n strt += vdotfmt.format(u'\\u22EE')\n for row in self[-n:]:\n strt += __repr_row(row, indent, fmt, scale, sz)\n else:\n for row in self:\n strt += __repr_row(row, indent, fmt, scale, sz)\n return strt\n\n\ndef _vector_str(self):\n fmt, scale, sz = _number_format(self)\n strt = ''\n ident = ''\n n = PRINT_OPTS.edgeitems\n dotfmt = u\"{:^\" + str(sz) + \"}\\n\"\n if scale != 1:\n strt += SCALE_FORMAT.format(scale)\n ident = ' '\n if self.numel() < PRINT_OPTS.threshold:\n return (strt +\n '\\n'.join(ident + fmt.format(val / scale) for val in self) +\n '\\n')\n else:\n return (strt +\n '\\n'.join(ident + fmt.format(val / scale) for val in self[:n]) +\n '\\n' + (ident + dotfmt.format(u\"\\u22EE\")) +\n '\\n'.join(ident + fmt.format(val / scale) for val in self[-n:]) +\n '\\n')\n\n\ndef _str(self):\n if self.ndimension() == 0:\n return '[{} with no dimension]\\n'.format(torch.typename(self))\n elif self.ndimension() == 1:\n strt = 
_vector_str(self)\n elif self.ndimension() == 2:\n strt = _matrix_str(self)\n else:\n strt = _tensor_str(self)\n\n size_str = 'x'.join(str(size) for size in self.size())\n device_str = '' if not self.is_cuda else \\\n ' (GPU {})'.format(self.get_device())\n strt += '[{} of size {}{}]\\n'.format(torch.typename(self),\n size_str, device_str)\n return '\\n' + strt\n", "path": "torch/_tensor_str.py"}], "after_files": [{"content": "import math\nimport torch\nfrom functools import reduce\nfrom ._utils import _range\nfrom sys import float_info\n\n\n__MIN_LOG_SCALE = math.ceil(math.log(float_info.min * float_info.epsilon, 10))\n\n\nclass __PrinterOptions(object):\n precision = 4\n threshold = 1000\n edgeitems = 3\n linewidth = 80\n\n\nPRINT_OPTS = __PrinterOptions()\nSCALE_FORMAT = '{:.5e} *\\n'\n\n\n# We could use **kwargs, but this will give better docs\ndef set_printoptions(\n precision=None,\n threshold=None,\n edgeitems=None,\n linewidth=None,\n profile=None,\n):\n \"\"\"Set options for printing. Items shamelessly taken from Numpy\n\n Args:\n precision: Number of digits of precision for floating point output\n (default 8).\n threshold: Total number of array elements which trigger summarization\n rather than full repr (default 1000).\n edgeitems: Number of array items in summary at beginning and end of\n each dimension (default 3).\n linewidth: The number of characters per line for the purpose of\n inserting line breaks (default 80). Thresholded matricies will\n ignore this parameter.\n profile: Sane defaults for pretty printing. Can override with any of\n the above options. (default, short, full)\n \"\"\"\n if profile is not None:\n if profile == \"default\":\n PRINT_OPTS.precision = 4\n PRINT_OPTS.threshold = 1000\n PRINT_OPTS.edgeitems = 3\n PRINT_OPTS.linewidth = 80\n elif profile == \"short\":\n PRINT_OPTS.precision = 2\n PRINT_OPTS.threshold = 1000\n PRINT_OPTS.edgeitems = 2\n PRINT_OPTS.linewidth = 80\n elif profile == \"full\":\n PRINT_OPTS.precision = 4\n PRINT_OPTS.threshold = float('inf')\n PRINT_OPTS.edgeitems = 3\n PRINT_OPTS.linewidth = 80\n\n if precision is not None:\n PRINT_OPTS.precision = precision\n if threshold is not None:\n PRINT_OPTS.threshold = threshold\n if edgeitems is not None:\n PRINT_OPTS.edgeitems = edgeitems\n if linewidth is not None:\n PRINT_OPTS.linewidth = linewidth\n\n\ndef _number_format(tensor, min_sz=-1):\n min_sz = max(min_sz, 2)\n tensor = torch.DoubleTensor(tensor.size()).copy_(tensor).abs_().view(tensor.nelement())\n\n pos_inf_mask = tensor.eq(float('inf'))\n neg_inf_mask = tensor.eq(float('-inf'))\n nan_mask = tensor.ne(tensor)\n invalid_value_mask = pos_inf_mask + neg_inf_mask + nan_mask\n if invalid_value_mask.all():\n example_value = 0\n else:\n example_value = tensor[invalid_value_mask.eq(0)][0]\n tensor[invalid_value_mask] = example_value\n if invalid_value_mask.any():\n min_sz = max(min_sz, 3)\n\n int_mode = True\n # TODO: use fmod?\n for value in tensor:\n if value != math.ceil(value):\n int_mode = False\n break\n\n exp_min = tensor.min()\n if exp_min != 0:\n exp_min = math.floor(math.log10(exp_min)) + 1\n else:\n exp_min = 1\n exp_max = tensor.max()\n if exp_max != 0:\n exp_max = math.floor(math.log10(exp_max)) + 1\n else:\n exp_max = 1\n\n scale = 1\n exp_max = int(exp_max)\n prec = PRINT_OPTS.precision\n if int_mode:\n if exp_max > prec + 1:\n format = '{{:11.{}e}}'.format(prec)\n sz = max(min_sz, 7 + prec)\n else:\n sz = max(min_sz, exp_max + 1)\n format = '{:' + str(sz) + '.0f}'\n else:\n if exp_max - exp_min > prec:\n sz = 7 + prec\n 
if abs(exp_max) > 99 or abs(exp_min) > 99:\n sz = sz + 1\n sz = max(min_sz, sz)\n format = '{{:{}.{}e}}'.format(sz, prec)\n else:\n if exp_max > prec + 1 or exp_max < 0:\n sz = max(min_sz, 7)\n scale = math.pow(10, max(exp_max - 1, __MIN_LOG_SCALE))\n else:\n if exp_max == 0:\n sz = 7\n else:\n sz = exp_max + 6\n sz = max(min_sz, sz)\n format = '{{:{}.{}f}}'.format(sz, prec)\n return format, scale, sz\n\n\ndef _tensor_str(self):\n n = PRINT_OPTS.edgeitems\n has_hdots = self.size()[-1] > 2 * n\n has_vdots = self.size()[-2] > 2 * n\n print_full_mat = not has_hdots and not has_vdots\n formatter = _number_format(self, min_sz=3 if not print_full_mat else 0)\n print_dots = self.numel() >= PRINT_OPTS.threshold\n\n dim_sz = max(2, max(len(str(x)) for x in self.size()))\n dim_fmt = \"{:^\" + str(dim_sz) + \"}\"\n dot_fmt = u\"{:^\" + str(dim_sz + 1) + \"}\"\n\n counter_dim = self.ndimension() - 2\n counter = torch.LongStorage(counter_dim).fill_(0)\n counter[counter.size() - 1] = -1\n finished = False\n strt = ''\n while True:\n nrestarted = [False for i in counter]\n nskipped = [False for i in counter]\n for i in _range(counter_dim - 1, -1, -1):\n counter[i] += 1\n if print_dots and counter[i] == n and self.size(i) > 2 * n:\n counter[i] = self.size(i) - n\n nskipped[i] = True\n if counter[i] == self.size(i):\n if i == 0:\n finished = True\n counter[i] = 0\n nrestarted[i] = True\n else:\n break\n if finished:\n break\n elif print_dots:\n if any(nskipped):\n for hdot in nskipped:\n strt += dot_fmt.format('...') if hdot \\\n else dot_fmt.format('')\n strt += '\\n'\n if any(nrestarted):\n strt += ' '\n for vdot in nrestarted:\n strt += dot_fmt.format(u'\\u22EE' if vdot else '')\n strt += '\\n'\n if strt != '':\n strt += '\\n'\n strt += '({},.,.) = \\n'.format(\n ','.join(dim_fmt.format(i) for i in counter))\n submatrix = reduce(lambda t, i: t.select(0, i), counter, self)\n strt += _matrix_str(submatrix, ' ', formatter, print_dots)\n return strt\n\n\ndef __repr_row(row, indent, fmt, scale, sz, truncate=None):\n if truncate is not None:\n dotfmt = \" {:^5} \"\n return (indent +\n ' '.join(fmt.format(val / scale) for val in row[:truncate]) +\n dotfmt.format('...') +\n ' '.join(fmt.format(val / scale) for val in row[-truncate:]) +\n '\\n')\n else:\n return indent + ' '.join(fmt.format(val / scale) for val in row) + '\\n'\n\n\ndef _matrix_str(self, indent='', formatter=None, force_truncate=False):\n n = PRINT_OPTS.edgeitems\n has_hdots = self.size(1) > 2 * n\n has_vdots = self.size(0) > 2 * n\n print_full_mat = not has_hdots and not has_vdots\n\n if formatter is None:\n fmt, scale, sz = _number_format(self,\n min_sz=5 if not print_full_mat else 0)\n else:\n fmt, scale, sz = formatter\n nColumnPerLine = int(math.floor((PRINT_OPTS.linewidth - len(indent)) / (sz + 1)))\n strt = ''\n firstColumn = 0\n\n if not force_truncate and \\\n (self.numel() < PRINT_OPTS.threshold or print_full_mat):\n while firstColumn < self.size(1):\n lastColumn = min(firstColumn + nColumnPerLine - 1, self.size(1) - 1)\n if nColumnPerLine < self.size(1):\n strt += '\\n' if firstColumn != 1 else ''\n strt += 'Columns {} to {} \\n{}'.format(\n firstColumn, lastColumn, indent)\n if scale != 1:\n strt += SCALE_FORMAT.format(scale)\n for l in _range(self.size(0)):\n strt += indent + (' ' if scale != 1 else '')\n row_slice = self[l, firstColumn:lastColumn + 1]\n strt += ' '.join(fmt.format(val / scale) for val in row_slice)\n strt += '\\n'\n firstColumn = lastColumn + 1\n else:\n if scale != 1:\n strt += SCALE_FORMAT.format(scale)\n if 
has_vdots and has_hdots:\n vdotfmt = \"{:^\" + str((sz + 1) * n - 1) + \"}\"\n ddotfmt = u\"{:^5}\"\n for row in self[:n]:\n strt += __repr_row(row, indent, fmt, scale, sz, n)\n strt += indent + ' '.join([vdotfmt.format('...'),\n ddotfmt.format(u'\\u22F1'),\n vdotfmt.format('...')]) + \"\\n\"\n for row in self[-n:]:\n strt += __repr_row(row, indent, fmt, scale, sz, n)\n elif not has_vdots and has_hdots:\n for row in self:\n strt += __repr_row(row, indent, fmt, scale, sz, n)\n elif has_vdots and not has_hdots:\n vdotfmt = u\"{:^\" + \\\n str(len(__repr_row(self[0], '', fmt, scale, sz))) + \\\n \"}\\n\"\n for row in self[:n]:\n strt += __repr_row(row, indent, fmt, scale, sz)\n strt += vdotfmt.format(u'\\u22EE')\n for row in self[-n:]:\n strt += __repr_row(row, indent, fmt, scale, sz)\n else:\n for row in self:\n strt += __repr_row(row, indent, fmt, scale, sz)\n return strt\n\n\ndef _vector_str(self):\n fmt, scale, sz = _number_format(self)\n strt = ''\n ident = ''\n n = PRINT_OPTS.edgeitems\n dotfmt = u\"{:^\" + str(sz) + \"}\\n\"\n if scale != 1:\n strt += SCALE_FORMAT.format(scale)\n ident = ' '\n if self.numel() < PRINT_OPTS.threshold:\n return (strt +\n '\\n'.join(ident + fmt.format(val / scale) for val in self) +\n '\\n')\n else:\n return (strt +\n '\\n'.join(ident + fmt.format(val / scale) for val in self[:n]) +\n '\\n' + (ident + dotfmt.format(u\"\\u22EE\")) +\n '\\n'.join(ident + fmt.format(val / scale) for val in self[-n:]) +\n '\\n')\n\n\ndef _str(self):\n if self.ndimension() == 0:\n return '[{} with no dimension]\\n'.format(torch.typename(self))\n elif self.ndimension() == 1:\n strt = _vector_str(self)\n elif self.ndimension() == 2:\n strt = _matrix_str(self)\n else:\n strt = _tensor_str(self)\n\n size_str = 'x'.join(str(size) for size in self.size())\n device_str = '' if not self.is_cuda else \\\n ' (GPU {})'.format(self.get_device())\n strt += '[{} of size {}{}]\\n'.format(torch.typename(self),\n size_str, device_str)\n return '\\n' + strt\n", "path": "torch/_tensor_str.py"}]}
| 3,789 | 198 |
gh_patches_debug_24278
|
rasdani/github-patches
|
git_diff
|
DDMAL__CantusDB-783
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
if a person browsing the site has the URL, they can view user detail pages
While investigating #479, I discovered that the detail pages for users who are not indexers are visible to not-logged-in users if you know the URL (e.g. http://206.12.88.113/user/1327). User detail pages should only be accessible to not-logged-in visitors if the user's `is_indexer` field is set to `True` (e.g. http://206.12.88.113/user/613 is (correctly) visible to everyone).
This is not enormously urgent - I don't believe there are any links to the detail pages of non-indexer users displayed anywhere on the site, so you have to know the URL to visit these pages. But there's the potential for people's information (i.e. name and institution) to be divulged even if they're not officially affiliated with CantusDB. And these pages should not be visible to anonymous users anyway.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `django/cantusdb_project/main_app/views/user.py`
Content:
```
1 from django.urls import reverse
2 from django.db.models.aggregates import Count
3 from django.views.generic import DetailView
4 from django.contrib.auth import get_user_model, login as auth_login
5 from main_app.models import Source
6 from django.views.generic import ListView
7 from django.contrib.auth.mixins import LoginRequiredMixin
8 from django.db.models import Q
9 from django.core.paginator import Paginator
10 from django.contrib.auth.views import LogoutView, LoginView
11 from django.contrib import messages
12 from extra_views import SearchableListMixin
13 from django.http import HttpResponseRedirect
14
15
16 class UserDetailView(DetailView):
17 """Detail view for User model
18
19 Accessed by /users/<pk>
20 """
21
22 model = get_user_model()
23 context_object_name = "user"
24 template_name = "user_detail.html"
25
26 def get_context_data(self, **kwargs):
27 context = super().get_context_data(**kwargs)
28 user = self.get_object()
29 display_unpublished = self.request.user.is_authenticated
30 sort_by_siglum = lambda source: source.siglum
31 if display_unpublished:
32 context["inventoried_sources"] = sorted(
33 user.inventoried_sources.all(), key=sort_by_siglum
34 )
35 context["full_text_sources"] = sorted(
36 user.entered_full_text_for_sources.all(), key=sort_by_siglum
37 )
38 context["melody_sources"] = sorted(
39 user.entered_melody_for_sources.all(), key=sort_by_siglum
40 )
41 context["proofread_sources"] = sorted(
42 user.proofread_sources.all(), key=sort_by_siglum
43 )
44 context["edited_sources"] = sorted(
45 user.edited_sources.all(), key=sort_by_siglum
46 )
47 else:
48 context["inventoried_sources"] = sorted(
49 user.inventoried_sources.all().filter(published=True),
50 key=sort_by_siglum,
51 )
52 context["full_text_sources"] = sorted(
53 user.entered_full_text_for_sources.all().filter(published=True),
54 key=sort_by_siglum,
55 )
56 context["melody_sources"] = sorted(
57 user.entered_melody_for_sources.all().filter(published=True),
58 key=sort_by_siglum,
59 )
60 context["proofread_sources"] = sorted(
61 user.proofread_sources.all().filter(published=True), key=sort_by_siglum
62 )
63 context["edited_sources"] = sorted(
64 user.edited_sources.all().filter(published=True), key=sort_by_siglum
65 )
66
67 return context
68
69
70 class UserSourceListView(LoginRequiredMixin, ListView):
71 model = Source
72 context_object_name = "sources"
73 template_name = "user_source_list.html"
74 paginate_by = 100
75
76 def get_queryset(self):
77 return (
78 Source.objects.filter(
79 Q(current_editors=self.request.user)
80 | Q(created_by=self.request.user)
81 # | Q(inventoried_by=self.request.user)
82 # | Q(full_text_entered_by=self.request.user)
83 # | Q(melodies_entered_by=self.request.user)
84 # | Q(proofreaders=self.request.user)
85 # | Q(other_editors=self.request.user)
86 )
87 .order_by("-date_created")
88 .distinct()
89 )
90
91 def get_context_data(self, **kwargs):
92 context = super().get_context_data(**kwargs)
93
94 user_created_sources = (
95 Source.objects.filter(created_by=self.request.user)
96 .order_by("-date_created")
97 .distinct()
98 )
99 paginator = Paginator(user_created_sources, 10)
100 page_number = self.request.GET.get("page2")
101 page_obj = paginator.get_page(page_number)
102
103 context["user_created_sources_page_obj"] = page_obj
104 return context
105
106
107 class CustomLogoutView(LogoutView):
108 def get_next_page(self):
109 next_page = super().get_next_page()
110 messages.success(self.request, "You have successfully logged out!")
111 return next_page
112
113
114 class UserListView(LoginRequiredMixin, SearchableListMixin, ListView):
115 """A list of all User objects
116
117 This view is equivalent to the user list view on the old Cantus.
118 This includes all User objects on the old Cantus.
119 When passed a `?q=<query>` argument in the GET request, it will filter users
120 based on the fields defined in `search_fields` with the `icontains` lookup.
121
122 Accessed by /users/
123 """
124
125 model = get_user_model()
126 ordering = "full_name"
127 search_fields = ["full_name", "institution", "city", "country"]
128 paginate_by = 100
129 template_name = "user_list.html"
130 context_object_name = "users"
131
132
133 class IndexerListView(SearchableListMixin, ListView):
134 """A list of User objects shown to the public
135
136 This view replaces the indexer list view on the old Cantus.
137 The indexers are considered a subset of all User objects, the subset shown to the public.
138 This includes the User objects corresponding to Indexer objects on the old Cantus.
139 When passed a `?q=<query>` argument in the GET request, it will filter users
140 based on the fields defined in `search_fields` with the `icontains` lookup.
141
142 Accessed by /indexers/
143 """
144
145 model = get_user_model()
146 ordering = "full_name"
147 search_fields = ["full_name", "institution", "city", "country"]
148 paginate_by = 100
149 template_name = "indexer_list.html"
150 context_object_name = "indexers"
151
152 def get_queryset(self):
153 all_users = super().get_queryset()
154 indexers = all_users.filter(is_indexer=True)
155 display_unpublished = self.request.user.is_authenticated
156 if display_unpublished:
157 indexers = indexers.annotate(source_count=Count("inventoried_sources"))
158 # display those who have at least one source
159 return indexers.filter(source_count__gte=1)
160 else:
161 indexers = indexers.annotate(
162 source_count=Count(
163 "inventoried_sources", filter=Q(inventoried_sources__published=True)
164 )
165 )
166 # display those who have at least one published source
167 return indexers.filter(source_count__gte=1)
168
169
170 class CustomLoginView(LoginView):
171 def form_valid(self, form):
172 auth_login(self.request, form.get_user())
173 # if the user has not yet changed the initial password that was assigned to them,
174 # redirect them to the change-password page everytime they log in
175 # with warning messages prompting them to change their password
176 if form.get_user().changed_initial_password == False:
177 return HttpResponseRedirect(reverse("change-password"))
178 return HttpResponseRedirect(self.get_success_url())
179
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/django/cantusdb_project/main_app/views/user.py b/django/cantusdb_project/main_app/views/user.py
--- a/django/cantusdb_project/main_app/views/user.py
+++ b/django/cantusdb_project/main_app/views/user.py
@@ -11,6 +11,7 @@
from django.contrib import messages
from extra_views import SearchableListMixin
from django.http import HttpResponseRedirect
+from django.core.exceptions import PermissionDenied
class UserDetailView(DetailView):
@@ -24,9 +25,16 @@
template_name = "user_detail.html"
def get_context_data(self, **kwargs):
- context = super().get_context_data(**kwargs)
user = self.get_object()
- display_unpublished = self.request.user.is_authenticated
+ # to begin, if the person viewing the site is not logged in,
+ # they should only be able to view the detail pages of indexers,
+ # and not the detail pages of run-of-the-mill users
+ viewing_user = self.request.user
+ if not (viewing_user.is_authenticated or user.is_indexer):
+ raise PermissionDenied()
+
+ context = super().get_context_data(**kwargs)
+ display_unpublished = viewing_user.is_authenticated
sort_by_siglum = lambda source: source.siglum
if display_unpublished:
context["inventoried_sources"] = sorted(
|
{"golden_diff": "diff --git a/django/cantusdb_project/main_app/views/user.py b/django/cantusdb_project/main_app/views/user.py\n--- a/django/cantusdb_project/main_app/views/user.py\n+++ b/django/cantusdb_project/main_app/views/user.py\n@@ -11,6 +11,7 @@\n from django.contrib import messages\n from extra_views import SearchableListMixin\n from django.http import HttpResponseRedirect\n+from django.core.exceptions import PermissionDenied\n \n \n class UserDetailView(DetailView):\n@@ -24,9 +25,16 @@\n template_name = \"user_detail.html\"\n \n def get_context_data(self, **kwargs):\n- context = super().get_context_data(**kwargs)\n user = self.get_object()\n- display_unpublished = self.request.user.is_authenticated\n+ # to begin, if the person viewing the site is not logged in,\n+ # they should only be able to view the detail pages of indexers,\n+ # and not the detail pages of run-of-the-mill users\n+ viewing_user = self.request.user\n+ if not (viewing_user.is_authenticated or user.is_indexer):\n+ raise PermissionDenied()\n+\n+ context = super().get_context_data(**kwargs)\n+ display_unpublished = viewing_user.is_authenticated\n sort_by_siglum = lambda source: source.siglum\n if display_unpublished:\n context[\"inventoried_sources\"] = sorted(\n", "issue": "if person browsing the site has the url, they can view user detail pages\nwhile investigating #479, I discovered that the detail pages for users who are not indexers are visible to not-logged-in users if you know the URL (e.g. http://206.12.88.113/user/1327). User detail pages should only be accessible to not-logged-in visitor if the user's `is-indexer` field is set to `True` (e.g. http://206.12.88.113/user/613 is (correctly) visible to everyone).\r\n\r\nThis is not enormously urgent - I don't believe there are any links to the detail pages of non-indexer users displayed anywhere on the site, so you have to know the URL to visit these pages. But there's the potential of people's information (i.e. name and institution) being divulged even if they're not officially affiliated with CantusDB. 
And these pages should not be visible to anonymous users anyways.\n", "before_files": [{"content": "from django.urls import reverse\nfrom django.db.models.aggregates import Count\nfrom django.views.generic import DetailView\nfrom django.contrib.auth import get_user_model, login as auth_login\nfrom main_app.models import Source\nfrom django.views.generic import ListView\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.db.models import Q\nfrom django.core.paginator import Paginator\nfrom django.contrib.auth.views import LogoutView, LoginView\nfrom django.contrib import messages\nfrom extra_views import SearchableListMixin\nfrom django.http import HttpResponseRedirect\n\n\nclass UserDetailView(DetailView):\n \"\"\"Detail view for User model\n\n Accessed by /users/<pk>\n \"\"\"\n\n model = get_user_model()\n context_object_name = \"user\"\n template_name = \"user_detail.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n user = self.get_object()\n display_unpublished = self.request.user.is_authenticated\n sort_by_siglum = lambda source: source.siglum\n if display_unpublished:\n context[\"inventoried_sources\"] = sorted(\n user.inventoried_sources.all(), key=sort_by_siglum\n )\n context[\"full_text_sources\"] = sorted(\n user.entered_full_text_for_sources.all(), key=sort_by_siglum\n )\n context[\"melody_sources\"] = sorted(\n user.entered_melody_for_sources.all(), key=sort_by_siglum\n )\n context[\"proofread_sources\"] = sorted(\n user.proofread_sources.all(), key=sort_by_siglum\n )\n context[\"edited_sources\"] = sorted(\n user.edited_sources.all(), key=sort_by_siglum\n )\n else:\n context[\"inventoried_sources\"] = sorted(\n user.inventoried_sources.all().filter(published=True),\n key=sort_by_siglum,\n )\n context[\"full_text_sources\"] = sorted(\n user.entered_full_text_for_sources.all().filter(published=True),\n key=sort_by_siglum,\n )\n context[\"melody_sources\"] = sorted(\n user.entered_melody_for_sources.all().filter(published=True),\n key=sort_by_siglum,\n )\n context[\"proofread_sources\"] = sorted(\n user.proofread_sources.all().filter(published=True), key=sort_by_siglum\n )\n context[\"edited_sources\"] = sorted(\n user.edited_sources.all().filter(published=True), key=sort_by_siglum\n )\n\n return context\n\n\nclass UserSourceListView(LoginRequiredMixin, ListView):\n model = Source\n context_object_name = \"sources\"\n template_name = \"user_source_list.html\"\n paginate_by = 100\n\n def get_queryset(self):\n return (\n Source.objects.filter(\n Q(current_editors=self.request.user)\n | Q(created_by=self.request.user)\n # | Q(inventoried_by=self.request.user)\n # | Q(full_text_entered_by=self.request.user)\n # | Q(melodies_entered_by=self.request.user)\n # | Q(proofreaders=self.request.user)\n # | Q(other_editors=self.request.user)\n )\n .order_by(\"-date_created\")\n .distinct()\n )\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n user_created_sources = (\n Source.objects.filter(created_by=self.request.user)\n .order_by(\"-date_created\")\n .distinct()\n )\n paginator = Paginator(user_created_sources, 10)\n page_number = self.request.GET.get(\"page2\")\n page_obj = paginator.get_page(page_number)\n\n context[\"user_created_sources_page_obj\"] = page_obj\n return context\n\n\nclass CustomLogoutView(LogoutView):\n def get_next_page(self):\n next_page = super().get_next_page()\n messages.success(self.request, \"You have successfully logged out!\")\n return 
next_page\n\n\nclass UserListView(LoginRequiredMixin, SearchableListMixin, ListView):\n \"\"\"A list of all User objects\n\n This view is equivalent to the user list view on the old Cantus.\n This includes all User objects on the old Cantus.\n When passed a `?q=<query>` argument in the GET request, it will filter users\n based on the fields defined in `search_fields` with the `icontains` lookup.\n\n Accessed by /users/\n \"\"\"\n\n model = get_user_model()\n ordering = \"full_name\"\n search_fields = [\"full_name\", \"institution\", \"city\", \"country\"]\n paginate_by = 100\n template_name = \"user_list.html\"\n context_object_name = \"users\"\n\n\nclass IndexerListView(SearchableListMixin, ListView):\n \"\"\"A list of User objects shown to the public\n\n This view replaces the indexer list view on the old Cantus.\n The indexers are considered a subset of all User objects, the subset shown to the public.\n This includes the User objects corresponding to Indexer objects on the old Cantus.\n When passed a `?q=<query>` argument in the GET request, it will filter users\n based on the fields defined in `search_fields` with the `icontains` lookup.\n\n Accessed by /indexers/\n \"\"\"\n\n model = get_user_model()\n ordering = \"full_name\"\n search_fields = [\"full_name\", \"institution\", \"city\", \"country\"]\n paginate_by = 100\n template_name = \"indexer_list.html\"\n context_object_name = \"indexers\"\n\n def get_queryset(self):\n all_users = super().get_queryset()\n indexers = all_users.filter(is_indexer=True)\n display_unpublished = self.request.user.is_authenticated\n if display_unpublished:\n indexers = indexers.annotate(source_count=Count(\"inventoried_sources\"))\n # display those who have at least one source\n return indexers.filter(source_count__gte=1)\n else:\n indexers = indexers.annotate(\n source_count=Count(\n \"inventoried_sources\", filter=Q(inventoried_sources__published=True)\n )\n )\n # display those who have at least one published source\n return indexers.filter(source_count__gte=1)\n\n\nclass CustomLoginView(LoginView):\n def form_valid(self, form):\n auth_login(self.request, form.get_user())\n # if the user has not yet changed the initial password that was assigned to them,\n # redirect them to the change-password page everytime they log in\n # with warning messages prompting them to change their password\n if form.get_user().changed_initial_password == False:\n return HttpResponseRedirect(reverse(\"change-password\"))\n return HttpResponseRedirect(self.get_success_url())\n", "path": "django/cantusdb_project/main_app/views/user.py"}], "after_files": [{"content": "from django.urls import reverse\nfrom django.db.models.aggregates import Count\nfrom django.views.generic import DetailView\nfrom django.contrib.auth import get_user_model, login as auth_login\nfrom main_app.models import Source\nfrom django.views.generic import ListView\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.db.models import Q\nfrom django.core.paginator import Paginator\nfrom django.contrib.auth.views import LogoutView, LoginView\nfrom django.contrib import messages\nfrom extra_views import SearchableListMixin\nfrom django.http import HttpResponseRedirect\nfrom django.core.exceptions import PermissionDenied\n\n\nclass UserDetailView(DetailView):\n \"\"\"Detail view for User model\n\n Accessed by /users/<pk>\n \"\"\"\n\n model = get_user_model()\n context_object_name = \"user\"\n template_name = \"user_detail.html\"\n\n def get_context_data(self, **kwargs):\n user = 
self.get_object()\n # to begin, if the person viewing the site is not logged in,\n # they should only be able to view the detail pages of indexers,\n # and not the detail pages of run-of-the-mill users\n viewing_user = self.request.user\n if not (viewing_user.is_authenticated or user.is_indexer):\n raise PermissionDenied()\n\n context = super().get_context_data(**kwargs)\n display_unpublished = viewing_user.is_authenticated\n sort_by_siglum = lambda source: source.siglum\n if display_unpublished:\n context[\"inventoried_sources\"] = sorted(\n user.inventoried_sources.all(), key=sort_by_siglum\n )\n context[\"full_text_sources\"] = sorted(\n user.entered_full_text_for_sources.all(), key=sort_by_siglum\n )\n context[\"melody_sources\"] = sorted(\n user.entered_melody_for_sources.all(), key=sort_by_siglum\n )\n context[\"proofread_sources\"] = sorted(\n user.proofread_sources.all(), key=sort_by_siglum\n )\n context[\"edited_sources\"] = sorted(\n user.edited_sources.all(), key=sort_by_siglum\n )\n else:\n context[\"inventoried_sources\"] = sorted(\n user.inventoried_sources.all().filter(published=True),\n key=sort_by_siglum,\n )\n context[\"full_text_sources\"] = sorted(\n user.entered_full_text_for_sources.all().filter(published=True),\n key=sort_by_siglum,\n )\n context[\"melody_sources\"] = sorted(\n user.entered_melody_for_sources.all().filter(published=True),\n key=sort_by_siglum,\n )\n context[\"proofread_sources\"] = sorted(\n user.proofread_sources.all().filter(published=True), key=sort_by_siglum\n )\n context[\"edited_sources\"] = sorted(\n user.edited_sources.all().filter(published=True), key=sort_by_siglum\n )\n\n return context\n\n\nclass UserSourceListView(LoginRequiredMixin, ListView):\n model = Source\n context_object_name = \"sources\"\n template_name = \"user_source_list.html\"\n paginate_by = 100\n\n def get_queryset(self):\n return (\n Source.objects.filter(\n Q(current_editors=self.request.user)\n | Q(created_by=self.request.user)\n # | Q(inventoried_by=self.request.user)\n # | Q(full_text_entered_by=self.request.user)\n # | Q(melodies_entered_by=self.request.user)\n # | Q(proofreaders=self.request.user)\n # | Q(other_editors=self.request.user)\n )\n .order_by(\"-date_created\")\n .distinct()\n )\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n user_created_sources = (\n Source.objects.filter(created_by=self.request.user)\n .order_by(\"-date_created\")\n .distinct()\n )\n paginator = Paginator(user_created_sources, 10)\n page_number = self.request.GET.get(\"page2\")\n page_obj = paginator.get_page(page_number)\n\n context[\"user_created_sources_page_obj\"] = page_obj\n return context\n\n\nclass CustomLogoutView(LogoutView):\n def get_next_page(self):\n next_page = super().get_next_page()\n messages.success(self.request, \"You have successfully logged out!\")\n return next_page\n\n\nclass UserListView(LoginRequiredMixin, SearchableListMixin, ListView):\n \"\"\"A list of all User objects\n\n This view is equivalent to the user list view on the old Cantus.\n This includes all User objects on the old Cantus.\n When passed a `?q=<query>` argument in the GET request, it will filter users\n based on the fields defined in `search_fields` with the `icontains` lookup.\n\n Accessed by /users/\n \"\"\"\n\n model = get_user_model()\n ordering = \"full_name\"\n search_fields = [\"full_name\", \"institution\", \"city\", \"country\"]\n paginate_by = 100\n template_name = \"user_list.html\"\n context_object_name = \"users\"\n\n\nclass 
IndexerListView(SearchableListMixin, ListView):\n \"\"\"A list of User objects shown to the public\n\n This view replaces the indexer list view on the old Cantus.\n The indexers are considered a subset of all User objects, the subset shown to the public.\n This includes the User objects corresponding to Indexer objects on the old Cantus.\n When passed a `?q=<query>` argument in the GET request, it will filter users\n based on the fields defined in `search_fields` with the `icontains` lookup.\n\n Accessed by /indexers/\n \"\"\"\n\n model = get_user_model()\n ordering = \"full_name\"\n search_fields = [\"full_name\", \"institution\", \"city\", \"country\"]\n paginate_by = 100\n template_name = \"indexer_list.html\"\n context_object_name = \"indexers\"\n\n def get_queryset(self):\n all_users = super().get_queryset()\n indexers = all_users.filter(is_indexer=True)\n display_unpublished = self.request.user.is_authenticated\n if display_unpublished:\n indexers = indexers.annotate(source_count=Count(\"inventoried_sources\"))\n # display those who have at least one source\n return indexers.filter(source_count__gte=1)\n else:\n indexers = indexers.annotate(\n source_count=Count(\n \"inventoried_sources\", filter=Q(inventoried_sources__published=True)\n )\n )\n # display those who have at least one published source\n return indexers.filter(source_count__gte=1)\n\n\nclass CustomLoginView(LoginView):\n def form_valid(self, form):\n auth_login(self.request, form.get_user())\n # if the user has not yet changed the initial password that was assigned to them,\n # redirect them to the change-password page everytime they log in\n # with warning messages prompting them to change their password\n if form.get_user().changed_initial_password == False:\n return HttpResponseRedirect(reverse(\"change-password\"))\n return HttpResponseRedirect(self.get_success_url())\n", "path": "django/cantusdb_project/main_app/views/user.py"}]}
| 2,368 | 312 |
gh_patches_debug_42793
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-python-2419
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Performance instrumentation for Sanic Integration
### Problem Statement
Currently, the `SanicIntegration` does not start a transaction for the request-response cycle.
### Solution Brainstorm
If Sanic is a WSGI-based framework, maybe we can use `SentryWsgiMiddleware` like the `bottle` integration does:
https://github.com/getsentry/sentry-python/blob/master/sentry_sdk/integrations/bottle.py
If not, we can have a look at how the low-level WSGI integration creates transactions (and continues traces from incoming requests): https://github.com/getsentry/sentry-python/blob/master/sentry_sdk/integrations/wsgi.py#L97-L115
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sentry_sdk/integrations/sanic.py`
Content:
```
1 import sys
2 import weakref
3 from inspect import isawaitable
4
5 from sentry_sdk._compat import urlparse, reraise
6 from sentry_sdk.hub import Hub
7 from sentry_sdk.tracing import TRANSACTION_SOURCE_COMPONENT
8 from sentry_sdk.utils import (
9 capture_internal_exceptions,
10 event_from_exception,
11 HAS_REAL_CONTEXTVARS,
12 CONTEXTVARS_ERROR_MESSAGE,
13 parse_version,
14 )
15 from sentry_sdk.integrations import Integration, DidNotEnable
16 from sentry_sdk.integrations._wsgi_common import RequestExtractor, _filter_headers
17 from sentry_sdk.integrations.logging import ignore_logger
18
19 from sentry_sdk._types import TYPE_CHECKING
20
21 if TYPE_CHECKING:
22 from typing import Any
23 from typing import Callable
24 from typing import Optional
25 from typing import Union
26 from typing import Tuple
27 from typing import Dict
28
29 from sanic.request import Request, RequestParameters
30
31 from sentry_sdk._types import Event, EventProcessor, Hint
32 from sanic.router import Route
33
34 try:
35 from sanic import Sanic, __version__ as SANIC_VERSION
36 from sanic.exceptions import SanicException
37 from sanic.router import Router
38 from sanic.handlers import ErrorHandler
39 except ImportError:
40 raise DidNotEnable("Sanic not installed")
41
42 old_error_handler_lookup = ErrorHandler.lookup
43 old_handle_request = Sanic.handle_request
44 old_router_get = Router.get
45
46 try:
47 # This method was introduced in Sanic v21.9
48 old_startup = Sanic._startup
49 except AttributeError:
50 pass
51
52
53 class SanicIntegration(Integration):
54 identifier = "sanic"
55 version = None
56
57 @staticmethod
58 def setup_once():
59 # type: () -> None
60
61 SanicIntegration.version = parse_version(SANIC_VERSION)
62
63 if SanicIntegration.version is None:
64 raise DidNotEnable("Unparsable Sanic version: {}".format(SANIC_VERSION))
65
66 if SanicIntegration.version < (0, 8):
67 raise DidNotEnable("Sanic 0.8 or newer required.")
68
69 if not HAS_REAL_CONTEXTVARS:
70 # We better have contextvars or we're going to leak state between
71 # requests.
72 raise DidNotEnable(
73 "The sanic integration for Sentry requires Python 3.7+ "
74 " or the aiocontextvars package." + CONTEXTVARS_ERROR_MESSAGE
75 )
76
77 if SANIC_VERSION.startswith("0.8."):
78 # Sanic 0.8 and older creates a logger named "root" and puts a
79 # stringified version of every exception in there (without exc_info),
80 # which our error deduplication can't detect.
81 #
82 # We explicitly check the version here because it is a very
83 # invasive step to ignore this logger and not necessary in newer
84 # versions at all.
85 #
86 # https://github.com/huge-success/sanic/issues/1332
87 ignore_logger("root")
88
89 if SanicIntegration.version < (21, 9):
90 _setup_legacy_sanic()
91 return
92
93 _setup_sanic()
94
95
96 class SanicRequestExtractor(RequestExtractor):
97 def content_length(self):
98 # type: () -> int
99 if self.request.body is None:
100 return 0
101 return len(self.request.body)
102
103 def cookies(self):
104 # type: () -> Dict[str, str]
105 return dict(self.request.cookies)
106
107 def raw_data(self):
108 # type: () -> bytes
109 return self.request.body
110
111 def form(self):
112 # type: () -> RequestParameters
113 return self.request.form
114
115 def is_json(self):
116 # type: () -> bool
117 raise NotImplementedError()
118
119 def json(self):
120 # type: () -> Optional[Any]
121 return self.request.json
122
123 def files(self):
124 # type: () -> RequestParameters
125 return self.request.files
126
127 def size_of_file(self, file):
128 # type: (Any) -> int
129 return len(file.body or ())
130
131
132 def _setup_sanic():
133 # type: () -> None
134 Sanic._startup = _startup
135 ErrorHandler.lookup = _sentry_error_handler_lookup
136
137
138 def _setup_legacy_sanic():
139 # type: () -> None
140 Sanic.handle_request = _legacy_handle_request
141 Router.get = _legacy_router_get
142 ErrorHandler.lookup = _sentry_error_handler_lookup
143
144
145 async def _startup(self):
146 # type: (Sanic) -> None
147 # This happens about as early in the lifecycle as possible, just after the
148 # Request object is created. The body has not yet been consumed.
149 self.signal("http.lifecycle.request")(_hub_enter)
150
151 # This happens after the handler is complete. In v21.9 this signal is not
152 # dispatched when there is an exception. Therefore we need to close out
153 # and call _hub_exit from the custom exception handler as well.
154 # See https://github.com/sanic-org/sanic/issues/2297
155 self.signal("http.lifecycle.response")(_hub_exit)
156
157 # This happens inside of request handling immediately after the route
158 # has been identified by the router.
159 self.signal("http.routing.after")(_set_transaction)
160
161 # The above signals need to be declared before this can be called.
162 await old_startup(self)
163
164
165 async def _hub_enter(request):
166 # type: (Request) -> None
167 hub = Hub.current
168 request.ctx._sentry_do_integration = (
169 hub.get_integration(SanicIntegration) is not None
170 )
171
172 if not request.ctx._sentry_do_integration:
173 return
174
175 weak_request = weakref.ref(request)
176 request.ctx._sentry_hub = Hub(hub)
177 request.ctx._sentry_hub.__enter__()
178
179 with request.ctx._sentry_hub.configure_scope() as scope:
180 scope.clear_breadcrumbs()
181 scope.add_event_processor(_make_request_processor(weak_request))
182
183
184 async def _hub_exit(request, **_):
185 # type: (Request, **Any) -> None
186 request.ctx._sentry_hub.__exit__(None, None, None)
187
188
189 async def _set_transaction(request, route, **kwargs):
190 # type: (Request, Route, **Any) -> None
191 hub = Hub.current
192 if hub.get_integration(SanicIntegration) is not None:
193 with capture_internal_exceptions():
194 with hub.configure_scope() as scope:
195 route_name = route.name.replace(request.app.name, "").strip(".")
196 scope.set_transaction_name(
197 route_name, source=TRANSACTION_SOURCE_COMPONENT
198 )
199
200
201 def _sentry_error_handler_lookup(self, exception, *args, **kwargs):
202 # type: (Any, Exception, *Any, **Any) -> Optional[object]
203 _capture_exception(exception)
204 old_error_handler = old_error_handler_lookup(self, exception, *args, **kwargs)
205
206 if old_error_handler is None:
207 return None
208
209 if Hub.current.get_integration(SanicIntegration) is None:
210 return old_error_handler
211
212 async def sentry_wrapped_error_handler(request, exception):
213 # type: (Request, Exception) -> Any
214 try:
215 response = old_error_handler(request, exception)
216 if isawaitable(response):
217 response = await response
218 return response
219 except Exception:
220 # Report errors that occur in Sanic error handler. These
221 # exceptions will not even show up in Sanic's
222 # `sanic.exceptions` logger.
223 exc_info = sys.exc_info()
224 _capture_exception(exc_info)
225 reraise(*exc_info)
226 finally:
227 # As mentioned in previous comment in _startup, this can be removed
228 # after https://github.com/sanic-org/sanic/issues/2297 is resolved
229 if SanicIntegration.version and SanicIntegration.version == (21, 9):
230 await _hub_exit(request)
231
232 return sentry_wrapped_error_handler
233
234
235 async def _legacy_handle_request(self, request, *args, **kwargs):
236 # type: (Any, Request, *Any, **Any) -> Any
237 hub = Hub.current
238 if hub.get_integration(SanicIntegration) is None:
239 return old_handle_request(self, request, *args, **kwargs)
240
241 weak_request = weakref.ref(request)
242
243 with Hub(hub) as hub:
244 with hub.configure_scope() as scope:
245 scope.clear_breadcrumbs()
246 scope.add_event_processor(_make_request_processor(weak_request))
247
248 response = old_handle_request(self, request, *args, **kwargs)
249 if isawaitable(response):
250 response = await response
251
252 return response
253
254
255 def _legacy_router_get(self, *args):
256 # type: (Any, Union[Any, Request]) -> Any
257 rv = old_router_get(self, *args)
258 hub = Hub.current
259 if hub.get_integration(SanicIntegration) is not None:
260 with capture_internal_exceptions():
261 with hub.configure_scope() as scope:
262 if SanicIntegration.version and SanicIntegration.version >= (21, 3):
263 # Sanic versions above and including 21.3 append the app name to the
264 # route name, and so we need to remove it from Route name so the
265 # transaction name is consistent across all versions
266 sanic_app_name = self.ctx.app.name
267 sanic_route = rv[0].name
268
269 if sanic_route.startswith("%s." % sanic_app_name):
270 # We add a 1 to the len of the sanic_app_name because there is a dot
271 # that joins app name and the route name
272 # Format: app_name.route_name
273 sanic_route = sanic_route[len(sanic_app_name) + 1 :]
274
275 scope.set_transaction_name(
276 sanic_route, source=TRANSACTION_SOURCE_COMPONENT
277 )
278 else:
279 scope.set_transaction_name(
280 rv[0].__name__, source=TRANSACTION_SOURCE_COMPONENT
281 )
282
283 return rv
284
285
286 def _capture_exception(exception):
287 # type: (Union[Tuple[Optional[type], Optional[BaseException], Any], BaseException]) -> None
288 hub = Hub.current
289 integration = hub.get_integration(SanicIntegration)
290 if integration is None:
291 return
292
293 # If an integration is there, a client has to be there.
294 client = hub.client # type: Any
295
296 with capture_internal_exceptions():
297 event, hint = event_from_exception(
298 exception,
299 client_options=client.options,
300 mechanism={"type": "sanic", "handled": False},
301 )
302 hub.capture_event(event, hint=hint)
303
304
305 def _make_request_processor(weak_request):
306 # type: (Callable[[], Request]) -> EventProcessor
307 def sanic_processor(event, hint):
308 # type: (Event, Optional[Hint]) -> Optional[Event]
309
310 try:
311 if hint and issubclass(hint["exc_info"][0], SanicException):
312 return None
313 except KeyError:
314 pass
315
316 request = weak_request()
317 if request is None:
318 return event
319
320 with capture_internal_exceptions():
321 extractor = SanicRequestExtractor(request)
322 extractor.extract_into_event(event)
323
324 request_info = event["request"]
325 urlparts = urlparse.urlsplit(request.url)
326
327 request_info["url"] = "%s://%s%s" % (
328 urlparts.scheme,
329 urlparts.netloc,
330 urlparts.path,
331 )
332
333 request_info["query_string"] = urlparts.query
334 request_info["method"] = request.method
335 request_info["env"] = {"REMOTE_ADDR": request.remote_addr}
336 request_info["headers"] = _filter_headers(dict(request.headers))
337
338 return event
339
340 return sanic_processor
341
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sentry_sdk/integrations/sanic.py b/sentry_sdk/integrations/sanic.py
--- a/sentry_sdk/integrations/sanic.py
+++ b/sentry_sdk/integrations/sanic.py
@@ -2,9 +2,11 @@
import weakref
from inspect import isawaitable
+from sentry_sdk import continue_trace
from sentry_sdk._compat import urlparse, reraise
+from sentry_sdk.consts import OP
from sentry_sdk.hub import Hub
-from sentry_sdk.tracing import TRANSACTION_SOURCE_COMPONENT
+from sentry_sdk.tracing import TRANSACTION_SOURCE_COMPONENT, TRANSACTION_SOURCE_URL
from sentry_sdk.utils import (
capture_internal_exceptions,
event_from_exception,
@@ -19,6 +21,7 @@
from sentry_sdk._types import TYPE_CHECKING
if TYPE_CHECKING:
+ from collections.abc import Container
from typing import Any
from typing import Callable
from typing import Optional
@@ -27,6 +30,7 @@
from typing import Dict
from sanic.request import Request, RequestParameters
+ from sanic.response import BaseHTTPResponse
from sentry_sdk._types import Event, EventProcessor, Hint
from sanic.router import Route
@@ -54,6 +58,16 @@
identifier = "sanic"
version = None
+ def __init__(self, unsampled_statuses=frozenset({404})):
+ # type: (Optional[Container[int]]) -> None
+ """
+ The unsampled_statuses parameter can be used to specify for which HTTP statuses the
+ transactions should not be sent to Sentry. By default, transactions are sent for all
+ HTTP statuses, except 404. Set unsampled_statuses to None to send transactions for all
+ HTTP statuses, including 404.
+ """
+ self._unsampled_statuses = unsampled_statuses or set()
+
@staticmethod
def setup_once():
# type: () -> None
@@ -180,16 +194,45 @@
scope.clear_breadcrumbs()
scope.add_event_processor(_make_request_processor(weak_request))
+ transaction = continue_trace(
+ dict(request.headers),
+ op=OP.HTTP_SERVER,
+ # Unless the request results in a 404 error, the name and source will get overwritten in _set_transaction
+ name=request.path,
+ source=TRANSACTION_SOURCE_URL,
+ )
+ request.ctx._sentry_transaction = request.ctx._sentry_hub.start_transaction(
+ transaction
+ ).__enter__()
+
+
+async def _hub_exit(request, response=None):
+ # type: (Request, Optional[BaseHTTPResponse]) -> None
+ with capture_internal_exceptions():
+ if not request.ctx._sentry_do_integration:
+ return
+
+ integration = Hub.current.get_integration(SanicIntegration) # type: Integration
+
+ response_status = None if response is None else response.status
+
+ # This capture_internal_exceptions block has been intentionally nested here, so that in case an exception
+ # happens while trying to end the transaction, we still attempt to exit the hub.
+ with capture_internal_exceptions():
+ request.ctx._sentry_transaction.set_http_status(response_status)
+ request.ctx._sentry_transaction.sampled &= (
+ isinstance(integration, SanicIntegration)
+ and response_status not in integration._unsampled_statuses
+ )
+ request.ctx._sentry_transaction.__exit__(None, None, None)
-async def _hub_exit(request, **_):
- # type: (Request, **Any) -> None
- request.ctx._sentry_hub.__exit__(None, None, None)
+ request.ctx._sentry_hub.__exit__(None, None, None)
-async def _set_transaction(request, route, **kwargs):
+async def _set_transaction(request, route, **_):
# type: (Request, Route, **Any) -> None
hub = Hub.current
- if hub.get_integration(SanicIntegration) is not None:
+ if request.ctx._sentry_do_integration:
with capture_internal_exceptions():
with hub.configure_scope() as scope:
route_name = route.name.replace(request.app.name, "").strip(".")
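
For reference, a minimal usage sketch of the patched integration; the DSN, app name, and route are placeholders. With this setup each handled request should produce an `http.server` transaction, and 404 responses are dropped unless `unsampled_statuses` is overridden.

```python
import sentry_sdk
from sentry_sdk.integrations.sanic import SanicIntegration
from sanic import Sanic
from sanic.response import text

sentry_sdk.init(
    dsn="https://public@example.ingest.sentry.io/0",  # placeholder DSN
    traces_sample_rate=1.0,  # sample every transaction for the demo
    integrations=[SanicIntegration(unsampled_statuses=frozenset({404}))],  # the default
)

app = Sanic("sentry_sanic_demo")


@app.get("/")
async def index(request):
    # Served inside an http.server transaction started by the integration.
    return text("ok")


if __name__ == "__main__":
    app.run(host="0.0.0.0", port=8000)
```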
|
{"golden_diff": "diff --git a/sentry_sdk/integrations/sanic.py b/sentry_sdk/integrations/sanic.py\n--- a/sentry_sdk/integrations/sanic.py\n+++ b/sentry_sdk/integrations/sanic.py\n@@ -2,9 +2,11 @@\n import weakref\n from inspect import isawaitable\n \n+from sentry_sdk import continue_trace\n from sentry_sdk._compat import urlparse, reraise\n+from sentry_sdk.consts import OP\n from sentry_sdk.hub import Hub\n-from sentry_sdk.tracing import TRANSACTION_SOURCE_COMPONENT\n+from sentry_sdk.tracing import TRANSACTION_SOURCE_COMPONENT, TRANSACTION_SOURCE_URL\n from sentry_sdk.utils import (\n capture_internal_exceptions,\n event_from_exception,\n@@ -19,6 +21,7 @@\n from sentry_sdk._types import TYPE_CHECKING\n \n if TYPE_CHECKING:\n+ from collections.abc import Container\n from typing import Any\n from typing import Callable\n from typing import Optional\n@@ -27,6 +30,7 @@\n from typing import Dict\n \n from sanic.request import Request, RequestParameters\n+ from sanic.response import BaseHTTPResponse\n \n from sentry_sdk._types import Event, EventProcessor, Hint\n from sanic.router import Route\n@@ -54,6 +58,16 @@\n identifier = \"sanic\"\n version = None\n \n+ def __init__(self, unsampled_statuses=frozenset({404})):\n+ # type: (Optional[Container[int]]) -> None\n+ \"\"\"\n+ The unsampled_statuses parameter can be used to specify for which HTTP statuses the\n+ transactions should not be sent to Sentry. By default, transactions are sent for all\n+ HTTP statuses, except 404. Set unsampled_statuses to None to send transactions for all\n+ HTTP statuses, including 404.\n+ \"\"\"\n+ self._unsampled_statuses = unsampled_statuses or set()\n+\n @staticmethod\n def setup_once():\n # type: () -> None\n@@ -180,16 +194,45 @@\n scope.clear_breadcrumbs()\n scope.add_event_processor(_make_request_processor(weak_request))\n \n+ transaction = continue_trace(\n+ dict(request.headers),\n+ op=OP.HTTP_SERVER,\n+ # Unless the request results in a 404 error, the name and source will get overwritten in _set_transaction\n+ name=request.path,\n+ source=TRANSACTION_SOURCE_URL,\n+ )\n+ request.ctx._sentry_transaction = request.ctx._sentry_hub.start_transaction(\n+ transaction\n+ ).__enter__()\n+\n+\n+async def _hub_exit(request, response=None):\n+ # type: (Request, Optional[BaseHTTPResponse]) -> None\n+ with capture_internal_exceptions():\n+ if not request.ctx._sentry_do_integration:\n+ return\n+\n+ integration = Hub.current.get_integration(SanicIntegration) # type: Integration\n+\n+ response_status = None if response is None else response.status\n+\n+ # This capture_internal_exceptions block has been intentionally nested here, so that in case an exception\n+ # happens while trying to end the transaction, we still attempt to exit the hub.\n+ with capture_internal_exceptions():\n+ request.ctx._sentry_transaction.set_http_status(response_status)\n+ request.ctx._sentry_transaction.sampled &= (\n+ isinstance(integration, SanicIntegration)\n+ and response_status not in integration._unsampled_statuses\n+ )\n+ request.ctx._sentry_transaction.__exit__(None, None, None)\n \n-async def _hub_exit(request, **_):\n- # type: (Request, **Any) -> None\n- request.ctx._sentry_hub.__exit__(None, None, None)\n+ request.ctx._sentry_hub.__exit__(None, None, None)\n \n \n-async def _set_transaction(request, route, **kwargs):\n+async def _set_transaction(request, route, **_):\n # type: (Request, Route, **Any) -> None\n hub = Hub.current\n- if hub.get_integration(SanicIntegration) is not None:\n+ if request.ctx._sentry_do_integration:\n with 
capture_internal_exceptions():\n with hub.configure_scope() as scope:\n route_name = route.name.replace(request.app.name, \"\").strip(\".\")\n", "issue": "Performance instrumentation for Sanic Integration\n### Problem Statement\n\nCurrently the `SanicIntegration` does not start transactions for a request-response-cycle.\n\n### Solution Brainstorm\n\nIf Sanic is a WSGI based framework we maybe can use `SentryWsgiMiddleware` like bottle does: \r\nhttps://github.com/getsentry/sentry-python/blob/master/sentry_sdk/integrations/bottle.py\r\n\r\nIf not we can have a look at how the low level WSGI integration creates transactions (and continues traces from incoming requests): https://github.com/getsentry/sentry-python/blob/master/sentry_sdk/integrations/wsgi.py#L97-L115\n", "before_files": [{"content": "import sys\nimport weakref\nfrom inspect import isawaitable\n\nfrom sentry_sdk._compat import urlparse, reraise\nfrom sentry_sdk.hub import Hub\nfrom sentry_sdk.tracing import TRANSACTION_SOURCE_COMPONENT\nfrom sentry_sdk.utils import (\n capture_internal_exceptions,\n event_from_exception,\n HAS_REAL_CONTEXTVARS,\n CONTEXTVARS_ERROR_MESSAGE,\n parse_version,\n)\nfrom sentry_sdk.integrations import Integration, DidNotEnable\nfrom sentry_sdk.integrations._wsgi_common import RequestExtractor, _filter_headers\nfrom sentry_sdk.integrations.logging import ignore_logger\n\nfrom sentry_sdk._types import TYPE_CHECKING\n\nif TYPE_CHECKING:\n from typing import Any\n from typing import Callable\n from typing import Optional\n from typing import Union\n from typing import Tuple\n from typing import Dict\n\n from sanic.request import Request, RequestParameters\n\n from sentry_sdk._types import Event, EventProcessor, Hint\n from sanic.router import Route\n\ntry:\n from sanic import Sanic, __version__ as SANIC_VERSION\n from sanic.exceptions import SanicException\n from sanic.router import Router\n from sanic.handlers import ErrorHandler\nexcept ImportError:\n raise DidNotEnable(\"Sanic not installed\")\n\nold_error_handler_lookup = ErrorHandler.lookup\nold_handle_request = Sanic.handle_request\nold_router_get = Router.get\n\ntry:\n # This method was introduced in Sanic v21.9\n old_startup = Sanic._startup\nexcept AttributeError:\n pass\n\n\nclass SanicIntegration(Integration):\n identifier = \"sanic\"\n version = None\n\n @staticmethod\n def setup_once():\n # type: () -> None\n\n SanicIntegration.version = parse_version(SANIC_VERSION)\n\n if SanicIntegration.version is None:\n raise DidNotEnable(\"Unparsable Sanic version: {}\".format(SANIC_VERSION))\n\n if SanicIntegration.version < (0, 8):\n raise DidNotEnable(\"Sanic 0.8 or newer required.\")\n\n if not HAS_REAL_CONTEXTVARS:\n # We better have contextvars or we're going to leak state between\n # requests.\n raise DidNotEnable(\n \"The sanic integration for Sentry requires Python 3.7+ \"\n \" or the aiocontextvars package.\" + CONTEXTVARS_ERROR_MESSAGE\n )\n\n if SANIC_VERSION.startswith(\"0.8.\"):\n # Sanic 0.8 and older creates a logger named \"root\" and puts a\n # stringified version of every exception in there (without exc_info),\n # which our error deduplication can't detect.\n #\n # We explicitly check the version here because it is a very\n # invasive step to ignore this logger and not necessary in newer\n # versions at all.\n #\n # https://github.com/huge-success/sanic/issues/1332\n ignore_logger(\"root\")\n\n if SanicIntegration.version < (21, 9):\n _setup_legacy_sanic()\n return\n\n _setup_sanic()\n\n\nclass 
SanicRequestExtractor(RequestExtractor):\n def content_length(self):\n # type: () -> int\n if self.request.body is None:\n return 0\n return len(self.request.body)\n\n def cookies(self):\n # type: () -> Dict[str, str]\n return dict(self.request.cookies)\n\n def raw_data(self):\n # type: () -> bytes\n return self.request.body\n\n def form(self):\n # type: () -> RequestParameters\n return self.request.form\n\n def is_json(self):\n # type: () -> bool\n raise NotImplementedError()\n\n def json(self):\n # type: () -> Optional[Any]\n return self.request.json\n\n def files(self):\n # type: () -> RequestParameters\n return self.request.files\n\n def size_of_file(self, file):\n # type: (Any) -> int\n return len(file.body or ())\n\n\ndef _setup_sanic():\n # type: () -> None\n Sanic._startup = _startup\n ErrorHandler.lookup = _sentry_error_handler_lookup\n\n\ndef _setup_legacy_sanic():\n # type: () -> None\n Sanic.handle_request = _legacy_handle_request\n Router.get = _legacy_router_get\n ErrorHandler.lookup = _sentry_error_handler_lookup\n\n\nasync def _startup(self):\n # type: (Sanic) -> None\n # This happens about as early in the lifecycle as possible, just after the\n # Request object is created. The body has not yet been consumed.\n self.signal(\"http.lifecycle.request\")(_hub_enter)\n\n # This happens after the handler is complete. In v21.9 this signal is not\n # dispatched when there is an exception. Therefore we need to close out\n # and call _hub_exit from the custom exception handler as well.\n # See https://github.com/sanic-org/sanic/issues/2297\n self.signal(\"http.lifecycle.response\")(_hub_exit)\n\n # This happens inside of request handling immediately after the route\n # has been identified by the router.\n self.signal(\"http.routing.after\")(_set_transaction)\n\n # The above signals need to be declared before this can be called.\n await old_startup(self)\n\n\nasync def _hub_enter(request):\n # type: (Request) -> None\n hub = Hub.current\n request.ctx._sentry_do_integration = (\n hub.get_integration(SanicIntegration) is not None\n )\n\n if not request.ctx._sentry_do_integration:\n return\n\n weak_request = weakref.ref(request)\n request.ctx._sentry_hub = Hub(hub)\n request.ctx._sentry_hub.__enter__()\n\n with request.ctx._sentry_hub.configure_scope() as scope:\n scope.clear_breadcrumbs()\n scope.add_event_processor(_make_request_processor(weak_request))\n\n\nasync def _hub_exit(request, **_):\n # type: (Request, **Any) -> None\n request.ctx._sentry_hub.__exit__(None, None, None)\n\n\nasync def _set_transaction(request, route, **kwargs):\n # type: (Request, Route, **Any) -> None\n hub = Hub.current\n if hub.get_integration(SanicIntegration) is not None:\n with capture_internal_exceptions():\n with hub.configure_scope() as scope:\n route_name = route.name.replace(request.app.name, \"\").strip(\".\")\n scope.set_transaction_name(\n route_name, source=TRANSACTION_SOURCE_COMPONENT\n )\n\n\ndef _sentry_error_handler_lookup(self, exception, *args, **kwargs):\n # type: (Any, Exception, *Any, **Any) -> Optional[object]\n _capture_exception(exception)\n old_error_handler = old_error_handler_lookup(self, exception, *args, **kwargs)\n\n if old_error_handler is None:\n return None\n\n if Hub.current.get_integration(SanicIntegration) is None:\n return old_error_handler\n\n async def sentry_wrapped_error_handler(request, exception):\n # type: (Request, Exception) -> Any\n try:\n response = old_error_handler(request, exception)\n if isawaitable(response):\n response = await response\n return 
response\n except Exception:\n # Report errors that occur in Sanic error handler. These\n # exceptions will not even show up in Sanic's\n # `sanic.exceptions` logger.\n exc_info = sys.exc_info()\n _capture_exception(exc_info)\n reraise(*exc_info)\n finally:\n # As mentioned in previous comment in _startup, this can be removed\n # after https://github.com/sanic-org/sanic/issues/2297 is resolved\n if SanicIntegration.version and SanicIntegration.version == (21, 9):\n await _hub_exit(request)\n\n return sentry_wrapped_error_handler\n\n\nasync def _legacy_handle_request(self, request, *args, **kwargs):\n # type: (Any, Request, *Any, **Any) -> Any\n hub = Hub.current\n if hub.get_integration(SanicIntegration) is None:\n return old_handle_request(self, request, *args, **kwargs)\n\n weak_request = weakref.ref(request)\n\n with Hub(hub) as hub:\n with hub.configure_scope() as scope:\n scope.clear_breadcrumbs()\n scope.add_event_processor(_make_request_processor(weak_request))\n\n response = old_handle_request(self, request, *args, **kwargs)\n if isawaitable(response):\n response = await response\n\n return response\n\n\ndef _legacy_router_get(self, *args):\n # type: (Any, Union[Any, Request]) -> Any\n rv = old_router_get(self, *args)\n hub = Hub.current\n if hub.get_integration(SanicIntegration) is not None:\n with capture_internal_exceptions():\n with hub.configure_scope() as scope:\n if SanicIntegration.version and SanicIntegration.version >= (21, 3):\n # Sanic versions above and including 21.3 append the app name to the\n # route name, and so we need to remove it from Route name so the\n # transaction name is consistent across all versions\n sanic_app_name = self.ctx.app.name\n sanic_route = rv[0].name\n\n if sanic_route.startswith(\"%s.\" % sanic_app_name):\n # We add a 1 to the len of the sanic_app_name because there is a dot\n # that joins app name and the route name\n # Format: app_name.route_name\n sanic_route = sanic_route[len(sanic_app_name) + 1 :]\n\n scope.set_transaction_name(\n sanic_route, source=TRANSACTION_SOURCE_COMPONENT\n )\n else:\n scope.set_transaction_name(\n rv[0].__name__, source=TRANSACTION_SOURCE_COMPONENT\n )\n\n return rv\n\n\ndef _capture_exception(exception):\n # type: (Union[Tuple[Optional[type], Optional[BaseException], Any], BaseException]) -> None\n hub = Hub.current\n integration = hub.get_integration(SanicIntegration)\n if integration is None:\n return\n\n # If an integration is there, a client has to be there.\n client = hub.client # type: Any\n\n with capture_internal_exceptions():\n event, hint = event_from_exception(\n exception,\n client_options=client.options,\n mechanism={\"type\": \"sanic\", \"handled\": False},\n )\n hub.capture_event(event, hint=hint)\n\n\ndef _make_request_processor(weak_request):\n # type: (Callable[[], Request]) -> EventProcessor\n def sanic_processor(event, hint):\n # type: (Event, Optional[Hint]) -> Optional[Event]\n\n try:\n if hint and issubclass(hint[\"exc_info\"][0], SanicException):\n return None\n except KeyError:\n pass\n\n request = weak_request()\n if request is None:\n return event\n\n with capture_internal_exceptions():\n extractor = SanicRequestExtractor(request)\n extractor.extract_into_event(event)\n\n request_info = event[\"request\"]\n urlparts = urlparse.urlsplit(request.url)\n\n request_info[\"url\"] = \"%s://%s%s\" % (\n urlparts.scheme,\n urlparts.netloc,\n urlparts.path,\n )\n\n request_info[\"query_string\"] = urlparts.query\n request_info[\"method\"] = request.method\n request_info[\"env\"] = 
{\"REMOTE_ADDR\": request.remote_addr}\n request_info[\"headers\"] = _filter_headers(dict(request.headers))\n\n return event\n\n return sanic_processor\n", "path": "sentry_sdk/integrations/sanic.py"}], "after_files": [{"content": "import sys\nimport weakref\nfrom inspect import isawaitable\n\nfrom sentry_sdk import continue_trace\nfrom sentry_sdk._compat import urlparse, reraise\nfrom sentry_sdk.consts import OP\nfrom sentry_sdk.hub import Hub\nfrom sentry_sdk.tracing import TRANSACTION_SOURCE_COMPONENT, TRANSACTION_SOURCE_URL\nfrom sentry_sdk.utils import (\n capture_internal_exceptions,\n event_from_exception,\n HAS_REAL_CONTEXTVARS,\n CONTEXTVARS_ERROR_MESSAGE,\n parse_version,\n)\nfrom sentry_sdk.integrations import Integration, DidNotEnable\nfrom sentry_sdk.integrations._wsgi_common import RequestExtractor, _filter_headers\nfrom sentry_sdk.integrations.logging import ignore_logger\n\nfrom sentry_sdk._types import TYPE_CHECKING\n\nif TYPE_CHECKING:\n from collections.abc import Container\n from typing import Any\n from typing import Callable\n from typing import Optional\n from typing import Union\n from typing import Tuple\n from typing import Dict\n\n from sanic.request import Request, RequestParameters\n from sanic.response import BaseHTTPResponse\n\n from sentry_sdk._types import Event, EventProcessor, Hint\n from sanic.router import Route\n\ntry:\n from sanic import Sanic, __version__ as SANIC_VERSION\n from sanic.exceptions import SanicException\n from sanic.router import Router\n from sanic.handlers import ErrorHandler\nexcept ImportError:\n raise DidNotEnable(\"Sanic not installed\")\n\nold_error_handler_lookup = ErrorHandler.lookup\nold_handle_request = Sanic.handle_request\nold_router_get = Router.get\n\ntry:\n # This method was introduced in Sanic v21.9\n old_startup = Sanic._startup\nexcept AttributeError:\n pass\n\n\nclass SanicIntegration(Integration):\n identifier = \"sanic\"\n version = None\n\n def __init__(self, unsampled_statuses=frozenset({404})):\n # type: (Optional[Container[int]]) -> None\n \"\"\"\n The unsampled_statuses parameter can be used to specify for which HTTP statuses the\n transactions should not be sent to Sentry. By default, transactions are sent for all\n HTTP statuses, except 404. 
Set unsampled_statuses to None to send transactions for all\n HTTP statuses, including 404.\n \"\"\"\n self._unsampled_statuses = unsampled_statuses or set()\n\n @staticmethod\n def setup_once():\n # type: () -> None\n\n SanicIntegration.version = parse_version(SANIC_VERSION)\n\n if SanicIntegration.version is None:\n raise DidNotEnable(\"Unparsable Sanic version: {}\".format(SANIC_VERSION))\n\n if SanicIntegration.version < (0, 8):\n raise DidNotEnable(\"Sanic 0.8 or newer required.\")\n\n if not HAS_REAL_CONTEXTVARS:\n # We better have contextvars or we're going to leak state between\n # requests.\n raise DidNotEnable(\n \"The sanic integration for Sentry requires Python 3.7+ \"\n \" or the aiocontextvars package.\" + CONTEXTVARS_ERROR_MESSAGE\n )\n\n if SANIC_VERSION.startswith(\"0.8.\"):\n # Sanic 0.8 and older creates a logger named \"root\" and puts a\n # stringified version of every exception in there (without exc_info),\n # which our error deduplication can't detect.\n #\n # We explicitly check the version here because it is a very\n # invasive step to ignore this logger and not necessary in newer\n # versions at all.\n #\n # https://github.com/huge-success/sanic/issues/1332\n ignore_logger(\"root\")\n\n if SanicIntegration.version < (21, 9):\n _setup_legacy_sanic()\n return\n\n _setup_sanic()\n\n\nclass SanicRequestExtractor(RequestExtractor):\n def content_length(self):\n # type: () -> int\n if self.request.body is None:\n return 0\n return len(self.request.body)\n\n def cookies(self):\n # type: () -> Dict[str, str]\n return dict(self.request.cookies)\n\n def raw_data(self):\n # type: () -> bytes\n return self.request.body\n\n def form(self):\n # type: () -> RequestParameters\n return self.request.form\n\n def is_json(self):\n # type: () -> bool\n raise NotImplementedError()\n\n def json(self):\n # type: () -> Optional[Any]\n return self.request.json\n\n def files(self):\n # type: () -> RequestParameters\n return self.request.files\n\n def size_of_file(self, file):\n # type: (Any) -> int\n return len(file.body or ())\n\n\ndef _setup_sanic():\n # type: () -> None\n Sanic._startup = _startup\n ErrorHandler.lookup = _sentry_error_handler_lookup\n\n\ndef _setup_legacy_sanic():\n # type: () -> None\n Sanic.handle_request = _legacy_handle_request\n Router.get = _legacy_router_get\n ErrorHandler.lookup = _sentry_error_handler_lookup\n\n\nasync def _startup(self):\n # type: (Sanic) -> None\n # This happens about as early in the lifecycle as possible, just after the\n # Request object is created. The body has not yet been consumed.\n self.signal(\"http.lifecycle.request\")(_hub_enter)\n\n # This happens after the handler is complete. In v21.9 this signal is not\n # dispatched when there is an exception. 
Therefore we need to close out\n # and call _hub_exit from the custom exception handler as well.\n # See https://github.com/sanic-org/sanic/issues/2297\n self.signal(\"http.lifecycle.response\")(_hub_exit)\n\n # This happens inside of request handling immediately after the route\n # has been identified by the router.\n self.signal(\"http.routing.after\")(_set_transaction)\n\n # The above signals need to be declared before this can be called.\n await old_startup(self)\n\n\nasync def _hub_enter(request):\n # type: (Request) -> None\n hub = Hub.current\n request.ctx._sentry_do_integration = (\n hub.get_integration(SanicIntegration) is not None\n )\n\n if not request.ctx._sentry_do_integration:\n return\n\n weak_request = weakref.ref(request)\n request.ctx._sentry_hub = Hub(hub)\n request.ctx._sentry_hub.__enter__()\n\n with request.ctx._sentry_hub.configure_scope() as scope:\n scope.clear_breadcrumbs()\n scope.add_event_processor(_make_request_processor(weak_request))\n\n transaction = continue_trace(\n dict(request.headers),\n op=OP.HTTP_SERVER,\n # Unless the request results in a 404 error, the name and source will get overwritten in _set_transaction\n name=request.path,\n source=TRANSACTION_SOURCE_URL,\n )\n request.ctx._sentry_transaction = request.ctx._sentry_hub.start_transaction(\n transaction\n ).__enter__()\n\n\nasync def _hub_exit(request, response=None):\n # type: (Request, Optional[BaseHTTPResponse]) -> None\n with capture_internal_exceptions():\n if not request.ctx._sentry_do_integration:\n return\n\n integration = Hub.current.get_integration(SanicIntegration) # type: Integration\n\n response_status = None if response is None else response.status\n\n # This capture_internal_exceptions block has been intentionally nested here, so that in case an exception\n # happens while trying to end the transaction, we still attempt to exit the hub.\n with capture_internal_exceptions():\n request.ctx._sentry_transaction.set_http_status(response_status)\n request.ctx._sentry_transaction.sampled &= (\n isinstance(integration, SanicIntegration)\n and response_status not in integration._unsampled_statuses\n )\n request.ctx._sentry_transaction.__exit__(None, None, None)\n\n request.ctx._sentry_hub.__exit__(None, None, None)\n\n\nasync def _set_transaction(request, route, **_):\n # type: (Request, Route, **Any) -> None\n hub = Hub.current\n if request.ctx._sentry_do_integration:\n with capture_internal_exceptions():\n with hub.configure_scope() as scope:\n route_name = route.name.replace(request.app.name, \"\").strip(\".\")\n scope.set_transaction_name(\n route_name, source=TRANSACTION_SOURCE_COMPONENT\n )\n\n\ndef _sentry_error_handler_lookup(self, exception, *args, **kwargs):\n # type: (Any, Exception, *Any, **Any) -> Optional[object]\n _capture_exception(exception)\n old_error_handler = old_error_handler_lookup(self, exception, *args, **kwargs)\n\n if old_error_handler is None:\n return None\n\n if Hub.current.get_integration(SanicIntegration) is None:\n return old_error_handler\n\n async def sentry_wrapped_error_handler(request, exception):\n # type: (Request, Exception) -> Any\n try:\n response = old_error_handler(request, exception)\n if isawaitable(response):\n response = await response\n return response\n except Exception:\n # Report errors that occur in Sanic error handler. 
These\n # exceptions will not even show up in Sanic's\n # `sanic.exceptions` logger.\n exc_info = sys.exc_info()\n _capture_exception(exc_info)\n reraise(*exc_info)\n finally:\n # As mentioned in previous comment in _startup, this can be removed\n # after https://github.com/sanic-org/sanic/issues/2297 is resolved\n if SanicIntegration.version and SanicIntegration.version == (21, 9):\n await _hub_exit(request)\n\n return sentry_wrapped_error_handler\n\n\nasync def _legacy_handle_request(self, request, *args, **kwargs):\n # type: (Any, Request, *Any, **Any) -> Any\n hub = Hub.current\n if hub.get_integration(SanicIntegration) is None:\n return old_handle_request(self, request, *args, **kwargs)\n\n weak_request = weakref.ref(request)\n\n with Hub(hub) as hub:\n with hub.configure_scope() as scope:\n scope.clear_breadcrumbs()\n scope.add_event_processor(_make_request_processor(weak_request))\n\n response = old_handle_request(self, request, *args, **kwargs)\n if isawaitable(response):\n response = await response\n\n return response\n\n\ndef _legacy_router_get(self, *args):\n # type: (Any, Union[Any, Request]) -> Any\n rv = old_router_get(self, *args)\n hub = Hub.current\n if hub.get_integration(SanicIntegration) is not None:\n with capture_internal_exceptions():\n with hub.configure_scope() as scope:\n if SanicIntegration.version and SanicIntegration.version >= (21, 3):\n # Sanic versions above and including 21.3 append the app name to the\n # route name, and so we need to remove it from Route name so the\n # transaction name is consistent across all versions\n sanic_app_name = self.ctx.app.name\n sanic_route = rv[0].name\n\n if sanic_route.startswith(\"%s.\" % sanic_app_name):\n # We add a 1 to the len of the sanic_app_name because there is a dot\n # that joins app name and the route name\n # Format: app_name.route_name\n sanic_route = sanic_route[len(sanic_app_name) + 1 :]\n\n scope.set_transaction_name(\n sanic_route, source=TRANSACTION_SOURCE_COMPONENT\n )\n else:\n scope.set_transaction_name(\n rv[0].__name__, source=TRANSACTION_SOURCE_COMPONENT\n )\n\n return rv\n\n\ndef _capture_exception(exception):\n # type: (Union[Tuple[Optional[type], Optional[BaseException], Any], BaseException]) -> None\n hub = Hub.current\n integration = hub.get_integration(SanicIntegration)\n if integration is None:\n return\n\n # If an integration is there, a client has to be there.\n client = hub.client # type: Any\n\n with capture_internal_exceptions():\n event, hint = event_from_exception(\n exception,\n client_options=client.options,\n mechanism={\"type\": \"sanic\", \"handled\": False},\n )\n hub.capture_event(event, hint=hint)\n\n\ndef _make_request_processor(weak_request):\n # type: (Callable[[], Request]) -> EventProcessor\n def sanic_processor(event, hint):\n # type: (Event, Optional[Hint]) -> Optional[Event]\n\n try:\n if hint and issubclass(hint[\"exc_info\"][0], SanicException):\n return None\n except KeyError:\n pass\n\n request = weak_request()\n if request is None:\n return event\n\n with capture_internal_exceptions():\n extractor = SanicRequestExtractor(request)\n extractor.extract_into_event(event)\n\n request_info = event[\"request\"]\n urlparts = urlparse.urlsplit(request.url)\n\n request_info[\"url\"] = \"%s://%s%s\" % (\n urlparts.scheme,\n urlparts.netloc,\n urlparts.path,\n )\n\n request_info[\"query_string\"] = urlparts.query\n request_info[\"method\"] = request.method\n request_info[\"env\"] = {\"REMOTE_ADDR\": request.remote_addr}\n request_info[\"headers\"] = 
_filter_headers(dict(request.headers))\n\n return event\n\n return sanic_processor\n", "path": "sentry_sdk/integrations/sanic.py"}]}
| 3,853 | 939 |
gh_patches_debug_1158
|
rasdani/github-patches
|
git_diff
|
Netflix__lemur-455
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
A custom cert name with spaces causes AWS Upload failures
Creating a cert with a custom name that has spaces, such as `My Certificate`, will not properly get uploaded to AWS.
-- Potential Fixes:
1. Prevent spaces in custom names
2. Allow custom cert names to be editable
3. If spaces are allowed, the AWS uploader plugin needs to upload it in a way that can work properly.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lemur/certificates/models.py`
Content:
```
1 """
2 .. module: lemur.certificates.models
3 :platform: Unix
4 :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more
5 :license: Apache, see LICENSE for more details.
6 .. moduleauthor:: Kevin Glisson <[email protected]>
7 """
8 import datetime
9
10 import lemur.common.utils
11 from flask import current_app
12
13 from sqlalchemy.orm import relationship
14 from sqlalchemy.sql.expression import case
15 from sqlalchemy.ext.hybrid import hybrid_property
16 from sqlalchemy import event, Integer, ForeignKey, String, DateTime, PassiveDefault, func, Column, Text, Boolean
17
18 from lemur.database import db
19 from lemur.models import certificate_associations, certificate_source_associations, \
20 certificate_destination_associations, certificate_notification_associations, \
21 certificate_replacement_associations, roles_certificates
22 from lemur.plugins.base import plugins
23 from lemur.utils import Vault
24
25 from lemur.common import defaults
26 from lemur.domains.models import Domain
27
28
29 def get_or_increase_name(name):
30 count = Certificate.query.filter(Certificate.name.ilike('{0}%'.format(name))).count()
31
32 if count >= 1:
33 return name + '-' + str(count)
34
35 return name
36
37
38 class Certificate(db.Model):
39 __tablename__ = 'certificates'
40 id = Column(Integer, primary_key=True)
41 owner = Column(String(128), nullable=False)
42 name = Column(String(128), unique=True)
43 description = Column(String(1024))
44 notify = Column(Boolean, default=True)
45
46 body = Column(Text(), nullable=False)
47 chain = Column(Text())
48 private_key = Column(Vault)
49
50 issuer = Column(String(128))
51 serial = Column(String(128))
52 cn = Column(String(128))
53 deleted = Column(Boolean, index=True)
54
55 not_before = Column(DateTime)
56 not_after = Column(DateTime)
57 date_created = Column(DateTime, PassiveDefault(func.now()), nullable=False)
58
59 signing_algorithm = Column(String(128))
60 status = Column(String(128))
61 bits = Column(Integer())
62 san = Column(String(1024)) # TODO this should be migrated to boolean
63
64 user_id = Column(Integer, ForeignKey('users.id'))
65 authority_id = Column(Integer, ForeignKey('authorities.id', ondelete="CASCADE"))
66 root_authority_id = Column(Integer, ForeignKey('authorities.id', ondelete="CASCADE"))
67
68 notifications = relationship("Notification", secondary=certificate_notification_associations, backref='certificate')
69 destinations = relationship("Destination", secondary=certificate_destination_associations, backref='certificate')
70 sources = relationship("Source", secondary=certificate_source_associations, backref='certificate')
71 domains = relationship("Domain", secondary=certificate_associations, backref="certificate")
72 roles = relationship("Role", secondary=roles_certificates, backref="certificate")
73 replaces = relationship("Certificate",
74 secondary=certificate_replacement_associations,
75 primaryjoin=id == certificate_replacement_associations.c.certificate_id, # noqa
76 secondaryjoin=id == certificate_replacement_associations.c.replaced_certificate_id, # noqa
77 backref='replaced')
78
79 endpoints = relationship("Endpoint", backref='certificate')
80
81 def __init__(self, **kwargs):
82 cert = lemur.common.utils.parse_certificate(kwargs['body'])
83
84 self.issuer = defaults.issuer(cert)
85 self.cn = defaults.common_name(cert)
86 self.san = defaults.san(cert)
87 self.not_before = defaults.not_before(cert)
88 self.not_after = defaults.not_after(cert)
89
90 # when destinations are appended they require a valid name.
91 if kwargs.get('name'):
92 self.name = get_or_increase_name(kwargs['name'])
93 else:
94 self.name = get_or_increase_name(defaults.certificate_name(self.cn, self.issuer, self.not_before, self.not_after, self.san))
95
96 self.owner = kwargs['owner']
97 self.body = kwargs['body'].strip()
98
99 if kwargs.get('private_key'):
100 self.private_key = kwargs['private_key'].strip()
101
102 if kwargs.get('chain'):
103 self.chain = kwargs['chain'].strip()
104
105 self.destinations = kwargs.get('destinations', [])
106 self.notifications = kwargs.get('notifications', [])
107 self.description = kwargs.get('description')
108 self.roles = list(set(kwargs.get('roles', [])))
109 self.replaces = kwargs.get('replacements', [])
110 self.signing_algorithm = defaults.signing_algorithm(cert)
111 self.bits = defaults.bitstrength(cert)
112 self.serial = defaults.serial(cert)
113
114 for domain in defaults.domains(cert):
115 self.domains.append(Domain(name=domain))
116
117 @property
118 def active(self):
119 if self.endpoints:
120 return True
121
122 @hybrid_property
123 def expired(self):
124 if self.not_after <= datetime.datetime.now():
125 return True
126
127 @expired.expression
128 def expired(cls):
129 return case(
130 [
131 (cls.now_after <= datetime.datetime.now(), True)
132 ],
133 else_=False
134 )
135
136 @hybrid_property
137 def revoked(self):
138 if 'revoked' == self.status:
139 return True
140
141 @revoked.expression
142 def revoked(cls):
143 return case(
144 [
145 (cls.status == 'revoked', True)
146 ],
147 else_=False
148 )
149
150 def get_arn(self, account_number):
151 """
152 Generate a valid AWS IAM arn
153
154 :rtype : str
155 :param account_number:
156 :return:
157 """
158 return "arn:aws:iam::{}:server-certificate/{}".format(account_number, self.name)
159
160
161 @event.listens_for(Certificate.destinations, 'append')
162 def update_destinations(target, value, initiator):
163 """
164 Attempt to upload the new certificate to the new destination
165
166 :param target:
167 :param value:
168 :param initiator:
169 :return:
170 """
171 destination_plugin = plugins.get(value.plugin_name)
172
173 try:
174 destination_plugin.upload(target.name, target.body, target.private_key, target.chain, value.options)
175 except Exception as e:
176 current_app.logger.exception(e)
177
178
179 @event.listens_for(Certificate.replaces, 'append')
180 def update_replacement(target, value, initiator):
181 """
182 When a certificate is marked as 'replaced' it is then marked as in-active
183
184 :param target:
185 :param value:
186 :param initiator:
187 :return:
188 """
189 value.active = False
190
191
192 @event.listens_for(Certificate, 'before_update')
193 def protect_active(mapper, connection, target):
194 """
195 When a certificate has a replacement do not allow it to be marked as 'active'
196
197 :param connection:
198 :param mapper:
199 :param target:
200 :return:
201 """
202 if target.active:
203 if not target.notify:
204 raise Exception(
205 "Cannot silence notification for a certificate Lemur has been found to be currently deployed onto endpoints"
206 )
207
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lemur/certificates/models.py b/lemur/certificates/models.py
--- a/lemur/certificates/models.py
+++ b/lemur/certificates/models.py
@@ -27,6 +27,7 @@
def get_or_increase_name(name):
+ name = '-'.join(name.strip().split(' '))
count = Certificate.query.filter(Certificate.name.ilike('{0}%'.format(name))).count()
if count >= 1:
|
{"golden_diff": "diff --git a/lemur/certificates/models.py b/lemur/certificates/models.py\n--- a/lemur/certificates/models.py\n+++ b/lemur/certificates/models.py\n@@ -27,6 +27,7 @@\n \n \n def get_or_increase_name(name):\n+ name = '-'.join(name.strip().split(' '))\n count = Certificate.query.filter(Certificate.name.ilike('{0}%'.format(name))).count()\n \n if count >= 1:\n", "issue": "A custom cert name with spaces causes AWS Upload failures\nCreating a cert with a custom name that has spaces, such as: `My Certificate` will not properly get uploaded to AWS.\n\n-- Potential Fixes:\n1. Prevent spaces in custom names\n2. Allow custom cert names to be editable\n3. If spaces are allowed, the AWS uploader plugin needs to upload it in a way that can work properly.\n\n", "before_files": [{"content": "\"\"\"\n.. module: lemur.certificates.models\n :platform: Unix\n :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more\n :license: Apache, see LICENSE for more details.\n.. moduleauthor:: Kevin Glisson <[email protected]>\n\"\"\"\nimport datetime\n\nimport lemur.common.utils\nfrom flask import current_app\n\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy.sql.expression import case\nfrom sqlalchemy.ext.hybrid import hybrid_property\nfrom sqlalchemy import event, Integer, ForeignKey, String, DateTime, PassiveDefault, func, Column, Text, Boolean\n\nfrom lemur.database import db\nfrom lemur.models import certificate_associations, certificate_source_associations, \\\n certificate_destination_associations, certificate_notification_associations, \\\n certificate_replacement_associations, roles_certificates\nfrom lemur.plugins.base import plugins\nfrom lemur.utils import Vault\n\nfrom lemur.common import defaults\nfrom lemur.domains.models import Domain\n\n\ndef get_or_increase_name(name):\n count = Certificate.query.filter(Certificate.name.ilike('{0}%'.format(name))).count()\n\n if count >= 1:\n return name + '-' + str(count)\n\n return name\n\n\nclass Certificate(db.Model):\n __tablename__ = 'certificates'\n id = Column(Integer, primary_key=True)\n owner = Column(String(128), nullable=False)\n name = Column(String(128), unique=True)\n description = Column(String(1024))\n notify = Column(Boolean, default=True)\n\n body = Column(Text(), nullable=False)\n chain = Column(Text())\n private_key = Column(Vault)\n\n issuer = Column(String(128))\n serial = Column(String(128))\n cn = Column(String(128))\n deleted = Column(Boolean, index=True)\n\n not_before = Column(DateTime)\n not_after = Column(DateTime)\n date_created = Column(DateTime, PassiveDefault(func.now()), nullable=False)\n\n signing_algorithm = Column(String(128))\n status = Column(String(128))\n bits = Column(Integer())\n san = Column(String(1024)) # TODO this should be migrated to boolean\n\n user_id = Column(Integer, ForeignKey('users.id'))\n authority_id = Column(Integer, ForeignKey('authorities.id', ondelete=\"CASCADE\"))\n root_authority_id = Column(Integer, ForeignKey('authorities.id', ondelete=\"CASCADE\"))\n\n notifications = relationship(\"Notification\", secondary=certificate_notification_associations, backref='certificate')\n destinations = relationship(\"Destination\", secondary=certificate_destination_associations, backref='certificate')\n sources = relationship(\"Source\", secondary=certificate_source_associations, backref='certificate')\n domains = relationship(\"Domain\", secondary=certificate_associations, backref=\"certificate\")\n roles = relationship(\"Role\", secondary=roles_certificates, backref=\"certificate\")\n 
replaces = relationship(\"Certificate\",\n secondary=certificate_replacement_associations,\n primaryjoin=id == certificate_replacement_associations.c.certificate_id, # noqa\n secondaryjoin=id == certificate_replacement_associations.c.replaced_certificate_id, # noqa\n backref='replaced')\n\n endpoints = relationship(\"Endpoint\", backref='certificate')\n\n def __init__(self, **kwargs):\n cert = lemur.common.utils.parse_certificate(kwargs['body'])\n\n self.issuer = defaults.issuer(cert)\n self.cn = defaults.common_name(cert)\n self.san = defaults.san(cert)\n self.not_before = defaults.not_before(cert)\n self.not_after = defaults.not_after(cert)\n\n # when destinations are appended they require a valid name.\n if kwargs.get('name'):\n self.name = get_or_increase_name(kwargs['name'])\n else:\n self.name = get_or_increase_name(defaults.certificate_name(self.cn, self.issuer, self.not_before, self.not_after, self.san))\n\n self.owner = kwargs['owner']\n self.body = kwargs['body'].strip()\n\n if kwargs.get('private_key'):\n self.private_key = kwargs['private_key'].strip()\n\n if kwargs.get('chain'):\n self.chain = kwargs['chain'].strip()\n\n self.destinations = kwargs.get('destinations', [])\n self.notifications = kwargs.get('notifications', [])\n self.description = kwargs.get('description')\n self.roles = list(set(kwargs.get('roles', [])))\n self.replaces = kwargs.get('replacements', [])\n self.signing_algorithm = defaults.signing_algorithm(cert)\n self.bits = defaults.bitstrength(cert)\n self.serial = defaults.serial(cert)\n\n for domain in defaults.domains(cert):\n self.domains.append(Domain(name=domain))\n\n @property\n def active(self):\n if self.endpoints:\n return True\n\n @hybrid_property\n def expired(self):\n if self.not_after <= datetime.datetime.now():\n return True\n\n @expired.expression\n def expired(cls):\n return case(\n [\n (cls.now_after <= datetime.datetime.now(), True)\n ],\n else_=False\n )\n\n @hybrid_property\n def revoked(self):\n if 'revoked' == self.status:\n return True\n\n @revoked.expression\n def revoked(cls):\n return case(\n [\n (cls.status == 'revoked', True)\n ],\n else_=False\n )\n\n def get_arn(self, account_number):\n \"\"\"\n Generate a valid AWS IAM arn\n\n :rtype : str\n :param account_number:\n :return:\n \"\"\"\n return \"arn:aws:iam::{}:server-certificate/{}\".format(account_number, self.name)\n\n\[email protected]_for(Certificate.destinations, 'append')\ndef update_destinations(target, value, initiator):\n \"\"\"\n Attempt to upload the new certificate to the new destination\n\n :param target:\n :param value:\n :param initiator:\n :return:\n \"\"\"\n destination_plugin = plugins.get(value.plugin_name)\n\n try:\n destination_plugin.upload(target.name, target.body, target.private_key, target.chain, value.options)\n except Exception as e:\n current_app.logger.exception(e)\n\n\[email protected]_for(Certificate.replaces, 'append')\ndef update_replacement(target, value, initiator):\n \"\"\"\n When a certificate is marked as 'replaced' it is then marked as in-active\n\n :param target:\n :param value:\n :param initiator:\n :return:\n \"\"\"\n value.active = False\n\n\[email protected]_for(Certificate, 'before_update')\ndef protect_active(mapper, connection, target):\n \"\"\"\n When a certificate has a replacement do not allow it to be marked as 'active'\n\n :param connection:\n :param mapper:\n :param target:\n :return:\n \"\"\"\n if target.active:\n if not target.notify:\n raise Exception(\n \"Cannot silence notification for a certificate Lemur has been found 
to be currently deployed onto endpoints\"\n )\n", "path": "lemur/certificates/models.py"}], "after_files": [{"content": "\"\"\"\n.. module: lemur.certificates.models\n :platform: Unix\n :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more\n :license: Apache, see LICENSE for more details.\n.. moduleauthor:: Kevin Glisson <[email protected]>\n\"\"\"\nimport datetime\n\nimport lemur.common.utils\nfrom flask import current_app\n\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy.sql.expression import case\nfrom sqlalchemy.ext.hybrid import hybrid_property\nfrom sqlalchemy import event, Integer, ForeignKey, String, DateTime, PassiveDefault, func, Column, Text, Boolean\n\nfrom lemur.database import db\nfrom lemur.models import certificate_associations, certificate_source_associations, \\\n certificate_destination_associations, certificate_notification_associations, \\\n certificate_replacement_associations, roles_certificates\nfrom lemur.plugins.base import plugins\nfrom lemur.utils import Vault\n\nfrom lemur.common import defaults\nfrom lemur.domains.models import Domain\n\n\ndef get_or_increase_name(name):\n name = '-'.join(name.strip().split(' '))\n count = Certificate.query.filter(Certificate.name.ilike('{0}%'.format(name))).count()\n\n if count >= 1:\n return name + '-' + str(count)\n\n return name\n\n\nclass Certificate(db.Model):\n __tablename__ = 'certificates'\n id = Column(Integer, primary_key=True)\n owner = Column(String(128), nullable=False)\n name = Column(String(128), unique=True)\n description = Column(String(1024))\n active = Column(Boolean, default=True)\n\n body = Column(Text(), nullable=False)\n chain = Column(Text())\n private_key = Column(Vault)\n\n issuer = Column(String(128))\n serial = Column(String(128))\n cn = Column(String(128))\n deleted = Column(Boolean, index=True)\n\n not_before = Column(DateTime)\n not_after = Column(DateTime)\n date_created = Column(DateTime, PassiveDefault(func.now()), nullable=False)\n\n signing_algorithm = Column(String(128))\n status = Column(String(128))\n bits = Column(Integer())\n san = Column(String(1024)) # TODO this should be migrated to boolean\n\n user_id = Column(Integer, ForeignKey('users.id'))\n authority_id = Column(Integer, ForeignKey('authorities.id', ondelete=\"CASCADE\"))\n root_authority_id = Column(Integer, ForeignKey('authorities.id', ondelete=\"CASCADE\"))\n\n notifications = relationship(\"Notification\", secondary=certificate_notification_associations, backref='certificate')\n destinations = relationship(\"Destination\", secondary=certificate_destination_associations, backref='certificate')\n sources = relationship(\"Source\", secondary=certificate_source_associations, backref='certificate')\n domains = relationship(\"Domain\", secondary=certificate_associations, backref=\"certificate\")\n roles = relationship(\"Role\", secondary=roles_certificates, backref=\"certificate\")\n replaces = relationship(\"Certificate\",\n secondary=certificate_replacement_associations,\n primaryjoin=id == certificate_replacement_associations.c.certificate_id, # noqa\n secondaryjoin=id == certificate_replacement_associations.c.replaced_certificate_id, # noqa\n backref='replaced')\n\n endpoints = relationship(\"Endpoint\", backref='certificate')\n\n def __init__(self, **kwargs):\n cert = lemur.common.utils.parse_certificate(kwargs['body'])\n\n self.issuer = defaults.issuer(cert)\n self.cn = defaults.common_name(cert)\n self.san = defaults.san(cert)\n self.not_before = defaults.not_before(cert)\n self.not_after = 
defaults.not_after(cert)\n\n # when destinations are appended they require a valid name.\n if kwargs.get('name'):\n self.name = get_or_increase_name(kwargs['name'])\n else:\n self.name = get_or_increase_name(defaults.certificate_name(self.cn, self.issuer, self.not_before, self.not_after, self.san))\n\n self.owner = kwargs['owner']\n self.body = kwargs['body'].strip()\n\n if kwargs.get('private_key'):\n self.private_key = kwargs['private_key'].strip()\n\n if kwargs.get('chain'):\n self.chain = kwargs['chain'].strip()\n\n self.destinations = kwargs.get('destinations', [])\n self.notifications = kwargs.get('notifications', [])\n self.description = kwargs.get('description')\n self.roles = list(set(kwargs.get('roles', [])))\n self.replaces = kwargs.get('replacements', [])\n self.signing_algorithm = defaults.signing_algorithm(cert)\n self.bits = defaults.bitstrength(cert)\n self.serial = defaults.serial(cert)\n\n for domain in defaults.domains(cert):\n self.domains.append(Domain(name=domain))\n\n @hybrid_property\n def expired(self):\n if self.not_after <= datetime.datetime.now():\n return True\n\n @expired.expression\n def expired(cls):\n return case(\n [\n (cls.now_after <= datetime.datetime.now(), True)\n ],\n else_=False\n )\n\n @hybrid_property\n def revoked(self):\n if 'revoked' == self.status:\n return True\n\n @revoked.expression\n def revoked(cls):\n return case(\n [\n (cls.status == 'revoked', True)\n ],\n else_=False\n )\n\n def get_arn(self, account_number):\n \"\"\"\n Generate a valid AWS IAM arn\n\n :rtype : str\n :param account_number:\n :return:\n \"\"\"\n return \"arn:aws:iam::{}:server-certificate/{}\".format(account_number, self.name)\n\n\[email protected]_for(Certificate.destinations, 'append')\ndef update_destinations(target, value, initiator):\n \"\"\"\n Attempt to upload the new certificate to the new destination\n\n :param target:\n :param value:\n :param initiator:\n :return:\n \"\"\"\n destination_plugin = plugins.get(value.plugin_name)\n\n try:\n destination_plugin.upload(target.name, target.body, target.private_key, target.chain, value.options)\n except Exception as e:\n current_app.logger.exception(e)\n\n\[email protected]_for(Certificate.replaces, 'append')\ndef update_replacement(target, value, initiator):\n \"\"\"\n When a certificate is marked as 'replaced' it is then marked as in-active\n\n :param target:\n :param value:\n :param initiator:\n :return:\n \"\"\"\n value.active = False\n\n\[email protected]_for(Certificate, 'before_update')\ndef protect_active(mapper, connection, target):\n \"\"\"\n When a certificate has a replacement do not allow it to be marked as 'active'\n\n :param connection:\n :param mapper:\n :param target:\n :return:\n \"\"\"\n if target.active:\n if target.replaced:\n raise Exception(\"Cannot mark certificate as active, certificate has been marked as replaced.\")\n", "path": "lemur/certificates/models.py"}]}
| 2,352 | 105 |
gh_patches_debug_8106
|
rasdani/github-patches
|
git_diff
|
aws__aws-sam-cli-815
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Region from Env Vars or profile are not respected for ALL commands but package and deploy
The region option in SAM CLI was changed between 0.7.0 and 0.8.0 to add the default explicitly on the [command line option](https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/cli/options.py#L44). This causes the region to always be set and prevents boto3 from doing its own resolution of credentials and regions, which is what sets the correct values into the docker container.
Current workaround is to explicitly set the region when invoking a function or interacting with commands that interact with AWS Services.
Fix is in #811
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `samcli/cli/options.py`
Content:
```
1 """
2 This file contains common CLI options common to all commands. As we add more commands, this will
3 become a repository of options that other commands could use when needed.
4 """
5
6 import click
7
8 from .context import Context
9
10
11 def debug_option(f):
12 """
13 Configures --debug option for CLI
14
15 :param f: Callback Function to be passed to Click
16 """
17 def callback(ctx, param, value):
18 state = ctx.ensure_object(Context)
19 state.debug = value
20 return value
21
22 return click.option('--debug',
23 expose_value=False,
24 is_flag=True,
25 envvar="SAM_DEBUG",
26 help='Turn on debug logging to print debug message generated by SAM CLI.',
27 callback=callback)(f)
28
29
30 def region_option(f):
31 """
32 Configures --region option for CLI
33
34 :param f: Callback Function to be passed to Click
35 """
36 def callback(ctx, param, value):
37 state = ctx.ensure_object(Context)
38 state.region = value
39 return value
40
41 return click.option('--region',
42 expose_value=False,
43 help='Set the AWS Region of the service (e.g. us-east-1).',
44 default='us-east-1',
45 callback=callback)(f)
46
47
48 def profile_option(f):
49 """
50 Configures --profile option for CLI
51
52 :param f: Callback Function to be passed to Click
53 """
54 def callback(ctx, param, value):
55 state = ctx.ensure_object(Context)
56 state.profile = value
57 return value
58
59 return click.option('--profile',
60 expose_value=False,
61 help='Select a specific profile from your credential file to get AWS credentials.',
62 callback=callback)(f)
63
```
Path: `samcli/__init__.py`
Content:
```
1 """
2 SAM CLI version
3 """
4
5 __version__ = '0.8.0'
6
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/samcli/__init__.py b/samcli/__init__.py
--- a/samcli/__init__.py
+++ b/samcli/__init__.py
@@ -2,4 +2,4 @@
SAM CLI version
"""
-__version__ = '0.8.0'
+__version__ = '0.8.1'
diff --git a/samcli/cli/options.py b/samcli/cli/options.py
--- a/samcli/cli/options.py
+++ b/samcli/cli/options.py
@@ -41,7 +41,6 @@
return click.option('--region',
expose_value=False,
help='Set the AWS Region of the service (e.g. us-east-1).',
- default='us-east-1',
callback=callback)(f)
|
{"golden_diff": "diff --git a/samcli/__init__.py b/samcli/__init__.py\n--- a/samcli/__init__.py\n+++ b/samcli/__init__.py\n@@ -2,4 +2,4 @@\n SAM CLI version\n \"\"\"\n \n-__version__ = '0.8.0'\n+__version__ = '0.8.1'\ndiff --git a/samcli/cli/options.py b/samcli/cli/options.py\n--- a/samcli/cli/options.py\n+++ b/samcli/cli/options.py\n@@ -41,7 +41,6 @@\n return click.option('--region',\n expose_value=False,\n help='Set the AWS Region of the service (e.g. us-east-1).',\n- default='us-east-1',\n callback=callback)(f)\n", "issue": "Region from Env Vars or profile are not respected for ALL commands but package and deploy\nThe region option in SAM CLI was changed between 0.7.0 and 0.8.0 to add the default explicitly on the [command line option](https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/cli/options.py#L44). This causes the region to always be set and not allow boto3 to do its resolving of credentials and regions, which is used to set the correct values into the docker container.\r\n\r\nCurrent workaround is to explicitly set the region when invoking a function or interacting with commands that interact with AWS Services.\r\n\r\nFix is in #811\n", "before_files": [{"content": "\"\"\"\nThis file contains common CLI options common to all commands. As we add more commands, this will\nbecome a repository of options that other commands could use when needed.\n\"\"\"\n\nimport click\n\nfrom .context import Context\n\n\ndef debug_option(f):\n \"\"\"\n Configures --debug option for CLI\n\n :param f: Callback Function to be passed to Click\n \"\"\"\n def callback(ctx, param, value):\n state = ctx.ensure_object(Context)\n state.debug = value\n return value\n\n return click.option('--debug',\n expose_value=False,\n is_flag=True,\n envvar=\"SAM_DEBUG\",\n help='Turn on debug logging to print debug message generated by SAM CLI.',\n callback=callback)(f)\n\n\ndef region_option(f):\n \"\"\"\n Configures --region option for CLI\n\n :param f: Callback Function to be passed to Click\n \"\"\"\n def callback(ctx, param, value):\n state = ctx.ensure_object(Context)\n state.region = value\n return value\n\n return click.option('--region',\n expose_value=False,\n help='Set the AWS Region of the service (e.g. us-east-1).',\n default='us-east-1',\n callback=callback)(f)\n\n\ndef profile_option(f):\n \"\"\"\n Configures --profile option for CLI\n\n :param f: Callback Function to be passed to Click\n \"\"\"\n def callback(ctx, param, value):\n state = ctx.ensure_object(Context)\n state.profile = value\n return value\n\n return click.option('--profile',\n expose_value=False,\n help='Select a specific profile from your credential file to get AWS credentials.',\n callback=callback)(f)\n", "path": "samcli/cli/options.py"}, {"content": "\"\"\"\nSAM CLI version\n\"\"\"\n\n__version__ = '0.8.0'\n", "path": "samcli/__init__.py"}], "after_files": [{"content": "\"\"\"\nThis file contains common CLI options common to all commands. 
As we add more commands, this will\nbecome a repository of options that other commands could use when needed.\n\"\"\"\n\nimport click\n\nfrom .context import Context\n\n\ndef debug_option(f):\n \"\"\"\n Configures --debug option for CLI\n\n :param f: Callback Function to be passed to Click\n \"\"\"\n def callback(ctx, param, value):\n state = ctx.ensure_object(Context)\n state.debug = value\n return value\n\n return click.option('--debug',\n expose_value=False,\n is_flag=True,\n envvar=\"SAM_DEBUG\",\n help='Turn on debug logging to print debug message generated by SAM CLI.',\n callback=callback)(f)\n\n\ndef region_option(f):\n \"\"\"\n Configures --region option for CLI\n\n :param f: Callback Function to be passed to Click\n \"\"\"\n def callback(ctx, param, value):\n state = ctx.ensure_object(Context)\n state.region = value\n return value\n\n return click.option('--region',\n expose_value=False,\n help='Set the AWS Region of the service (e.g. us-east-1).',\n callback=callback)(f)\n\n\ndef profile_option(f):\n \"\"\"\n Configures --profile option for CLI\n\n :param f: Callback Function to be passed to Click\n \"\"\"\n def callback(ctx, param, value):\n state = ctx.ensure_object(Context)\n state.profile = value\n return value\n\n return click.option('--profile',\n expose_value=False,\n help='Select a specific profile from your credential file to get AWS credentials.',\n callback=callback)(f)\n", "path": "samcli/cli/options.py"}, {"content": "\"\"\"\nSAM CLI version\n\"\"\"\n\n__version__ = '0.8.1'\n", "path": "samcli/__init__.py"}]}
| 921 | 176 |
gh_patches_debug_29338
|
rasdani/github-patches
|
git_diff
|
enthought__chaco-498
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bar_plot_stacked example unfinished?
**Problem Description**
The example in https://github.com/enthought/chaco/blob/master/examples/demo/basic/bar_plot_stacked.py
doesn't do any stacking.
**Expected behavior:**
If the bars were really stacked, I would expect the sum of all bars to reach (10+5+2) * array([1,2,3,4,5]) (the sum of all values) respectively. Instead, I am getting the following:

Looking at the code, it doesn't use the bar plot's `starting_value` as expected, so the demo doesn't even seem to try to do the right thing.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/demo/basic/bar_plot_stacked.py`
Content:
```
1 """
2 Simple example of a stacked bar chart
3 """
4
5 # Major library imports
6 import numpy
7
8 # Enthought library imports
9 from enable.api import ComponentEditor
10 from traits.api import HasTraits, Instance
11 from traitsui.api import UItem, View
12
13 # Chaco imports
14 from chaco.api import LabelAxis, Plot, ArrayPlotData
15
16 class PlotExample(HasTraits):
17 plot = Instance(Plot)
18 traits_view = View(UItem('plot', editor=ComponentEditor()),
19 width=400, height=400, resizable=True,
20 )
21
22 def __init__(self, index, series_a, series_b, series_c, **kw):
23 super(PlotExample, self).__init__(**kw)
24
25 plot_data = ArrayPlotData(index=index)
26 plot_data.set_data('series_a', series_a)
27 plot_data.set_data('series_b', series_b)
28 plot_data.set_data('series_c', series_c)
29 self.plot = Plot(plot_data)
30 self.plot.plot(('index', 'series_a'), type='bar', bar_width=0.8, color='auto')
31 self.plot.plot(('index', 'series_b'), type='bar', bar_width=0.8, color='auto')
32 self.plot.plot(('index', 'series_c'), type='bar', bar_width=0.8, color='auto')
33
34 # set the plot's value range to 0, otherwise it may pad too much
35 self.plot.value_range.low = 0
36
37 # replace the index values with some nicer labels
38 label_axis = LabelAxis(self.plot, orientation='bottom',
39 title='Months',
40 positions = list(range(1, 10)),
41 labels = ['jan', 'feb', 'march', 'april', 'may'],
42 small_haxis_style=True)
43
44 self.plot.underlays.remove(self.plot.index_axis)
45 self.plot.index_axis = label_axis
46 self.plot.underlays.append(label_axis)
47
48
49 index = numpy.array([1,2,3,4,5])
50 demo = PlotExample(index, index*10, index*5, index*2)
51
52 if __name__ == "__main__":
53 demo.configure_traits()
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/examples/demo/basic/bar_plot_stacked.py b/examples/demo/basic/bar_plot_stacked.py
--- a/examples/demo/basic/bar_plot_stacked.py
+++ b/examples/demo/basic/bar_plot_stacked.py
@@ -11,7 +11,7 @@
from traitsui.api import UItem, View
# Chaco imports
-from chaco.api import LabelAxis, Plot, ArrayPlotData
+from chaco.api import LabelAxis, Plot, ArrayPlotData, ArrayDataSource
class PlotExample(HasTraits):
plot = Instance(Plot)
@@ -22,14 +22,18 @@
def __init__(self, index, series_a, series_b, series_c, **kw):
super(PlotExample, self).__init__(**kw)
+ # Stack them up
+ series_c = series_c + series_b + series_a
+ series_b = series_b + series_a
+
plot_data = ArrayPlotData(index=index)
plot_data.set_data('series_a', series_a)
plot_data.set_data('series_b', series_b)
plot_data.set_data('series_c', series_c)
self.plot = Plot(plot_data)
self.plot.plot(('index', 'series_a'), type='bar', bar_width=0.8, color='auto')
- self.plot.plot(('index', 'series_b'), type='bar', bar_width=0.8, color='auto')
- self.plot.plot(('index', 'series_c'), type='bar', bar_width=0.8, color='auto')
+ self.plot.plot(('index', 'series_b'), type='bar', bar_width=0.8, color='auto', starting_value=ArrayDataSource(series_a))
+ self.plot.plot(('index', 'series_c'), type='bar', bar_width=0.8, color='auto', starting_value=ArrayDataSource(series_b))
# set the plot's value range to 0, otherwise it may pad too much
self.plot.value_range.low = 0
|
{"golden_diff": "diff --git a/examples/demo/basic/bar_plot_stacked.py b/examples/demo/basic/bar_plot_stacked.py\n--- a/examples/demo/basic/bar_plot_stacked.py\n+++ b/examples/demo/basic/bar_plot_stacked.py\n@@ -11,7 +11,7 @@\n from traitsui.api import UItem, View\n \n # Chaco imports\n-from chaco.api import LabelAxis, Plot, ArrayPlotData\n+from chaco.api import LabelAxis, Plot, ArrayPlotData, ArrayDataSource\n \n class PlotExample(HasTraits):\n plot = Instance(Plot)\n@@ -22,14 +22,18 @@\n def __init__(self, index, series_a, series_b, series_c, **kw):\n super(PlotExample, self).__init__(**kw)\n \n+ # Stack them up\n+ series_c = series_c + series_b + series_a\n+ series_b = series_b + series_a\n+\n plot_data = ArrayPlotData(index=index)\n plot_data.set_data('series_a', series_a)\n plot_data.set_data('series_b', series_b)\n plot_data.set_data('series_c', series_c)\n self.plot = Plot(plot_data)\n self.plot.plot(('index', 'series_a'), type='bar', bar_width=0.8, color='auto')\n- self.plot.plot(('index', 'series_b'), type='bar', bar_width=0.8, color='auto')\n- self.plot.plot(('index', 'series_c'), type='bar', bar_width=0.8, color='auto')\n+ self.plot.plot(('index', 'series_b'), type='bar', bar_width=0.8, color='auto', starting_value=ArrayDataSource(series_a))\n+ self.plot.plot(('index', 'series_c'), type='bar', bar_width=0.8, color='auto', starting_value=ArrayDataSource(series_b))\n \n # set the plot's value range to 0, otherwise it may pad too much\n self.plot.value_range.low = 0\n", "issue": "bar_plot_stacked example unfinished?\n**Problem Description**\r\nThe example in https://github.com/enthought/chaco/blob/master/examples/demo/basic/bar_plot_stacked.py\r\ndoesn't do any stacking.\r\n\r\n**Expected behavior:**\r\nI the bars were really stacked, I would expect the sum of all bars to reach (10+5+2) * array([1,2,3,4,5]) (the sum of all values) respectively. 
Instead, I am getting the following:\r\n\r\n\r\nLooking at the code, it doesn't use the bar plot's `starting_value` as expected, so the demo doesn't even seem to try to do the right thing.\r\n\n", "before_files": [{"content": "\"\"\"\nSimple example of a stacked bar chart\n\"\"\"\n\n# Major library imports\nimport numpy\n\n# Enthought library imports\nfrom enable.api import ComponentEditor\nfrom traits.api import HasTraits, Instance\nfrom traitsui.api import UItem, View\n\n# Chaco imports\nfrom chaco.api import LabelAxis, Plot, ArrayPlotData\n\nclass PlotExample(HasTraits):\n plot = Instance(Plot)\n traits_view = View(UItem('plot', editor=ComponentEditor()),\n width=400, height=400, resizable=True, \n )\n\n def __init__(self, index, series_a, series_b, series_c, **kw):\n super(PlotExample, self).__init__(**kw)\n\n plot_data = ArrayPlotData(index=index)\n plot_data.set_data('series_a', series_a)\n plot_data.set_data('series_b', series_b)\n plot_data.set_data('series_c', series_c)\n self.plot = Plot(plot_data)\n self.plot.plot(('index', 'series_a'), type='bar', bar_width=0.8, color='auto')\n self.plot.plot(('index', 'series_b'), type='bar', bar_width=0.8, color='auto')\n self.plot.plot(('index', 'series_c'), type='bar', bar_width=0.8, color='auto')\n\n # set the plot's value range to 0, otherwise it may pad too much\n self.plot.value_range.low = 0\n\n # replace the index values with some nicer labels\n label_axis = LabelAxis(self.plot, orientation='bottom',\n title='Months',\n positions = list(range(1, 10)),\n labels = ['jan', 'feb', 'march', 'april', 'may'],\n small_haxis_style=True)\n\n self.plot.underlays.remove(self.plot.index_axis)\n self.plot.index_axis = label_axis\n self.plot.underlays.append(label_axis)\n\n\nindex = numpy.array([1,2,3,4,5])\ndemo = PlotExample(index, index*10, index*5, index*2)\n\nif __name__ == \"__main__\":\n demo.configure_traits()\n", "path": "examples/demo/basic/bar_plot_stacked.py"}], "after_files": [{"content": "\"\"\"\nSimple example of a stacked bar chart\n\"\"\"\n\n# Major library imports\nimport numpy\n\n# Enthought library imports\nfrom enable.api import ComponentEditor\nfrom traits.api import HasTraits, Instance\nfrom traitsui.api import UItem, View\n\n# Chaco imports\nfrom chaco.api import LabelAxis, Plot, ArrayPlotData, ArrayDataSource\n\nclass PlotExample(HasTraits):\n plot = Instance(Plot)\n traits_view = View(UItem('plot', editor=ComponentEditor()),\n width=400, height=400, resizable=True, \n )\n\n def __init__(self, index, series_a, series_b, series_c, **kw):\n super(PlotExample, self).__init__(**kw)\n\n # Stack them up\n series_c = series_c + series_b + series_a\n series_b = series_b + series_a\n\n plot_data = ArrayPlotData(index=index)\n plot_data.set_data('series_a', series_a)\n plot_data.set_data('series_b', series_b)\n plot_data.set_data('series_c', series_c)\n self.plot = Plot(plot_data)\n self.plot.plot(('index', 'series_a'), type='bar', bar_width=0.8, color='auto')\n self.plot.plot(('index', 'series_b'), type='bar', bar_width=0.8, color='auto', starting_value=ArrayDataSource(series_a))\n self.plot.plot(('index', 'series_c'), type='bar', bar_width=0.8, color='auto', starting_value=ArrayDataSource(series_b))\n\n # set the plot's value range to 0, otherwise it may pad too much\n self.plot.value_range.low = 0\n\n # replace the index values with some nicer labels\n label_axis = LabelAxis(self.plot, orientation='bottom',\n title='Months',\n positions = list(range(1, 10)),\n labels = ['jan', 'feb', 'march', 'april', 'may'],\n 
small_haxis_style=True)\n\n self.plot.underlays.remove(self.plot.index_axis)\n self.plot.index_axis = label_axis\n self.plot.underlays.append(label_axis)\n\n\nindex = numpy.array([1,2,3,4,5])\ndemo = PlotExample(index, index*10, index*5, index*2)\n\nif __name__ == \"__main__\":\n demo.configure_traits()\n", "path": "examples/demo/basic/bar_plot_stacked.py"}]}
| 1,055 | 433 |
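The patch above fixes the demo by accumulating the series and handing the running totals to `starting_value`, so each bar segment begins where the previous one ends. A minimal standalone sketch of that idea (assuming the `chaco`, `enable` and `traits` packages are installed; the data values are made up for illustration):

```python
# Sketch of the stacking approach used in the patch: accumulate the series and
# pass the running totals as `starting_value` so the bars sit on top of each other.
import numpy
from chaco.api import ArrayDataSource, ArrayPlotData, Plot

index = numpy.array([1, 2, 3, 4, 5])
series_a, series_b, series_c = index * 10, index * 5, index * 2

# Stack them up: each series starts where the previous one ends.
series_c = series_c + series_b + series_a
series_b = series_b + series_a

data = ArrayPlotData(index=index, series_a=series_a,
                     series_b=series_b, series_c=series_c)
plot = Plot(data)
plot.plot(('index', 'series_a'), type='bar', bar_width=0.8, color='auto')
plot.plot(('index', 'series_b'), type='bar', bar_width=0.8, color='auto',
          starting_value=ArrayDataSource(series_a))
plot.plot(('index', 'series_c'), type='bar', bar_width=0.8, color='auto',
          starting_value=ArrayDataSource(series_b))
```

With this arrangement the top of the last segment reaches (10 + 5 + 2) * index, which is the behaviour the issue asks for.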
gh_patches_debug_26646
|
rasdani/github-patches
|
git_diff
|
tobymao__sqlglot-1705
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DATE_SUB from BigQuery produces DuckDB code giving number of intervals, rather than subtracted date
```
>>> sqlglot.transpile("SELECT DATE_SUB(date_in_table, INTERVAL 5 DAY) AS five_days_ago from table;", read="bigquery", write="duckdb")
['SELECT DATE_SUB(date_in_table, 5, DAY) AS five_days_ago FROM table']
>>> sqlglot.transpile("SELECT DATE_ADD(date_in_table, INTERVAL 5 DAY) AS five_days_from_now from table;", read="bigquery", write="duckdb")
['SELECT date_in_table + INTERVAL 5 DAY AS five_days_from_now FROM table']
```
BigQuery uses DATE_SUB with INTERVAL to subtract dates, whereas DuckDB uses the `-` operator with INTERVAL.
As you can see, conversion from BigQuery DATE_ADD works as expected.
**Official Documentation**
* https://duckdb.org/docs/sql/functions/date.html
* https://cloud.google.com/bigquery/docs/reference/standard-sql/date_functions#date_sub
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sqlglot/dialects/duckdb.py`
Content:
```
1 from __future__ import annotations
2
3 import typing as t
4
5 from sqlglot import exp, generator, parser, tokens
6 from sqlglot.dialects.dialect import (
7 Dialect,
8 approx_count_distinct_sql,
9 arrow_json_extract_scalar_sql,
10 arrow_json_extract_sql,
11 datestrtodate_sql,
12 format_time_lambda,
13 no_comment_column_constraint_sql,
14 no_properties_sql,
15 no_safe_divide_sql,
16 pivot_column_names,
17 rename_func,
18 str_position_sql,
19 str_to_time_sql,
20 timestamptrunc_sql,
21 timestrtotime_sql,
22 ts_or_ds_to_date_sql,
23 )
24 from sqlglot.helper import seq_get
25 from sqlglot.tokens import TokenType
26
27
28 def _ts_or_ds_add_sql(self: generator.Generator, expression: exp.TsOrDsAdd) -> str:
29 this = self.sql(expression, "this")
30 unit = self.sql(expression, "unit").strip("'") or "DAY"
31 return f"CAST({this} AS DATE) + {self.sql(exp.Interval(this=expression.expression, unit=unit))}"
32
33
34 def _date_add_sql(self: generator.Generator, expression: exp.DateAdd) -> str:
35 this = self.sql(expression, "this")
36 unit = self.sql(expression, "unit").strip("'") or "DAY"
37 return f"{this} + {self.sql(exp.Interval(this=expression.expression, unit=unit))}"
38
39
40 def _array_sort_sql(self: generator.Generator, expression: exp.ArraySort) -> str:
41 if expression.expression:
42 self.unsupported("DUCKDB ARRAY_SORT does not support a comparator")
43 return f"ARRAY_SORT({self.sql(expression, 'this')})"
44
45
46 def _sort_array_sql(self: generator.Generator, expression: exp.SortArray) -> str:
47 this = self.sql(expression, "this")
48 if expression.args.get("asc") == exp.false():
49 return f"ARRAY_REVERSE_SORT({this})"
50 return f"ARRAY_SORT({this})"
51
52
53 def _sort_array_reverse(args: t.List) -> exp.Expression:
54 return exp.SortArray(this=seq_get(args, 0), asc=exp.false())
55
56
57 def _parse_date_diff(args: t.List) -> exp.Expression:
58 return exp.DateDiff(
59 this=seq_get(args, 2),
60 expression=seq_get(args, 1),
61 unit=seq_get(args, 0),
62 )
63
64
65 def _struct_sql(self: generator.Generator, expression: exp.Struct) -> str:
66 args = [
67 f"'{e.name or e.this.name}': {self.sql(e, 'expression')}" for e in expression.expressions
68 ]
69 return f"{{{', '.join(args)}}}"
70
71
72 def _datatype_sql(self: generator.Generator, expression: exp.DataType) -> str:
73 if expression.this == exp.DataType.Type.ARRAY:
74 return f"{self.expressions(expression, flat=True)}[]"
75 return self.datatype_sql(expression)
76
77
78 def _regexp_extract_sql(self: generator.Generator, expression: exp.RegexpExtract) -> str:
79 bad_args = list(filter(expression.args.get, ("position", "occurrence")))
80 if bad_args:
81 self.unsupported(f"REGEXP_EXTRACT does not support arg(s) {bad_args}")
82
83 return self.func(
84 "REGEXP_EXTRACT",
85 expression.args.get("this"),
86 expression.args.get("expression"),
87 expression.args.get("group"),
88 )
89
90
91 class DuckDB(Dialect):
92 null_ordering = "nulls_are_last"
93
94 class Tokenizer(tokens.Tokenizer):
95 KEYWORDS = {
96 **tokens.Tokenizer.KEYWORDS,
97 "~": TokenType.RLIKE,
98 ":=": TokenType.EQ,
99 "//": TokenType.DIV,
100 "ATTACH": TokenType.COMMAND,
101 "BINARY": TokenType.VARBINARY,
102 "BPCHAR": TokenType.TEXT,
103 "BITSTRING": TokenType.BIT,
104 "CHAR": TokenType.TEXT,
105 "CHARACTER VARYING": TokenType.TEXT,
106 "EXCLUDE": TokenType.EXCEPT,
107 "INT1": TokenType.TINYINT,
108 "LOGICAL": TokenType.BOOLEAN,
109 "NUMERIC": TokenType.DOUBLE,
110 "SIGNED": TokenType.INT,
111 "STRING": TokenType.VARCHAR,
112 "UBIGINT": TokenType.UBIGINT,
113 "UINTEGER": TokenType.UINT,
114 "USMALLINT": TokenType.USMALLINT,
115 "UTINYINT": TokenType.UTINYINT,
116 }
117
118 class Parser(parser.Parser):
119 FUNCTIONS = {
120 **parser.Parser.FUNCTIONS,
121 "ARRAY_LENGTH": exp.ArraySize.from_arg_list,
122 "ARRAY_SORT": exp.SortArray.from_arg_list,
123 "ARRAY_REVERSE_SORT": _sort_array_reverse,
124 "DATEDIFF": _parse_date_diff,
125 "DATE_DIFF": _parse_date_diff,
126 "EPOCH": exp.TimeToUnix.from_arg_list,
127 "EPOCH_MS": lambda args: exp.UnixToTime(
128 this=exp.Div(
129 this=seq_get(args, 0),
130 expression=exp.Literal.number(1000),
131 )
132 ),
133 "LIST_REVERSE_SORT": _sort_array_reverse,
134 "LIST_SORT": exp.SortArray.from_arg_list,
135 "LIST_VALUE": exp.Array.from_arg_list,
136 "REGEXP_MATCHES": exp.RegexpLike.from_arg_list,
137 "STRFTIME": format_time_lambda(exp.TimeToStr, "duckdb"),
138 "STRING_SPLIT": exp.Split.from_arg_list,
139 "STRING_SPLIT_REGEX": exp.RegexpSplit.from_arg_list,
140 "STRING_TO_ARRAY": exp.Split.from_arg_list,
141 "STRPTIME": format_time_lambda(exp.StrToTime, "duckdb"),
142 "STRUCT_PACK": exp.Struct.from_arg_list,
143 "STR_SPLIT": exp.Split.from_arg_list,
144 "STR_SPLIT_REGEX": exp.RegexpSplit.from_arg_list,
145 "TO_TIMESTAMP": exp.UnixToTime.from_arg_list,
146 "UNNEST": exp.Explode.from_arg_list,
147 }
148
149 TYPE_TOKENS = {
150 *parser.Parser.TYPE_TOKENS,
151 TokenType.UBIGINT,
152 TokenType.UINT,
153 TokenType.USMALLINT,
154 TokenType.UTINYINT,
155 }
156
157 def _pivot_column_names(self, aggregations: t.List[exp.Expression]) -> t.List[str]:
158 if len(aggregations) == 1:
159 return super()._pivot_column_names(aggregations)
160 return pivot_column_names(aggregations, dialect="duckdb")
161
162 class Generator(generator.Generator):
163 JOIN_HINTS = False
164 TABLE_HINTS = False
165 LIMIT_FETCH = "LIMIT"
166 STRUCT_DELIMITER = ("(", ")")
167 RENAME_TABLE_WITH_DB = False
168
169 TRANSFORMS = {
170 **generator.Generator.TRANSFORMS,
171 exp.ApproxDistinct: approx_count_distinct_sql,
172 exp.Array: lambda self, e: self.func("ARRAY", e.expressions[0])
173 if isinstance(seq_get(e.expressions, 0), exp.Select)
174 else rename_func("LIST_VALUE")(self, e),
175 exp.ArraySize: rename_func("ARRAY_LENGTH"),
176 exp.ArraySort: _array_sort_sql,
177 exp.ArraySum: rename_func("LIST_SUM"),
178 exp.CommentColumnConstraint: no_comment_column_constraint_sql,
179 exp.CurrentDate: lambda self, e: "CURRENT_DATE",
180 exp.CurrentTime: lambda self, e: "CURRENT_TIME",
181 exp.CurrentTimestamp: lambda self, e: "CURRENT_TIMESTAMP",
182 exp.DayOfMonth: rename_func("DAYOFMONTH"),
183 exp.DayOfWeek: rename_func("DAYOFWEEK"),
184 exp.DayOfYear: rename_func("DAYOFYEAR"),
185 exp.DataType: _datatype_sql,
186 exp.DateAdd: _date_add_sql,
187 exp.DateDiff: lambda self, e: self.func(
188 "DATE_DIFF", f"'{e.args.get('unit', 'day')}'", e.expression, e.this
189 ),
190 exp.DateStrToDate: datestrtodate_sql,
191 exp.DateToDi: lambda self, e: f"CAST(STRFTIME({self.sql(e, 'this')}, {DuckDB.dateint_format}) AS INT)",
192 exp.DiToDate: lambda self, e: f"CAST(STRPTIME(CAST({self.sql(e, 'this')} AS TEXT), {DuckDB.dateint_format}) AS DATE)",
193 exp.Explode: rename_func("UNNEST"),
194 exp.IntDiv: lambda self, e: self.binary(e, "//"),
195 exp.JSONExtract: arrow_json_extract_sql,
196 exp.JSONExtractScalar: arrow_json_extract_scalar_sql,
197 exp.JSONBExtract: arrow_json_extract_sql,
198 exp.JSONBExtractScalar: arrow_json_extract_scalar_sql,
199 exp.LogicalOr: rename_func("BOOL_OR"),
200 exp.LogicalAnd: rename_func("BOOL_AND"),
201 exp.Properties: no_properties_sql,
202 exp.RegexpExtract: _regexp_extract_sql,
203 exp.RegexpLike: rename_func("REGEXP_MATCHES"),
204 exp.RegexpSplit: rename_func("STR_SPLIT_REGEX"),
205 exp.SafeDivide: no_safe_divide_sql,
206 exp.Split: rename_func("STR_SPLIT"),
207 exp.SortArray: _sort_array_sql,
208 exp.StrPosition: str_position_sql,
209 exp.StrToDate: lambda self, e: f"CAST({str_to_time_sql(self, e)} AS DATE)",
210 exp.StrToTime: str_to_time_sql,
211 exp.StrToUnix: lambda self, e: f"EPOCH(STRPTIME({self.sql(e, 'this')}, {self.format_time(e)}))",
212 exp.Struct: _struct_sql,
213 exp.TimestampTrunc: timestamptrunc_sql,
214 exp.TimeStrToDate: lambda self, e: f"CAST({self.sql(e, 'this')} AS DATE)",
215 exp.TimeStrToTime: timestrtotime_sql,
216 exp.TimeStrToUnix: lambda self, e: f"EPOCH(CAST({self.sql(e, 'this')} AS TIMESTAMP))",
217 exp.TimeToStr: lambda self, e: f"STRFTIME({self.sql(e, 'this')}, {self.format_time(e)})",
218 exp.TimeToUnix: rename_func("EPOCH"),
219 exp.TsOrDiToDi: lambda self, e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS TEXT), '-', ''), 1, 8) AS INT)",
220 exp.TsOrDsAdd: _ts_or_ds_add_sql,
221 exp.TsOrDsToDate: ts_or_ds_to_date_sql("duckdb"),
222 exp.UnixToStr: lambda self, e: f"STRFTIME(TO_TIMESTAMP({self.sql(e, 'this')}), {self.format_time(e)})",
223 exp.UnixToTime: rename_func("TO_TIMESTAMP"),
224 exp.UnixToTimeStr: lambda self, e: f"CAST(TO_TIMESTAMP({self.sql(e, 'this')}) AS TEXT)",
225 exp.WeekOfYear: rename_func("WEEKOFYEAR"),
226 }
227
228 TYPE_MAPPING = {
229 **generator.Generator.TYPE_MAPPING,
230 exp.DataType.Type.BINARY: "BLOB",
231 exp.DataType.Type.CHAR: "TEXT",
232 exp.DataType.Type.FLOAT: "REAL",
233 exp.DataType.Type.NCHAR: "TEXT",
234 exp.DataType.Type.NVARCHAR: "TEXT",
235 exp.DataType.Type.UINT: "UINTEGER",
236 exp.DataType.Type.VARBINARY: "BLOB",
237 exp.DataType.Type.VARCHAR: "TEXT",
238 }
239
240 STAR_MAPPING = {**generator.Generator.STAR_MAPPING, "except": "EXCLUDE"}
241
242 PROPERTIES_LOCATION = {
243 **generator.Generator.PROPERTIES_LOCATION,
244 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
245 }
246
247 def tablesample_sql(
248 self, expression: exp.TableSample, seed_prefix: str = "SEED", sep: str = " AS "
249 ) -> str:
250 return super().tablesample_sql(expression, seed_prefix="REPEATABLE", sep=sep)
251
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sqlglot/dialects/duckdb.py b/sqlglot/dialects/duckdb.py
--- a/sqlglot/dialects/duckdb.py
+++ b/sqlglot/dialects/duckdb.py
@@ -31,10 +31,11 @@
return f"CAST({this} AS DATE) + {self.sql(exp.Interval(this=expression.expression, unit=unit))}"
-def _date_add_sql(self: generator.Generator, expression: exp.DateAdd) -> str:
+def _date_delta_sql(self: generator.Generator, expression: exp.DateAdd | exp.DateSub) -> str:
this = self.sql(expression, "this")
unit = self.sql(expression, "unit").strip("'") or "DAY"
- return f"{this} + {self.sql(exp.Interval(this=expression.expression, unit=unit))}"
+ op = "+" if isinstance(expression, exp.DateAdd) else "-"
+ return f"{this} {op} {self.sql(exp.Interval(this=expression.expression, unit=unit))}"
def _array_sort_sql(self: generator.Generator, expression: exp.ArraySort) -> str:
@@ -183,7 +184,8 @@
exp.DayOfWeek: rename_func("DAYOFWEEK"),
exp.DayOfYear: rename_func("DAYOFYEAR"),
exp.DataType: _datatype_sql,
- exp.DateAdd: _date_add_sql,
+ exp.DateAdd: _date_delta_sql,
+ exp.DateSub: _date_delta_sql,
exp.DateDiff: lambda self, e: self.func(
"DATE_DIFF", f"'{e.args.get('unit', 'day')}'", e.expression, e.this
),
|
{"golden_diff": "diff --git a/sqlglot/dialects/duckdb.py b/sqlglot/dialects/duckdb.py\n--- a/sqlglot/dialects/duckdb.py\n+++ b/sqlglot/dialects/duckdb.py\n@@ -31,10 +31,11 @@\n return f\"CAST({this} AS DATE) + {self.sql(exp.Interval(this=expression.expression, unit=unit))}\"\n \n \n-def _date_add_sql(self: generator.Generator, expression: exp.DateAdd) -> str:\n+def _date_delta_sql(self: generator.Generator, expression: exp.DateAdd | exp.DateSub) -> str:\n this = self.sql(expression, \"this\")\n unit = self.sql(expression, \"unit\").strip(\"'\") or \"DAY\"\n- return f\"{this} + {self.sql(exp.Interval(this=expression.expression, unit=unit))}\"\n+ op = \"+\" if isinstance(expression, exp.DateAdd) else \"-\"\n+ return f\"{this} {op} {self.sql(exp.Interval(this=expression.expression, unit=unit))}\"\n \n \n def _array_sort_sql(self: generator.Generator, expression: exp.ArraySort) -> str:\n@@ -183,7 +184,8 @@\n exp.DayOfWeek: rename_func(\"DAYOFWEEK\"),\n exp.DayOfYear: rename_func(\"DAYOFYEAR\"),\n exp.DataType: _datatype_sql,\n- exp.DateAdd: _date_add_sql,\n+ exp.DateAdd: _date_delta_sql,\n+ exp.DateSub: _date_delta_sql,\n exp.DateDiff: lambda self, e: self.func(\n \"DATE_DIFF\", f\"'{e.args.get('unit', 'day')}'\", e.expression, e.this\n ),\n", "issue": "DATE_SUB from BigQuery produces DuckDB code giving number of intervals, rather than subtracted date\n```\r\n>>> sqlglot.transpile(\"SELECT DATE_SUB(date_in_table, INTERVAL 5 DAY) AS five_days_ago from table;\", read=\"bigquery\", write=\"duckdb\")\r\n['SELECT DATE_SUB(date_in_table, 5, DAY) AS five_days_ago FROM table']\r\n>>> sqlglot.transpile(\"SELECT DATE_ADD(date_in_table, INTERVAL 5 DAY) AS five_days_from_now from table;\", read=\"bigquery\", write=\"duckdb\")\r\n['SELECT date_in_table + INTERVAL 5 DAY AS five_days_from_now FROM table']\r\n```\r\n\r\nBigQuery uses DATE_SUB with INTERVAL to subtract dates, whereas DuckDb uses the `-` operator with INTERVAL.\r\nAs you can see conversion from BigQuery DATE_ADD works as expected.\r\n\r\n**Official Documentation**\r\n* https://duckdb.org/docs/sql/functions/date.html\r\n* https://cloud.google.com/bigquery/docs/reference/standard-sql/date_functions#date_sub\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport typing as t\n\nfrom sqlglot import exp, generator, parser, tokens\nfrom sqlglot.dialects.dialect import (\n Dialect,\n approx_count_distinct_sql,\n arrow_json_extract_scalar_sql,\n arrow_json_extract_sql,\n datestrtodate_sql,\n format_time_lambda,\n no_comment_column_constraint_sql,\n no_properties_sql,\n no_safe_divide_sql,\n pivot_column_names,\n rename_func,\n str_position_sql,\n str_to_time_sql,\n timestamptrunc_sql,\n timestrtotime_sql,\n ts_or_ds_to_date_sql,\n)\nfrom sqlglot.helper import seq_get\nfrom sqlglot.tokens import TokenType\n\n\ndef _ts_or_ds_add_sql(self: generator.Generator, expression: exp.TsOrDsAdd) -> str:\n this = self.sql(expression, \"this\")\n unit = self.sql(expression, \"unit\").strip(\"'\") or \"DAY\"\n return f\"CAST({this} AS DATE) + {self.sql(exp.Interval(this=expression.expression, unit=unit))}\"\n\n\ndef _date_add_sql(self: generator.Generator, expression: exp.DateAdd) -> str:\n this = self.sql(expression, \"this\")\n unit = self.sql(expression, \"unit\").strip(\"'\") or \"DAY\"\n return f\"{this} + {self.sql(exp.Interval(this=expression.expression, unit=unit))}\"\n\n\ndef _array_sort_sql(self: generator.Generator, expression: exp.ArraySort) -> str:\n if expression.expression:\n self.unsupported(\"DUCKDB ARRAY_SORT does not 
support a comparator\")\n return f\"ARRAY_SORT({self.sql(expression, 'this')})\"\n\n\ndef _sort_array_sql(self: generator.Generator, expression: exp.SortArray) -> str:\n this = self.sql(expression, \"this\")\n if expression.args.get(\"asc\") == exp.false():\n return f\"ARRAY_REVERSE_SORT({this})\"\n return f\"ARRAY_SORT({this})\"\n\n\ndef _sort_array_reverse(args: t.List) -> exp.Expression:\n return exp.SortArray(this=seq_get(args, 0), asc=exp.false())\n\n\ndef _parse_date_diff(args: t.List) -> exp.Expression:\n return exp.DateDiff(\n this=seq_get(args, 2),\n expression=seq_get(args, 1),\n unit=seq_get(args, 0),\n )\n\n\ndef _struct_sql(self: generator.Generator, expression: exp.Struct) -> str:\n args = [\n f\"'{e.name or e.this.name}': {self.sql(e, 'expression')}\" for e in expression.expressions\n ]\n return f\"{{{', '.join(args)}}}\"\n\n\ndef _datatype_sql(self: generator.Generator, expression: exp.DataType) -> str:\n if expression.this == exp.DataType.Type.ARRAY:\n return f\"{self.expressions(expression, flat=True)}[]\"\n return self.datatype_sql(expression)\n\n\ndef _regexp_extract_sql(self: generator.Generator, expression: exp.RegexpExtract) -> str:\n bad_args = list(filter(expression.args.get, (\"position\", \"occurrence\")))\n if bad_args:\n self.unsupported(f\"REGEXP_EXTRACT does not support arg(s) {bad_args}\")\n\n return self.func(\n \"REGEXP_EXTRACT\",\n expression.args.get(\"this\"),\n expression.args.get(\"expression\"),\n expression.args.get(\"group\"),\n )\n\n\nclass DuckDB(Dialect):\n null_ordering = \"nulls_are_last\"\n\n class Tokenizer(tokens.Tokenizer):\n KEYWORDS = {\n **tokens.Tokenizer.KEYWORDS,\n \"~\": TokenType.RLIKE,\n \":=\": TokenType.EQ,\n \"//\": TokenType.DIV,\n \"ATTACH\": TokenType.COMMAND,\n \"BINARY\": TokenType.VARBINARY,\n \"BPCHAR\": TokenType.TEXT,\n \"BITSTRING\": TokenType.BIT,\n \"CHAR\": TokenType.TEXT,\n \"CHARACTER VARYING\": TokenType.TEXT,\n \"EXCLUDE\": TokenType.EXCEPT,\n \"INT1\": TokenType.TINYINT,\n \"LOGICAL\": TokenType.BOOLEAN,\n \"NUMERIC\": TokenType.DOUBLE,\n \"SIGNED\": TokenType.INT,\n \"STRING\": TokenType.VARCHAR,\n \"UBIGINT\": TokenType.UBIGINT,\n \"UINTEGER\": TokenType.UINT,\n \"USMALLINT\": TokenType.USMALLINT,\n \"UTINYINT\": TokenType.UTINYINT,\n }\n\n class Parser(parser.Parser):\n FUNCTIONS = {\n **parser.Parser.FUNCTIONS,\n \"ARRAY_LENGTH\": exp.ArraySize.from_arg_list,\n \"ARRAY_SORT\": exp.SortArray.from_arg_list,\n \"ARRAY_REVERSE_SORT\": _sort_array_reverse,\n \"DATEDIFF\": _parse_date_diff,\n \"DATE_DIFF\": _parse_date_diff,\n \"EPOCH\": exp.TimeToUnix.from_arg_list,\n \"EPOCH_MS\": lambda args: exp.UnixToTime(\n this=exp.Div(\n this=seq_get(args, 0),\n expression=exp.Literal.number(1000),\n )\n ),\n \"LIST_REVERSE_SORT\": _sort_array_reverse,\n \"LIST_SORT\": exp.SortArray.from_arg_list,\n \"LIST_VALUE\": exp.Array.from_arg_list,\n \"REGEXP_MATCHES\": exp.RegexpLike.from_arg_list,\n \"STRFTIME\": format_time_lambda(exp.TimeToStr, \"duckdb\"),\n \"STRING_SPLIT\": exp.Split.from_arg_list,\n \"STRING_SPLIT_REGEX\": exp.RegexpSplit.from_arg_list,\n \"STRING_TO_ARRAY\": exp.Split.from_arg_list,\n \"STRPTIME\": format_time_lambda(exp.StrToTime, \"duckdb\"),\n \"STRUCT_PACK\": exp.Struct.from_arg_list,\n \"STR_SPLIT\": exp.Split.from_arg_list,\n \"STR_SPLIT_REGEX\": exp.RegexpSplit.from_arg_list,\n \"TO_TIMESTAMP\": exp.UnixToTime.from_arg_list,\n \"UNNEST\": exp.Explode.from_arg_list,\n }\n\n TYPE_TOKENS = {\n *parser.Parser.TYPE_TOKENS,\n TokenType.UBIGINT,\n TokenType.UINT,\n TokenType.USMALLINT,\n 
TokenType.UTINYINT,\n }\n\n def _pivot_column_names(self, aggregations: t.List[exp.Expression]) -> t.List[str]:\n if len(aggregations) == 1:\n return super()._pivot_column_names(aggregations)\n return pivot_column_names(aggregations, dialect=\"duckdb\")\n\n class Generator(generator.Generator):\n JOIN_HINTS = False\n TABLE_HINTS = False\n LIMIT_FETCH = \"LIMIT\"\n STRUCT_DELIMITER = (\"(\", \")\")\n RENAME_TABLE_WITH_DB = False\n\n TRANSFORMS = {\n **generator.Generator.TRANSFORMS,\n exp.ApproxDistinct: approx_count_distinct_sql,\n exp.Array: lambda self, e: self.func(\"ARRAY\", e.expressions[0])\n if isinstance(seq_get(e.expressions, 0), exp.Select)\n else rename_func(\"LIST_VALUE\")(self, e),\n exp.ArraySize: rename_func(\"ARRAY_LENGTH\"),\n exp.ArraySort: _array_sort_sql,\n exp.ArraySum: rename_func(\"LIST_SUM\"),\n exp.CommentColumnConstraint: no_comment_column_constraint_sql,\n exp.CurrentDate: lambda self, e: \"CURRENT_DATE\",\n exp.CurrentTime: lambda self, e: \"CURRENT_TIME\",\n exp.CurrentTimestamp: lambda self, e: \"CURRENT_TIMESTAMP\",\n exp.DayOfMonth: rename_func(\"DAYOFMONTH\"),\n exp.DayOfWeek: rename_func(\"DAYOFWEEK\"),\n exp.DayOfYear: rename_func(\"DAYOFYEAR\"),\n exp.DataType: _datatype_sql,\n exp.DateAdd: _date_add_sql,\n exp.DateDiff: lambda self, e: self.func(\n \"DATE_DIFF\", f\"'{e.args.get('unit', 'day')}'\", e.expression, e.this\n ),\n exp.DateStrToDate: datestrtodate_sql,\n exp.DateToDi: lambda self, e: f\"CAST(STRFTIME({self.sql(e, 'this')}, {DuckDB.dateint_format}) AS INT)\",\n exp.DiToDate: lambda self, e: f\"CAST(STRPTIME(CAST({self.sql(e, 'this')} AS TEXT), {DuckDB.dateint_format}) AS DATE)\",\n exp.Explode: rename_func(\"UNNEST\"),\n exp.IntDiv: lambda self, e: self.binary(e, \"//\"),\n exp.JSONExtract: arrow_json_extract_sql,\n exp.JSONExtractScalar: arrow_json_extract_scalar_sql,\n exp.JSONBExtract: arrow_json_extract_sql,\n exp.JSONBExtractScalar: arrow_json_extract_scalar_sql,\n exp.LogicalOr: rename_func(\"BOOL_OR\"),\n exp.LogicalAnd: rename_func(\"BOOL_AND\"),\n exp.Properties: no_properties_sql,\n exp.RegexpExtract: _regexp_extract_sql,\n exp.RegexpLike: rename_func(\"REGEXP_MATCHES\"),\n exp.RegexpSplit: rename_func(\"STR_SPLIT_REGEX\"),\n exp.SafeDivide: no_safe_divide_sql,\n exp.Split: rename_func(\"STR_SPLIT\"),\n exp.SortArray: _sort_array_sql,\n exp.StrPosition: str_position_sql,\n exp.StrToDate: lambda self, e: f\"CAST({str_to_time_sql(self, e)} AS DATE)\",\n exp.StrToTime: str_to_time_sql,\n exp.StrToUnix: lambda self, e: f\"EPOCH(STRPTIME({self.sql(e, 'this')}, {self.format_time(e)}))\",\n exp.Struct: _struct_sql,\n exp.TimestampTrunc: timestamptrunc_sql,\n exp.TimeStrToDate: lambda self, e: f\"CAST({self.sql(e, 'this')} AS DATE)\",\n exp.TimeStrToTime: timestrtotime_sql,\n exp.TimeStrToUnix: lambda self, e: f\"EPOCH(CAST({self.sql(e, 'this')} AS TIMESTAMP))\",\n exp.TimeToStr: lambda self, e: f\"STRFTIME({self.sql(e, 'this')}, {self.format_time(e)})\",\n exp.TimeToUnix: rename_func(\"EPOCH\"),\n exp.TsOrDiToDi: lambda self, e: f\"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS TEXT), '-', ''), 1, 8) AS INT)\",\n exp.TsOrDsAdd: _ts_or_ds_add_sql,\n exp.TsOrDsToDate: ts_or_ds_to_date_sql(\"duckdb\"),\n exp.UnixToStr: lambda self, e: f\"STRFTIME(TO_TIMESTAMP({self.sql(e, 'this')}), {self.format_time(e)})\",\n exp.UnixToTime: rename_func(\"TO_TIMESTAMP\"),\n exp.UnixToTimeStr: lambda self, e: f\"CAST(TO_TIMESTAMP({self.sql(e, 'this')}) AS TEXT)\",\n exp.WeekOfYear: rename_func(\"WEEKOFYEAR\"),\n }\n\n TYPE_MAPPING = {\n 
**generator.Generator.TYPE_MAPPING,\n exp.DataType.Type.BINARY: \"BLOB\",\n exp.DataType.Type.CHAR: \"TEXT\",\n exp.DataType.Type.FLOAT: \"REAL\",\n exp.DataType.Type.NCHAR: \"TEXT\",\n exp.DataType.Type.NVARCHAR: \"TEXT\",\n exp.DataType.Type.UINT: \"UINTEGER\",\n exp.DataType.Type.VARBINARY: \"BLOB\",\n exp.DataType.Type.VARCHAR: \"TEXT\",\n }\n\n STAR_MAPPING = {**generator.Generator.STAR_MAPPING, \"except\": \"EXCLUDE\"}\n\n PROPERTIES_LOCATION = {\n **generator.Generator.PROPERTIES_LOCATION,\n exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,\n }\n\n def tablesample_sql(\n self, expression: exp.TableSample, seed_prefix: str = \"SEED\", sep: str = \" AS \"\n ) -> str:\n return super().tablesample_sql(expression, seed_prefix=\"REPEATABLE\", sep=sep)\n", "path": "sqlglot/dialects/duckdb.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport typing as t\n\nfrom sqlglot import exp, generator, parser, tokens\nfrom sqlglot.dialects.dialect import (\n Dialect,\n approx_count_distinct_sql,\n arrow_json_extract_scalar_sql,\n arrow_json_extract_sql,\n datestrtodate_sql,\n format_time_lambda,\n no_comment_column_constraint_sql,\n no_properties_sql,\n no_safe_divide_sql,\n pivot_column_names,\n rename_func,\n str_position_sql,\n str_to_time_sql,\n timestamptrunc_sql,\n timestrtotime_sql,\n ts_or_ds_to_date_sql,\n)\nfrom sqlglot.helper import seq_get\nfrom sqlglot.tokens import TokenType\n\n\ndef _ts_or_ds_add_sql(self: generator.Generator, expression: exp.TsOrDsAdd) -> str:\n this = self.sql(expression, \"this\")\n unit = self.sql(expression, \"unit\").strip(\"'\") or \"DAY\"\n return f\"CAST({this} AS DATE) + {self.sql(exp.Interval(this=expression.expression, unit=unit))}\"\n\n\ndef _date_delta_sql(self: generator.Generator, expression: exp.DateAdd | exp.DateSub) -> str:\n this = self.sql(expression, \"this\")\n unit = self.sql(expression, \"unit\").strip(\"'\") or \"DAY\"\n op = \"+\" if isinstance(expression, exp.DateAdd) else \"-\"\n return f\"{this} {op} {self.sql(exp.Interval(this=expression.expression, unit=unit))}\"\n\n\ndef _array_sort_sql(self: generator.Generator, expression: exp.ArraySort) -> str:\n if expression.expression:\n self.unsupported(\"DUCKDB ARRAY_SORT does not support a comparator\")\n return f\"ARRAY_SORT({self.sql(expression, 'this')})\"\n\n\ndef _sort_array_sql(self: generator.Generator, expression: exp.SortArray) -> str:\n this = self.sql(expression, \"this\")\n if expression.args.get(\"asc\") == exp.false():\n return f\"ARRAY_REVERSE_SORT({this})\"\n return f\"ARRAY_SORT({this})\"\n\n\ndef _sort_array_reverse(args: t.List) -> exp.Expression:\n return exp.SortArray(this=seq_get(args, 0), asc=exp.false())\n\n\ndef _parse_date_diff(args: t.List) -> exp.Expression:\n return exp.DateDiff(\n this=seq_get(args, 2),\n expression=seq_get(args, 1),\n unit=seq_get(args, 0),\n )\n\n\ndef _struct_sql(self: generator.Generator, expression: exp.Struct) -> str:\n args = [\n f\"'{e.name or e.this.name}': {self.sql(e, 'expression')}\" for e in expression.expressions\n ]\n return f\"{{{', '.join(args)}}}\"\n\n\ndef _datatype_sql(self: generator.Generator, expression: exp.DataType) -> str:\n if expression.this == exp.DataType.Type.ARRAY:\n return f\"{self.expressions(expression, flat=True)}[]\"\n return self.datatype_sql(expression)\n\n\ndef _regexp_extract_sql(self: generator.Generator, expression: exp.RegexpExtract) -> str:\n bad_args = list(filter(expression.args.get, (\"position\", \"occurrence\")))\n if bad_args:\n self.unsupported(f\"REGEXP_EXTRACT 
does not support arg(s) {bad_args}\")\n\n return self.func(\n \"REGEXP_EXTRACT\",\n expression.args.get(\"this\"),\n expression.args.get(\"expression\"),\n expression.args.get(\"group\"),\n )\n\n\nclass DuckDB(Dialect):\n null_ordering = \"nulls_are_last\"\n\n class Tokenizer(tokens.Tokenizer):\n KEYWORDS = {\n **tokens.Tokenizer.KEYWORDS,\n \"~\": TokenType.RLIKE,\n \":=\": TokenType.EQ,\n \"//\": TokenType.DIV,\n \"ATTACH\": TokenType.COMMAND,\n \"BINARY\": TokenType.VARBINARY,\n \"BPCHAR\": TokenType.TEXT,\n \"BITSTRING\": TokenType.BIT,\n \"CHAR\": TokenType.TEXT,\n \"CHARACTER VARYING\": TokenType.TEXT,\n \"EXCLUDE\": TokenType.EXCEPT,\n \"INT1\": TokenType.TINYINT,\n \"LOGICAL\": TokenType.BOOLEAN,\n \"NUMERIC\": TokenType.DOUBLE,\n \"SIGNED\": TokenType.INT,\n \"STRING\": TokenType.VARCHAR,\n \"UBIGINT\": TokenType.UBIGINT,\n \"UINTEGER\": TokenType.UINT,\n \"USMALLINT\": TokenType.USMALLINT,\n \"UTINYINT\": TokenType.UTINYINT,\n }\n\n class Parser(parser.Parser):\n FUNCTIONS = {\n **parser.Parser.FUNCTIONS,\n \"ARRAY_LENGTH\": exp.ArraySize.from_arg_list,\n \"ARRAY_SORT\": exp.SortArray.from_arg_list,\n \"ARRAY_REVERSE_SORT\": _sort_array_reverse,\n \"DATEDIFF\": _parse_date_diff,\n \"DATE_DIFF\": _parse_date_diff,\n \"EPOCH\": exp.TimeToUnix.from_arg_list,\n \"EPOCH_MS\": lambda args: exp.UnixToTime(\n this=exp.Div(\n this=seq_get(args, 0),\n expression=exp.Literal.number(1000),\n )\n ),\n \"LIST_REVERSE_SORT\": _sort_array_reverse,\n \"LIST_SORT\": exp.SortArray.from_arg_list,\n \"LIST_VALUE\": exp.Array.from_arg_list,\n \"REGEXP_MATCHES\": exp.RegexpLike.from_arg_list,\n \"STRFTIME\": format_time_lambda(exp.TimeToStr, \"duckdb\"),\n \"STRING_SPLIT\": exp.Split.from_arg_list,\n \"STRING_SPLIT_REGEX\": exp.RegexpSplit.from_arg_list,\n \"STRING_TO_ARRAY\": exp.Split.from_arg_list,\n \"STRPTIME\": format_time_lambda(exp.StrToTime, \"duckdb\"),\n \"STRUCT_PACK\": exp.Struct.from_arg_list,\n \"STR_SPLIT\": exp.Split.from_arg_list,\n \"STR_SPLIT_REGEX\": exp.RegexpSplit.from_arg_list,\n \"TO_TIMESTAMP\": exp.UnixToTime.from_arg_list,\n \"UNNEST\": exp.Explode.from_arg_list,\n }\n\n TYPE_TOKENS = {\n *parser.Parser.TYPE_TOKENS,\n TokenType.UBIGINT,\n TokenType.UINT,\n TokenType.USMALLINT,\n TokenType.UTINYINT,\n }\n\n def _pivot_column_names(self, aggregations: t.List[exp.Expression]) -> t.List[str]:\n if len(aggregations) == 1:\n return super()._pivot_column_names(aggregations)\n return pivot_column_names(aggregations, dialect=\"duckdb\")\n\n class Generator(generator.Generator):\n JOIN_HINTS = False\n TABLE_HINTS = False\n LIMIT_FETCH = \"LIMIT\"\n STRUCT_DELIMITER = (\"(\", \")\")\n RENAME_TABLE_WITH_DB = False\n\n TRANSFORMS = {\n **generator.Generator.TRANSFORMS,\n exp.ApproxDistinct: approx_count_distinct_sql,\n exp.Array: lambda self, e: self.func(\"ARRAY\", e.expressions[0])\n if isinstance(seq_get(e.expressions, 0), exp.Select)\n else rename_func(\"LIST_VALUE\")(self, e),\n exp.ArraySize: rename_func(\"ARRAY_LENGTH\"),\n exp.ArraySort: _array_sort_sql,\n exp.ArraySum: rename_func(\"LIST_SUM\"),\n exp.CommentColumnConstraint: no_comment_column_constraint_sql,\n exp.CurrentDate: lambda self, e: \"CURRENT_DATE\",\n exp.CurrentTime: lambda self, e: \"CURRENT_TIME\",\n exp.CurrentTimestamp: lambda self, e: \"CURRENT_TIMESTAMP\",\n exp.DayOfMonth: rename_func(\"DAYOFMONTH\"),\n exp.DayOfWeek: rename_func(\"DAYOFWEEK\"),\n exp.DayOfYear: rename_func(\"DAYOFYEAR\"),\n exp.DataType: _datatype_sql,\n exp.DateAdd: _date_delta_sql,\n exp.DateSub: _date_delta_sql,\n exp.DateDiff: lambda 
self, e: self.func(\n \"DATE_DIFF\", f\"'{e.args.get('unit', 'day')}'\", e.expression, e.this\n ),\n exp.DateStrToDate: datestrtodate_sql,\n exp.DateToDi: lambda self, e: f\"CAST(STRFTIME({self.sql(e, 'this')}, {DuckDB.dateint_format}) AS INT)\",\n exp.DiToDate: lambda self, e: f\"CAST(STRPTIME(CAST({self.sql(e, 'this')} AS TEXT), {DuckDB.dateint_format}) AS DATE)\",\n exp.Explode: rename_func(\"UNNEST\"),\n exp.IntDiv: lambda self, e: self.binary(e, \"//\"),\n exp.JSONExtract: arrow_json_extract_sql,\n exp.JSONExtractScalar: arrow_json_extract_scalar_sql,\n exp.JSONBExtract: arrow_json_extract_sql,\n exp.JSONBExtractScalar: arrow_json_extract_scalar_sql,\n exp.LogicalOr: rename_func(\"BOOL_OR\"),\n exp.LogicalAnd: rename_func(\"BOOL_AND\"),\n exp.Properties: no_properties_sql,\n exp.RegexpExtract: _regexp_extract_sql,\n exp.RegexpLike: rename_func(\"REGEXP_MATCHES\"),\n exp.RegexpSplit: rename_func(\"STR_SPLIT_REGEX\"),\n exp.SafeDivide: no_safe_divide_sql,\n exp.Split: rename_func(\"STR_SPLIT\"),\n exp.SortArray: _sort_array_sql,\n exp.StrPosition: str_position_sql,\n exp.StrToDate: lambda self, e: f\"CAST({str_to_time_sql(self, e)} AS DATE)\",\n exp.StrToTime: str_to_time_sql,\n exp.StrToUnix: lambda self, e: f\"EPOCH(STRPTIME({self.sql(e, 'this')}, {self.format_time(e)}))\",\n exp.Struct: _struct_sql,\n exp.TimestampTrunc: timestamptrunc_sql,\n exp.TimeStrToDate: lambda self, e: f\"CAST({self.sql(e, 'this')} AS DATE)\",\n exp.TimeStrToTime: timestrtotime_sql,\n exp.TimeStrToUnix: lambda self, e: f\"EPOCH(CAST({self.sql(e, 'this')} AS TIMESTAMP))\",\n exp.TimeToStr: lambda self, e: f\"STRFTIME({self.sql(e, 'this')}, {self.format_time(e)})\",\n exp.TimeToUnix: rename_func(\"EPOCH\"),\n exp.TsOrDiToDi: lambda self, e: f\"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS TEXT), '-', ''), 1, 8) AS INT)\",\n exp.TsOrDsAdd: _ts_or_ds_add_sql,\n exp.TsOrDsToDate: ts_or_ds_to_date_sql(\"duckdb\"),\n exp.UnixToStr: lambda self, e: f\"STRFTIME(TO_TIMESTAMP({self.sql(e, 'this')}), {self.format_time(e)})\",\n exp.UnixToTime: rename_func(\"TO_TIMESTAMP\"),\n exp.UnixToTimeStr: lambda self, e: f\"CAST(TO_TIMESTAMP({self.sql(e, 'this')}) AS TEXT)\",\n exp.WeekOfYear: rename_func(\"WEEKOFYEAR\"),\n }\n\n TYPE_MAPPING = {\n **generator.Generator.TYPE_MAPPING,\n exp.DataType.Type.BINARY: \"BLOB\",\n exp.DataType.Type.CHAR: \"TEXT\",\n exp.DataType.Type.FLOAT: \"REAL\",\n exp.DataType.Type.NCHAR: \"TEXT\",\n exp.DataType.Type.NVARCHAR: \"TEXT\",\n exp.DataType.Type.UINT: \"UINTEGER\",\n exp.DataType.Type.VARBINARY: \"BLOB\",\n exp.DataType.Type.VARCHAR: \"TEXT\",\n }\n\n STAR_MAPPING = {**generator.Generator.STAR_MAPPING, \"except\": \"EXCLUDE\"}\n\n PROPERTIES_LOCATION = {\n **generator.Generator.PROPERTIES_LOCATION,\n exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,\n }\n\n def tablesample_sql(\n self, expression: exp.TableSample, seed_prefix: str = \"SEED\", sep: str = \" AS \"\n ) -> str:\n return super().tablesample_sql(expression, seed_prefix=\"REPEATABLE\", sep=sep)\n", "path": "sqlglot/dialects/duckdb.py"}]}
| 3,657 | 369 |
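After a change along the lines of the diff above, DATE_SUB should transpile the same way DATE_ADD already does, only with a `-` instead of a `+`. A quick way to check — a sketch only, assuming a sqlglot build that contains the fix:

```python
# Sketch: with the fix applied, BigQuery DATE_SUB should become a DuckDB
# interval subtraction rather than a three-argument DATE_SUB call.
import sqlglot

result = sqlglot.transpile(
    "SELECT DATE_SUB(date_in_table, INTERVAL 5 DAY) AS five_days_ago FROM t",
    read="bigquery",
    write="duckdb",
)
print(result)
# expected: ['SELECT date_in_table - INTERVAL 5 DAY AS five_days_ago FROM t']
```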
gh_patches_debug_44
|
rasdani/github-patches
|
git_diff
|
zestedesavoir__zds-site-6179
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove the last remnants of Travis
**Bug description**
It seems like there are still a few leftover bits of Travis:
* https://github.com/zestedesavoir/zds-site/blob/dev/zds/settings/travis_fixture.py
* https://github.com/zestedesavoir/zds-site/blob/fe854d9b006e5ca500a911c48e3b25b11154d926/scripts/define_function.sh#L13-L66
**Expected behavior**
As far as I can tell, Travis is no longer used, so all of this should disappear.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zds/settings/travis_fixture.py`
Content:
```
1 from .ci_test import *
2
3 LOGGING["loggers"]["zds.utils.templatetags.emarkdown"] = {
4 "level": "INFO",
5 "handlers": ["console"],
6 }
7
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/zds/settings/travis_fixture.py b/zds/settings/travis_fixture.py
deleted file mode 100644
--- a/zds/settings/travis_fixture.py
+++ /dev/null
@@ -1,6 +0,0 @@
-from .ci_test import *
-
-LOGGING["loggers"]["zds.utils.templatetags.emarkdown"] = {
- "level": "INFO",
- "handlers": ["console"],
-}
|
{"golden_diff": "diff --git a/zds/settings/travis_fixture.py b/zds/settings/travis_fixture.py\ndeleted file mode 100644\n--- a/zds/settings/travis_fixture.py\n+++ /dev/null\n@@ -1,6 +0,0 @@\n-from .ci_test import *\n-\n-LOGGING[\"loggers\"][\"zds.utils.templatetags.emarkdown\"] = {\n- \"level\": \"INFO\",\n- \"handlers\": [\"console\"],\n-}\n", "issue": "Retirer les dernier restes de Travis\n**Description du bug**\r\n\r\nJ'ai l'impression qu'il reste quelques miettes de Travis :\r\n\r\n* https://github.com/zestedesavoir/zds-site/blob/dev/zds/settings/travis_fixture.py\r\n* https://github.com/zestedesavoir/zds-site/blob/fe854d9b006e5ca500a911c48e3b25b11154d926/scripts/define_function.sh#L13-L66\r\n\r\n**Comportement attendu**\r\n\r\nA priori, on ne se sert plus de Travis, donc tout \u00e7a devrait dispara\u00eetre.\r\n\n", "before_files": [{"content": "from .ci_test import *\n\nLOGGING[\"loggers\"][\"zds.utils.templatetags.emarkdown\"] = {\n \"level\": \"INFO\",\n \"handlers\": [\"console\"],\n}\n", "path": "zds/settings/travis_fixture.py"}], "after_files": [{"content": null, "path": "zds/settings/travis_fixture.py"}]}
| 463 | 102 |
gh_patches_debug_5251
|
rasdani/github-patches
|
git_diff
|
svthalia__concrexit-2979
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AttributeError: 'Profile' object has no attribute 'is_minimised'
Sentry Issue: [CONCREXIT-P2](https://thalia.sentry.io/issues/4038068631/?referrer=github_integration)
```
AttributeError: 'Profile' object has no attribute 'is_minimised'
(3 additional frame(s) were not displayed)
...
File "registrations/views.py", line 238, in get_context_data
) and self.request.member.profile.is_minimised:
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/registrations/views.py`
Content:
```
1 """Views provided by the registrations package."""
2 from django.conf import settings
3 from django.contrib import messages
4 from django.contrib.admin.utils import model_ngettext
5 from django.contrib.admin.views.decorators import staff_member_required
6 from django.contrib.auth.decorators import login_required, permission_required
7 from django.contrib.contenttypes.models import ContentType
8 from django.core.exceptions import ValidationError
9 from django.db.models import Q
10 from django.http import Http404
11 from django.shortcuts import get_object_or_404, redirect
12 from django.template.defaultfilters import floatformat
13 from django.urls import reverse
14 from django.utils import timezone
15 from django.utils.decorators import method_decorator
16 from django.utils.translation import gettext_lazy as _
17 from django.views import View
18 from django.views.generic import CreateView, FormView
19 from django.views.generic.base import TemplateResponseMixin, TemplateView
20
21 from members.decorators import membership_required
22 from members.models import Membership
23
24 from . import emails, forms, services
25 from .models import Entry, Reference, Registration, Renewal
26
27
28 class BecomeAMemberView(TemplateView):
29 """View that render a HTML template with context data."""
30
31 template_name = "registrations/become_a_member.html"
32
33 def get_context_data(self, **kwargs):
34 context = super().get_context_data(**kwargs)
35 context["year_fees"] = floatformat(
36 settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_YEAR], 2
37 )
38 context["study_fees"] = floatformat(
39 settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_STUDY], 2
40 )
41 return context
42
43
44 @method_decorator(staff_member_required, name="dispatch")
45 @method_decorator(
46 permission_required("registrations.review_entries"),
47 name="dispatch",
48 )
49 class EntryAdminView(View):
50 """View that handles the processing of entries."""
51
52 def post(self, request, *args, **kwargs):
53 action = request.POST.get("action")
54 entry_qs = Entry.objects.filter(pk=kwargs["pk"])
55 try:
56 entry = entry_qs.get()
57 except Entry.DoesNotExist:
58 return redirect("admin:index")
59
60 if action == "accept":
61 if not services.check_unique_user(entry):
62 messages.error(
63 request,
64 _("Could not accept %s. Username is not unique.")
65 % model_ngettext(entry, 1),
66 )
67 elif services.accept_entries(request.user.pk, entry_qs) > 0:
68 messages.success(
69 request, _("Successfully accepted %s.") % model_ngettext(entry, 1)
70 )
71 else:
72 messages.error(
73 request, _("Could not accept %s.") % model_ngettext(entry, 1)
74 )
75 elif action == "reject":
76 if services.reject_entries(request.user.pk, entry_qs) > 0:
77 messages.success(
78 request, _("Successfully rejected %s.") % model_ngettext(entry, 1)
79 )
80 else:
81 messages.error(
82 request, _("Could not reject %s.") % model_ngettext(entry, 1)
83 )
84 elif action == "resend":
85 try:
86 emails.send_registration_email_confirmation(entry.registration)
87 except Registration.DoesNotExist:
88 pass
89 elif action == "revert":
90 services.revert_entry(request.user.pk, entry)
91
92 if entry_qs.filter(renewal=None).exists():
93 content_type = ContentType.objects.get_for_model(Registration)
94 else:
95 content_type = ContentType.objects.get_for_model(Renewal)
96
97 return redirect(
98 f"admin:{content_type.app_label}_{content_type.model}_change",
99 kwargs["pk"],
100 )
101
102
103 class ConfirmEmailView(View, TemplateResponseMixin):
104 """View that renders an HTML template and confirms the email address of the provided registration."""
105
106 template_name = "registrations/confirm_email.html"
107
108 def get(self, request, *args, **kwargs):
109 queryset = Registration.objects.filter(pk=kwargs["pk"])
110
111 processed = 0
112 try:
113 processed = services.confirm_entry(queryset)
114 except ValidationError:
115 pass
116
117 if processed == 0:
118 return redirect("registrations:register-member")
119
120 registration = queryset.get()
121
122 if (
123 registration.membership_type == Membership.BENEFACTOR
124 and not registration.no_references
125 ):
126 emails.send_references_information_message(registration)
127
128 emails.send_new_registration_board_message(registration)
129
130 return self.render_to_response({})
131
132
133 class BaseRegistrationFormView(FormView):
134 """View that renders a membership registration form."""
135
136 form_class = forms.MemberRegistrationForm
137 template_name = "registrations/register_member.html"
138
139 def get_context_data(self, **kwargs):
140 context = super().get_context_data(**kwargs)
141 context["google_api_key"] = settings.GOOGLE_PLACES_API_KEY
142 context["year_fees"] = floatformat(
143 settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_YEAR], 2
144 )
145 context["study_fees"] = floatformat(
146 settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_STUDY], 2
147 )
148 return context
149
150 def get(self, request, *args, **kwargs):
151 if request.user.is_authenticated:
152 return redirect("registrations:renew")
153 return super().get(request, args, kwargs)
154
155 def form_valid(self, form):
156 form.save()
157 emails.send_registration_email_confirmation(form.instance)
158 return redirect("registrations:register-success")
159
160
161 class MemberRegistrationFormView(BaseRegistrationFormView):
162 """View that renders the `member` membership registration form."""
163
164 form_class = forms.MemberRegistrationForm
165 template_name = "registrations/register_member.html"
166
167 def get_context_data(self, **kwargs):
168 context = super().get_context_data(**kwargs)
169 context["tpay_enabled"] = (
170 settings.THALIA_PAY_ENABLED_PAYMENT_METHOD
171 and settings.THALIA_PAY_FOR_NEW_MEMBERS
172 )
173 return context
174
175 def post(self, request, *args, **kwargs):
176 request.POST = request.POST.dict()
177 request.POST["language"] = request.LANGUAGE_CODE
178 request.POST["membership_type"] = Membership.MEMBER
179 return super().post(request, *args, **kwargs)
180
181
182 class BenefactorRegistrationFormView(BaseRegistrationFormView):
183 """View that renders the `benefactor` membership registration form."""
184
185 form_class = forms.BenefactorRegistrationForm
186 template_name = "registrations/register_benefactor.html"
187
188 def get_context_data(self, **kwargs):
189 context = super().get_context_data(**kwargs)
190 context["tpay_enabled"] = (
191 settings.THALIA_PAY_ENABLED_PAYMENT_METHOD
192 and settings.THALIA_PAY_FOR_NEW_MEMBERS
193 )
194 return context
195
196 def post(self, request, *args, **kwargs):
197 request.POST = request.POST.dict()
198 request.POST["language"] = request.LANGUAGE_CODE
199 request.POST["membership_type"] = Membership.BENEFACTOR
200 request.POST["length"] = Entry.MEMBERSHIP_YEAR
201 request.POST["remarks"] = (
202 "Registered as iCIS employee" if "icis_employee" in request.POST else ""
203 )
204 request.POST["no_references"] = "icis_employee" in request.POST
205 return super().post(request, *args, **kwargs)
206
207
208 @method_decorator(login_required, name="dispatch")
209 class RenewalFormView(FormView):
210 """View that renders the membership renewal form."""
211
212 form_class = forms.RenewalForm
213 template_name = "registrations/renewal.html"
214
215 def get_context_data(self, **kwargs):
216 context = super().get_context_data(**kwargs)
217 context["year_fees"] = floatformat(
218 settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_YEAR], 2
219 )
220 context["study_fees"] = floatformat(
221 settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_STUDY], 2
222 )
223 context["latest_membership"] = self.request.member.latest_membership
224 context["latest_renewal"] = Renewal.objects.filter(
225 Q(member=self.request.member)
226 & (
227 Q(status=Registration.STATUS_ACCEPTED)
228 | Q(status=Registration.STATUS_REVIEW)
229 )
230 ).last()
231 context["was_member"] = Membership.objects.filter(
232 user=self.request.member, type=Membership.MEMBER
233 ).exists()
234
235 if (
236 self.request.member.latest_membership
237 and not self.request.member.latest_membership.is_active()
238 ) and self.request.member.profile.is_minimised:
239 messages.warning(
240 self.request,
241 _(
242 "You seem to have been a member in the past, but your profile data has been deleted. Please contact the board to renew your membership."
243 ),
244 )
245 context["benefactor_type"] = Membership.BENEFACTOR
246 return context
247
248 def get_form(self, form_class=None):
249 form = super().get_form(form_class)
250 member = self.request.member
251 if member is not None and member.latest_membership is not None:
252 latest_membership = member.latest_membership
253 # If latest membership has not ended or does not ends
254 # within 1 month: do not show 'year' length and disable benefactor option
255 hide_year_choice = not (
256 latest_membership is not None
257 and latest_membership.until is not None
258 and (latest_membership.until - timezone.now().date()).days <= 31
259 )
260
261 if hide_year_choice:
262 form.fields["length"].choices = [
263 c
264 for c in form.fields["length"].choices
265 if c[0] != Entry.MEMBERSHIP_YEAR
266 ]
267 form.fields["membership_type"].choices = [
268 c
269 for c in form.fields["membership_type"].choices
270 if c[0] != Membership.BENEFACTOR
271 ]
272
273 return form
274
275 def post(self, request, *args, **kwargs):
276 request.POST = request.POST.dict()
277 if request.member.latest_membership.type == Membership.BENEFACTOR:
278 request.POST["membership_type"] = Membership.BENEFACTOR
279 request.POST["length"] = Entry.MEMBERSHIP_YEAR
280 request.POST["member"] = request.member.pk
281 request.POST["remarks"] = ""
282 request.POST["no_references"] = True
283
284 if request.POST["membership_type"] == Membership.BENEFACTOR:
285 request.POST["no_references"] = False
286 if Membership.objects.filter(
287 user=request.member, type=Membership.MEMBER
288 ).exists():
289 request.POST["remarks"] = "Was a Thalia member in the past."
290 request.POST["no_references"] = True
291 if "icis_employee" in request.POST:
292 request.POST["remarks"] = "Registered as iCIS employee."
293 request.POST["no_references"] = True
294
295 return super().post(request, *args, **kwargs)
296
297 def form_valid(self, form):
298 renewal = form.save()
299 if not renewal.no_references:
300 emails.send_references_information_message(renewal)
301 emails.send_new_renewal_board_message(renewal)
302 return redirect("registrations:renew-success")
303
304
305 @method_decorator(login_required, name="dispatch")
306 @method_decorator(membership_required, name="dispatch")
307 class ReferenceCreateView(CreateView):
308 """View that renders a reference creation form."""
309
310 model = Reference
311 form_class = forms.ReferenceForm
312 template_name = "registrations/reference.html"
313 entry = None
314 success = False
315
316 def get_success_url(self):
317 return reverse("registrations:reference-success", args=(self.entry.pk,))
318
319 def get_context_data(self, **kwargs):
320 context = super().get_context_data(**kwargs)
321
322 context["success"] = self.success
323 try:
324 context["name"] = self.entry.registration.get_full_name()
325 except Registration.DoesNotExist:
326 context["name"] = self.entry.renewal.member.get_full_name()
327
328 return context
329
330 def dispatch(self, request, *args, **kwargs):
331 self.entry = get_object_or_404(Entry, pk=kwargs.get("pk"))
332
333 if (
334 self.entry.no_references
335 or self.entry.membership_type != Membership.BENEFACTOR
336 ):
337 raise Http404
338
339 return super().dispatch(request, *args, **kwargs)
340
341 def post(self, request, *args, **kwargs):
342 request.POST = request.POST.dict()
343 request.POST["member"] = request.member.pk
344 request.POST["entry"] = kwargs["pk"]
345 return super().post(request, *args, **kwargs)
346
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/website/registrations/views.py b/website/registrations/views.py
--- a/website/registrations/views.py
+++ b/website/registrations/views.py
@@ -235,7 +235,7 @@
if (
self.request.member.latest_membership
and not self.request.member.latest_membership.is_active()
- ) and self.request.member.profile.is_minimised:
+ ) and self.request.member.profile.is_minimized:
messages.warning(
self.request,
_(
|
{"golden_diff": "diff --git a/website/registrations/views.py b/website/registrations/views.py\n--- a/website/registrations/views.py\n+++ b/website/registrations/views.py\n@@ -235,7 +235,7 @@\n if (\n self.request.member.latest_membership\n and not self.request.member.latest_membership.is_active()\n- ) and self.request.member.profile.is_minimised:\n+ ) and self.request.member.profile.is_minimized:\n messages.warning(\n self.request,\n _(\n", "issue": "AttributeError: 'Profile' object has no attribute 'is_minimised'\nSentry Issue: [CONCREXIT-P2](https://thalia.sentry.io/issues/4038068631/?referrer=github_integration)\n\n```\nAttributeError: 'Profile' object has no attribute 'is_minimised'\n(3 additional frame(s) were not displayed)\n...\n File \"registrations/views.py\", line 238, in get_context_data\n ) and self.request.member.profile.is_minimised:\n```\n", "before_files": [{"content": "\"\"\"Views provided by the registrations package.\"\"\"\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.admin.utils import model_ngettext\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import ValidationError\nfrom django.db.models import Q\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.defaultfilters import floatformat\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import View\nfrom django.views.generic import CreateView, FormView\nfrom django.views.generic.base import TemplateResponseMixin, TemplateView\n\nfrom members.decorators import membership_required\nfrom members.models import Membership\n\nfrom . import emails, forms, services\nfrom .models import Entry, Reference, Registration, Renewal\n\n\nclass BecomeAMemberView(TemplateView):\n \"\"\"View that render a HTML template with context data.\"\"\"\n\n template_name = \"registrations/become_a_member.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"year_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_YEAR], 2\n )\n context[\"study_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_STUDY], 2\n )\n return context\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"registrations.review_entries\"),\n name=\"dispatch\",\n)\nclass EntryAdminView(View):\n \"\"\"View that handles the processing of entries.\"\"\"\n\n def post(self, request, *args, **kwargs):\n action = request.POST.get(\"action\")\n entry_qs = Entry.objects.filter(pk=kwargs[\"pk\"])\n try:\n entry = entry_qs.get()\n except Entry.DoesNotExist:\n return redirect(\"admin:index\")\n\n if action == \"accept\":\n if not services.check_unique_user(entry):\n messages.error(\n request,\n _(\"Could not accept %s. 
Username is not unique.\")\n % model_ngettext(entry, 1),\n )\n elif services.accept_entries(request.user.pk, entry_qs) > 0:\n messages.success(\n request, _(\"Successfully accepted %s.\") % model_ngettext(entry, 1)\n )\n else:\n messages.error(\n request, _(\"Could not accept %s.\") % model_ngettext(entry, 1)\n )\n elif action == \"reject\":\n if services.reject_entries(request.user.pk, entry_qs) > 0:\n messages.success(\n request, _(\"Successfully rejected %s.\") % model_ngettext(entry, 1)\n )\n else:\n messages.error(\n request, _(\"Could not reject %s.\") % model_ngettext(entry, 1)\n )\n elif action == \"resend\":\n try:\n emails.send_registration_email_confirmation(entry.registration)\n except Registration.DoesNotExist:\n pass\n elif action == \"revert\":\n services.revert_entry(request.user.pk, entry)\n\n if entry_qs.filter(renewal=None).exists():\n content_type = ContentType.objects.get_for_model(Registration)\n else:\n content_type = ContentType.objects.get_for_model(Renewal)\n\n return redirect(\n f\"admin:{content_type.app_label}_{content_type.model}_change\",\n kwargs[\"pk\"],\n )\n\n\nclass ConfirmEmailView(View, TemplateResponseMixin):\n \"\"\"View that renders an HTML template and confirms the email address of the provided registration.\"\"\"\n\n template_name = \"registrations/confirm_email.html\"\n\n def get(self, request, *args, **kwargs):\n queryset = Registration.objects.filter(pk=kwargs[\"pk\"])\n\n processed = 0\n try:\n processed = services.confirm_entry(queryset)\n except ValidationError:\n pass\n\n if processed == 0:\n return redirect(\"registrations:register-member\")\n\n registration = queryset.get()\n\n if (\n registration.membership_type == Membership.BENEFACTOR\n and not registration.no_references\n ):\n emails.send_references_information_message(registration)\n\n emails.send_new_registration_board_message(registration)\n\n return self.render_to_response({})\n\n\nclass BaseRegistrationFormView(FormView):\n \"\"\"View that renders a membership registration form.\"\"\"\n\n form_class = forms.MemberRegistrationForm\n template_name = \"registrations/register_member.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"google_api_key\"] = settings.GOOGLE_PLACES_API_KEY\n context[\"year_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_YEAR], 2\n )\n context[\"study_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_STUDY], 2\n )\n return context\n\n def get(self, request, *args, **kwargs):\n if request.user.is_authenticated:\n return redirect(\"registrations:renew\")\n return super().get(request, args, kwargs)\n\n def form_valid(self, form):\n form.save()\n emails.send_registration_email_confirmation(form.instance)\n return redirect(\"registrations:register-success\")\n\n\nclass MemberRegistrationFormView(BaseRegistrationFormView):\n \"\"\"View that renders the `member` membership registration form.\"\"\"\n\n form_class = forms.MemberRegistrationForm\n template_name = \"registrations/register_member.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"tpay_enabled\"] = (\n settings.THALIA_PAY_ENABLED_PAYMENT_METHOD\n and settings.THALIA_PAY_FOR_NEW_MEMBERS\n )\n return context\n\n def post(self, request, *args, **kwargs):\n request.POST = request.POST.dict()\n request.POST[\"language\"] = request.LANGUAGE_CODE\n request.POST[\"membership_type\"] = Membership.MEMBER\n return super().post(request, *args, **kwargs)\n\n\nclass 
BenefactorRegistrationFormView(BaseRegistrationFormView):\n \"\"\"View that renders the `benefactor` membership registration form.\"\"\"\n\n form_class = forms.BenefactorRegistrationForm\n template_name = \"registrations/register_benefactor.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"tpay_enabled\"] = (\n settings.THALIA_PAY_ENABLED_PAYMENT_METHOD\n and settings.THALIA_PAY_FOR_NEW_MEMBERS\n )\n return context\n\n def post(self, request, *args, **kwargs):\n request.POST = request.POST.dict()\n request.POST[\"language\"] = request.LANGUAGE_CODE\n request.POST[\"membership_type\"] = Membership.BENEFACTOR\n request.POST[\"length\"] = Entry.MEMBERSHIP_YEAR\n request.POST[\"remarks\"] = (\n \"Registered as iCIS employee\" if \"icis_employee\" in request.POST else \"\"\n )\n request.POST[\"no_references\"] = \"icis_employee\" in request.POST\n return super().post(request, *args, **kwargs)\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass RenewalFormView(FormView):\n \"\"\"View that renders the membership renewal form.\"\"\"\n\n form_class = forms.RenewalForm\n template_name = \"registrations/renewal.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"year_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_YEAR], 2\n )\n context[\"study_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_STUDY], 2\n )\n context[\"latest_membership\"] = self.request.member.latest_membership\n context[\"latest_renewal\"] = Renewal.objects.filter(\n Q(member=self.request.member)\n & (\n Q(status=Registration.STATUS_ACCEPTED)\n | Q(status=Registration.STATUS_REVIEW)\n )\n ).last()\n context[\"was_member\"] = Membership.objects.filter(\n user=self.request.member, type=Membership.MEMBER\n ).exists()\n\n if (\n self.request.member.latest_membership\n and not self.request.member.latest_membership.is_active()\n ) and self.request.member.profile.is_minimised:\n messages.warning(\n self.request,\n _(\n \"You seem to have been a member in the past, but your profile data has been deleted. 
Please contact the board to renew your membership.\"\n ),\n )\n context[\"benefactor_type\"] = Membership.BENEFACTOR\n return context\n\n def get_form(self, form_class=None):\n form = super().get_form(form_class)\n member = self.request.member\n if member is not None and member.latest_membership is not None:\n latest_membership = member.latest_membership\n # If latest membership has not ended or does not ends\n # within 1 month: do not show 'year' length and disable benefactor option\n hide_year_choice = not (\n latest_membership is not None\n and latest_membership.until is not None\n and (latest_membership.until - timezone.now().date()).days <= 31\n )\n\n if hide_year_choice:\n form.fields[\"length\"].choices = [\n c\n for c in form.fields[\"length\"].choices\n if c[0] != Entry.MEMBERSHIP_YEAR\n ]\n form.fields[\"membership_type\"].choices = [\n c\n for c in form.fields[\"membership_type\"].choices\n if c[0] != Membership.BENEFACTOR\n ]\n\n return form\n\n def post(self, request, *args, **kwargs):\n request.POST = request.POST.dict()\n if request.member.latest_membership.type == Membership.BENEFACTOR:\n request.POST[\"membership_type\"] = Membership.BENEFACTOR\n request.POST[\"length\"] = Entry.MEMBERSHIP_YEAR\n request.POST[\"member\"] = request.member.pk\n request.POST[\"remarks\"] = \"\"\n request.POST[\"no_references\"] = True\n\n if request.POST[\"membership_type\"] == Membership.BENEFACTOR:\n request.POST[\"no_references\"] = False\n if Membership.objects.filter(\n user=request.member, type=Membership.MEMBER\n ).exists():\n request.POST[\"remarks\"] = \"Was a Thalia member in the past.\"\n request.POST[\"no_references\"] = True\n if \"icis_employee\" in request.POST:\n request.POST[\"remarks\"] = \"Registered as iCIS employee.\"\n request.POST[\"no_references\"] = True\n\n return super().post(request, *args, **kwargs)\n\n def form_valid(self, form):\n renewal = form.save()\n if not renewal.no_references:\n emails.send_references_information_message(renewal)\n emails.send_new_renewal_board_message(renewal)\n return redirect(\"registrations:renew-success\")\n\n\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(membership_required, name=\"dispatch\")\nclass ReferenceCreateView(CreateView):\n \"\"\"View that renders a reference creation form.\"\"\"\n\n model = Reference\n form_class = forms.ReferenceForm\n template_name = \"registrations/reference.html\"\n entry = None\n success = False\n\n def get_success_url(self):\n return reverse(\"registrations:reference-success\", args=(self.entry.pk,))\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n context[\"success\"] = self.success\n try:\n context[\"name\"] = self.entry.registration.get_full_name()\n except Registration.DoesNotExist:\n context[\"name\"] = self.entry.renewal.member.get_full_name()\n\n return context\n\n def dispatch(self, request, *args, **kwargs):\n self.entry = get_object_or_404(Entry, pk=kwargs.get(\"pk\"))\n\n if (\n self.entry.no_references\n or self.entry.membership_type != Membership.BENEFACTOR\n ):\n raise Http404\n\n return super().dispatch(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n request.POST = request.POST.dict()\n request.POST[\"member\"] = request.member.pk\n request.POST[\"entry\"] = kwargs[\"pk\"]\n return super().post(request, *args, **kwargs)\n", "path": "website/registrations/views.py"}], "after_files": [{"content": "\"\"\"Views provided by the registrations package.\"\"\"\nfrom django.conf import 
settings\nfrom django.contrib import messages\nfrom django.contrib.admin.utils import model_ngettext\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import ValidationError\nfrom django.db.models import Q\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.defaultfilters import floatformat\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import View\nfrom django.views.generic import CreateView, FormView\nfrom django.views.generic.base import TemplateResponseMixin, TemplateView\n\nfrom members.decorators import membership_required\nfrom members.models import Membership\n\nfrom . import emails, forms, services\nfrom .models import Entry, Reference, Registration, Renewal\n\n\nclass BecomeAMemberView(TemplateView):\n \"\"\"View that render a HTML template with context data.\"\"\"\n\n template_name = \"registrations/become_a_member.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"year_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_YEAR], 2\n )\n context[\"study_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_STUDY], 2\n )\n return context\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"registrations.review_entries\"),\n name=\"dispatch\",\n)\nclass EntryAdminView(View):\n \"\"\"View that handles the processing of entries.\"\"\"\n\n def post(self, request, *args, **kwargs):\n action = request.POST.get(\"action\")\n entry_qs = Entry.objects.filter(pk=kwargs[\"pk\"])\n try:\n entry = entry_qs.get()\n except Entry.DoesNotExist:\n return redirect(\"admin:index\")\n\n if action == \"accept\":\n if not services.check_unique_user(entry):\n messages.error(\n request,\n _(\"Could not accept %s. 
Username is not unique.\")\n % model_ngettext(entry, 1),\n )\n elif services.accept_entries(request.user.pk, entry_qs) > 0:\n messages.success(\n request, _(\"Successfully accepted %s.\") % model_ngettext(entry, 1)\n )\n else:\n messages.error(\n request, _(\"Could not accept %s.\") % model_ngettext(entry, 1)\n )\n elif action == \"reject\":\n if services.reject_entries(request.user.pk, entry_qs) > 0:\n messages.success(\n request, _(\"Successfully rejected %s.\") % model_ngettext(entry, 1)\n )\n else:\n messages.error(\n request, _(\"Could not reject %s.\") % model_ngettext(entry, 1)\n )\n elif action == \"resend\":\n try:\n emails.send_registration_email_confirmation(entry.registration)\n except Registration.DoesNotExist:\n pass\n elif action == \"revert\":\n services.revert_entry(request.user.pk, entry)\n\n if entry_qs.filter(renewal=None).exists():\n content_type = ContentType.objects.get_for_model(Registration)\n else:\n content_type = ContentType.objects.get_for_model(Renewal)\n\n return redirect(\n f\"admin:{content_type.app_label}_{content_type.model}_change\",\n kwargs[\"pk\"],\n )\n\n\nclass ConfirmEmailView(View, TemplateResponseMixin):\n \"\"\"View that renders an HTML template and confirms the email address of the provided registration.\"\"\"\n\n template_name = \"registrations/confirm_email.html\"\n\n def get(self, request, *args, **kwargs):\n queryset = Registration.objects.filter(pk=kwargs[\"pk\"])\n\n processed = 0\n try:\n processed = services.confirm_entry(queryset)\n except ValidationError:\n pass\n\n if processed == 0:\n return redirect(\"registrations:register-member\")\n\n registration = queryset.get()\n\n if (\n registration.membership_type == Membership.BENEFACTOR\n and not registration.no_references\n ):\n emails.send_references_information_message(registration)\n\n emails.send_new_registration_board_message(registration)\n\n return self.render_to_response({})\n\n\nclass BaseRegistrationFormView(FormView):\n \"\"\"View that renders a membership registration form.\"\"\"\n\n form_class = forms.MemberRegistrationForm\n template_name = \"registrations/register_member.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"google_api_key\"] = settings.GOOGLE_PLACES_API_KEY\n context[\"year_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_YEAR], 2\n )\n context[\"study_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_STUDY], 2\n )\n return context\n\n def get(self, request, *args, **kwargs):\n if request.user.is_authenticated:\n return redirect(\"registrations:renew\")\n return super().get(request, args, kwargs)\n\n def form_valid(self, form):\n form.save()\n emails.send_registration_email_confirmation(form.instance)\n return redirect(\"registrations:register-success\")\n\n\nclass MemberRegistrationFormView(BaseRegistrationFormView):\n \"\"\"View that renders the `member` membership registration form.\"\"\"\n\n form_class = forms.MemberRegistrationForm\n template_name = \"registrations/register_member.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"tpay_enabled\"] = (\n settings.THALIA_PAY_ENABLED_PAYMENT_METHOD\n and settings.THALIA_PAY_FOR_NEW_MEMBERS\n )\n return context\n\n def post(self, request, *args, **kwargs):\n request.POST = request.POST.dict()\n request.POST[\"language\"] = request.LANGUAGE_CODE\n request.POST[\"membership_type\"] = Membership.MEMBER\n return super().post(request, *args, **kwargs)\n\n\nclass 
BenefactorRegistrationFormView(BaseRegistrationFormView):\n \"\"\"View that renders the `benefactor` membership registration form.\"\"\"\n\n form_class = forms.BenefactorRegistrationForm\n template_name = \"registrations/register_benefactor.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"tpay_enabled\"] = (\n settings.THALIA_PAY_ENABLED_PAYMENT_METHOD\n and settings.THALIA_PAY_FOR_NEW_MEMBERS\n )\n return context\n\n def post(self, request, *args, **kwargs):\n request.POST = request.POST.dict()\n request.POST[\"language\"] = request.LANGUAGE_CODE\n request.POST[\"membership_type\"] = Membership.BENEFACTOR\n request.POST[\"length\"] = Entry.MEMBERSHIP_YEAR\n request.POST[\"remarks\"] = (\n \"Registered as iCIS employee\" if \"icis_employee\" in request.POST else \"\"\n )\n request.POST[\"no_references\"] = \"icis_employee\" in request.POST\n return super().post(request, *args, **kwargs)\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass RenewalFormView(FormView):\n \"\"\"View that renders the membership renewal form.\"\"\"\n\n form_class = forms.RenewalForm\n template_name = \"registrations/renewal.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"year_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_YEAR], 2\n )\n context[\"study_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_STUDY], 2\n )\n context[\"latest_membership\"] = self.request.member.latest_membership\n context[\"latest_renewal\"] = Renewal.objects.filter(\n Q(member=self.request.member)\n & (\n Q(status=Registration.STATUS_ACCEPTED)\n | Q(status=Registration.STATUS_REVIEW)\n )\n ).last()\n context[\"was_member\"] = Membership.objects.filter(\n user=self.request.member, type=Membership.MEMBER\n ).exists()\n\n if (\n self.request.member.latest_membership\n and not self.request.member.latest_membership.is_active()\n ) and self.request.member.profile.is_minimized:\n messages.warning(\n self.request,\n _(\n \"You seem to have been a member in the past, but your profile data has been deleted. 
Please contact the board to renew your membership.\"\n ),\n )\n context[\"benefactor_type\"] = Membership.BENEFACTOR\n return context\n\n def get_form(self, form_class=None):\n form = super().get_form(form_class)\n member = self.request.member\n if member is not None and member.latest_membership is not None:\n latest_membership = member.latest_membership\n # If latest membership has not ended or does not ends\n # within 1 month: do not show 'year' length and disable benefactor option\n hide_year_choice = not (\n latest_membership is not None\n and latest_membership.until is not None\n and (latest_membership.until - timezone.now().date()).days <= 31\n )\n\n if hide_year_choice:\n form.fields[\"length\"].choices = [\n c\n for c in form.fields[\"length\"].choices\n if c[0] != Entry.MEMBERSHIP_YEAR\n ]\n form.fields[\"membership_type\"].choices = [\n c\n for c in form.fields[\"membership_type\"].choices\n if c[0] != Membership.BENEFACTOR\n ]\n\n return form\n\n def post(self, request, *args, **kwargs):\n request.POST = request.POST.dict()\n if request.member.latest_membership.type == Membership.BENEFACTOR:\n request.POST[\"membership_type\"] = Membership.BENEFACTOR\n request.POST[\"length\"] = Entry.MEMBERSHIP_YEAR\n request.POST[\"member\"] = request.member.pk\n request.POST[\"remarks\"] = \"\"\n request.POST[\"no_references\"] = True\n\n if request.POST[\"membership_type\"] == Membership.BENEFACTOR:\n request.POST[\"no_references\"] = False\n if Membership.objects.filter(\n user=request.member, type=Membership.MEMBER\n ).exists():\n request.POST[\"remarks\"] = \"Was a Thalia member in the past.\"\n request.POST[\"no_references\"] = True\n if \"icis_employee\" in request.POST:\n request.POST[\"remarks\"] = \"Registered as iCIS employee.\"\n request.POST[\"no_references\"] = True\n\n return super().post(request, *args, **kwargs)\n\n def form_valid(self, form):\n renewal = form.save()\n if not renewal.no_references:\n emails.send_references_information_message(renewal)\n emails.send_new_renewal_board_message(renewal)\n return redirect(\"registrations:renew-success\")\n\n\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(membership_required, name=\"dispatch\")\nclass ReferenceCreateView(CreateView):\n \"\"\"View that renders a reference creation form.\"\"\"\n\n model = Reference\n form_class = forms.ReferenceForm\n template_name = \"registrations/reference.html\"\n entry = None\n success = False\n\n def get_success_url(self):\n return reverse(\"registrations:reference-success\", args=(self.entry.pk,))\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n context[\"success\"] = self.success\n try:\n context[\"name\"] = self.entry.registration.get_full_name()\n except Registration.DoesNotExist:\n context[\"name\"] = self.entry.renewal.member.get_full_name()\n\n return context\n\n def dispatch(self, request, *args, **kwargs):\n self.entry = get_object_or_404(Entry, pk=kwargs.get(\"pk\"))\n\n if (\n self.entry.no_references\n or self.entry.membership_type != Membership.BENEFACTOR\n ):\n raise Http404\n\n return super().dispatch(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n request.POST = request.POST.dict()\n request.POST[\"member\"] = request.member.pk\n request.POST[\"entry\"] = kwargs[\"pk\"]\n return super().post(request, *args, **kwargs)\n", "path": "website/registrations/views.py"}]}
| 3,975 | 111 |
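
A minimal, self-contained sketch of the bug fixed by the diff in the record above: the class below is a hypothetical stand-in, not the project's real `members.Profile` model, and only shows why the misspelled attribute raises the `AttributeError` quoted in that record's traceback while the corrected name does not.

```python
# Hypothetical stand-in for the profile object in the record above; it is not the
# project's real members.Profile model, only the one flag relevant to the fix.
class Profile:
    def __init__(self, is_minimized: bool) -> None:
        self.is_minimized = is_minimized


profile = Profile(is_minimized=True)

print(profile.is_minimized)  # True: the corrected attribute name from the diff
try:
    profile.is_minimised  # the misspelling that triggered the original traceback
except AttributeError as exc:
    print(f"AttributeError: {exc}")
```
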
gh_patches_debug_43370 | rasdani/github-patches | git_diff | CTFd__CTFd-1484 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
get_standings should return more flexible data
get_standings is a little inflexible because the data is constrained.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `CTFd/utils/scores/__init__.py`
Content:
```
1 from sqlalchemy.sql.expression import union_all
2
3 from CTFd.cache import cache
4 from CTFd.models import Awards, Challenges, Solves, Teams, Users, db
5 from CTFd.utils import get_config
6 from CTFd.utils.dates import unix_time_to_utc
7 from CTFd.utils.modes import get_model
8
9
10 @cache.memoize(timeout=60)
11 def get_standings(count=None, admin=False):
12 """
13 Get standings as a list of tuples containing account_id, name, and score e.g. [(account_id, team_name, score)].
14
15 Ties are broken by who reached a given score first based on the solve ID. Two users can have the same score but one
16 user will have a solve ID that is before the others. That user will be considered the tie-winner.
17
18 Challenges & Awards with a value of zero are filtered out of the calculations to avoid incorrect tie breaks.
19 """
20 Model = get_model()
21
22 scores = (
23 db.session.query(
24 Solves.account_id.label("account_id"),
25 db.func.sum(Challenges.value).label("score"),
26 db.func.max(Solves.id).label("id"),
27 db.func.max(Solves.date).label("date"),
28 )
29 .join(Challenges)
30 .filter(Challenges.value != 0)
31 .group_by(Solves.account_id)
32 )
33
34 awards = (
35 db.session.query(
36 Awards.account_id.label("account_id"),
37 db.func.sum(Awards.value).label("score"),
38 db.func.max(Awards.id).label("id"),
39 db.func.max(Awards.date).label("date"),
40 )
41 .filter(Awards.value != 0)
42 .group_by(Awards.account_id)
43 )
44
45 """
46 Filter out solves and awards that are before a specific time point.
47 """
48 freeze = get_config("freeze")
49 if not admin and freeze:
50 scores = scores.filter(Solves.date < unix_time_to_utc(freeze))
51 awards = awards.filter(Awards.date < unix_time_to_utc(freeze))
52
53 """
54 Combine awards and solves with a union. They should have the same amount of columns
55 """
56 results = union_all(scores, awards).alias("results")
57
58 """
59 Sum each of the results by the team id to get their score.
60 """
61 sumscores = (
62 db.session.query(
63 results.columns.account_id,
64 db.func.sum(results.columns.score).label("score"),
65 db.func.max(results.columns.id).label("id"),
66 db.func.max(results.columns.date).label("date"),
67 )
68 .group_by(results.columns.account_id)
69 .subquery()
70 )
71
72 """
73 Admins can see scores for all users but the public cannot see banned users.
74
75 Filters out banned users.
76 Properly resolves value ties by ID.
77
78 Different databases treat time precision differently so resolve by the row ID instead.
79 """
80 if admin:
81 standings_query = (
82 db.session.query(
83 Model.id.label("account_id"),
84 Model.oauth_id.label("oauth_id"),
85 Model.name.label("name"),
86 Model.hidden,
87 Model.banned,
88 sumscores.columns.score,
89 )
90 .join(sumscores, Model.id == sumscores.columns.account_id)
91 .order_by(sumscores.columns.score.desc(), sumscores.columns.id)
92 )
93 else:
94 standings_query = (
95 db.session.query(
96 Model.id.label("account_id"),
97 Model.oauth_id.label("oauth_id"),
98 Model.name.label("name"),
99 sumscores.columns.score,
100 )
101 .join(sumscores, Model.id == sumscores.columns.account_id)
102 .filter(Model.banned == False, Model.hidden == False)
103 .order_by(sumscores.columns.score.desc(), sumscores.columns.id)
104 )
105
106 """
107 Only select a certain amount of users if asked.
108 """
109 if count is None:
110 standings = standings_query.all()
111 else:
112 standings = standings_query.limit(count).all()
113
114 return standings
115
116
117 @cache.memoize(timeout=60)
118 def get_team_standings(count=None, admin=False):
119 scores = (
120 db.session.query(
121 Solves.team_id.label("team_id"),
122 db.func.sum(Challenges.value).label("score"),
123 db.func.max(Solves.id).label("id"),
124 db.func.max(Solves.date).label("date"),
125 )
126 .join(Challenges)
127 .filter(Challenges.value != 0)
128 .group_by(Solves.team_id)
129 )
130
131 awards = (
132 db.session.query(
133 Awards.team_id.label("team_id"),
134 db.func.sum(Awards.value).label("score"),
135 db.func.max(Awards.id).label("id"),
136 db.func.max(Awards.date).label("date"),
137 )
138 .filter(Awards.value != 0)
139 .group_by(Awards.team_id)
140 )
141
142 freeze = get_config("freeze")
143 if not admin and freeze:
144 scores = scores.filter(Solves.date < unix_time_to_utc(freeze))
145 awards = awards.filter(Awards.date < unix_time_to_utc(freeze))
146
147 results = union_all(scores, awards).alias("results")
148
149 sumscores = (
150 db.session.query(
151 results.columns.team_id,
152 db.func.sum(results.columns.score).label("score"),
153 db.func.max(results.columns.id).label("id"),
154 db.func.max(results.columns.date).label("date"),
155 )
156 .group_by(results.columns.team_id)
157 .subquery()
158 )
159
160 if admin:
161 standings_query = (
162 db.session.query(
163 Teams.id.label("team_id"),
164 Teams.oauth_id.label("oauth_id"),
165 Teams.name.label("name"),
166 Teams.hidden,
167 Teams.banned,
168 sumscores.columns.score,
169 )
170 .join(sumscores, Teams.id == sumscores.columns.team_id)
171 .order_by(sumscores.columns.score.desc(), sumscores.columns.id)
172 )
173 else:
174 standings_query = (
175 db.session.query(
176 Teams.id.label("team_id"),
177 Teams.oauth_id.label("oauth_id"),
178 Teams.name.label("name"),
179 sumscores.columns.score,
180 )
181 .join(sumscores, Teams.id == sumscores.columns.team_id)
182 .filter(Teams.banned == False)
183 .filter(Teams.hidden == False)
184 .order_by(sumscores.columns.score.desc(), sumscores.columns.id)
185 )
186
187 if count is None:
188 standings = standings_query.all()
189 else:
190 standings = standings_query.limit(count).all()
191
192 return standings
193
194
195 @cache.memoize(timeout=60)
196 def get_user_standings(count=None, admin=False):
197 scores = (
198 db.session.query(
199 Solves.user_id.label("user_id"),
200 db.func.sum(Challenges.value).label("score"),
201 db.func.max(Solves.id).label("id"),
202 db.func.max(Solves.date).label("date"),
203 )
204 .join(Challenges)
205 .filter(Challenges.value != 0)
206 .group_by(Solves.user_id)
207 )
208
209 awards = (
210 db.session.query(
211 Awards.user_id.label("user_id"),
212 db.func.sum(Awards.value).label("score"),
213 db.func.max(Awards.id).label("id"),
214 db.func.max(Awards.date).label("date"),
215 )
216 .filter(Awards.value != 0)
217 .group_by(Awards.user_id)
218 )
219
220 freeze = get_config("freeze")
221 if not admin and freeze:
222 scores = scores.filter(Solves.date < unix_time_to_utc(freeze))
223 awards = awards.filter(Awards.date < unix_time_to_utc(freeze))
224
225 results = union_all(scores, awards).alias("results")
226
227 sumscores = (
228 db.session.query(
229 results.columns.user_id,
230 db.func.sum(results.columns.score).label("score"),
231 db.func.max(results.columns.id).label("id"),
232 db.func.max(results.columns.date).label("date"),
233 )
234 .group_by(results.columns.user_id)
235 .subquery()
236 )
237
238 if admin:
239 standings_query = (
240 db.session.query(
241 Users.id.label("user_id"),
242 Users.oauth_id.label("oauth_id"),
243 Users.name.label("name"),
244 Users.hidden,
245 Users.banned,
246 sumscores.columns.score,
247 )
248 .join(sumscores, Users.id == sumscores.columns.user_id)
249 .order_by(sumscores.columns.score.desc(), sumscores.columns.id)
250 )
251 else:
252 standings_query = (
253 db.session.query(
254 Users.id.label("user_id"),
255 Users.oauth_id.label("oauth_id"),
256 Users.name.label("name"),
257 sumscores.columns.score,
258 )
259 .join(sumscores, Users.id == sumscores.columns.user_id)
260 .filter(Users.banned == False, Users.hidden == False)
261 .order_by(sumscores.columns.score.desc(), sumscores.columns.id)
262 )
263
264 if count is None:
265 standings = standings_query.all()
266 else:
267 standings = standings_query.limit(count).all()
268
269 return standings
270
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/CTFd/utils/scores/__init__.py b/CTFd/utils/scores/__init__.py
--- a/CTFd/utils/scores/__init__.py
+++ b/CTFd/utils/scores/__init__.py
@@ -8,7 +8,7 @@
@cache.memoize(timeout=60)
-def get_standings(count=None, admin=False):
+def get_standings(count=None, admin=False, fields=[]):
"""
Get standings as a list of tuples containing account_id, name, and score e.g. [(account_id, team_name, score)].
@@ -86,6 +86,7 @@
Model.hidden,
Model.banned,
sumscores.columns.score,
+ *fields,
)
.join(sumscores, Model.id == sumscores.columns.account_id)
.order_by(sumscores.columns.score.desc(), sumscores.columns.id)
@@ -97,6 +98,7 @@
Model.oauth_id.label("oauth_id"),
Model.name.label("name"),
sumscores.columns.score,
+ *fields,
)
.join(sumscores, Model.id == sumscores.columns.account_id)
.filter(Model.banned == False, Model.hidden == False)
@@ -115,7 +117,7 @@
@cache.memoize(timeout=60)
-def get_team_standings(count=None, admin=False):
+def get_team_standings(count=None, admin=False, fields=[]):
scores = (
db.session.query(
Solves.team_id.label("team_id"),
@@ -166,6 +168,7 @@
Teams.hidden,
Teams.banned,
sumscores.columns.score,
+ *fields,
)
.join(sumscores, Teams.id == sumscores.columns.team_id)
.order_by(sumscores.columns.score.desc(), sumscores.columns.id)
@@ -177,6 +180,7 @@
Teams.oauth_id.label("oauth_id"),
Teams.name.label("name"),
sumscores.columns.score,
+ *fields,
)
.join(sumscores, Teams.id == sumscores.columns.team_id)
.filter(Teams.banned == False)
@@ -193,7 +197,7 @@
@cache.memoize(timeout=60)
-def get_user_standings(count=None, admin=False):
+def get_user_standings(count=None, admin=False, fields=[]):
scores = (
db.session.query(
Solves.user_id.label("user_id"),
@@ -244,6 +248,7 @@
Users.hidden,
Users.banned,
sumscores.columns.score,
+ *fields,
)
.join(sumscores, Users.id == sumscores.columns.user_id)
.order_by(sumscores.columns.score.desc(), sumscores.columns.id)
@@ -255,6 +260,7 @@
Users.oauth_id.label("oauth_id"),
Users.name.label("name"),
sumscores.columns.score,
+ *fields,
)
.join(sumscores, Users.id == sumscores.columns.user_id)
.filter(Users.banned == False, Users.hidden == False)
|
{"golden_diff": "diff --git a/CTFd/utils/scores/__init__.py b/CTFd/utils/scores/__init__.py\n--- a/CTFd/utils/scores/__init__.py\n+++ b/CTFd/utils/scores/__init__.py\n@@ -8,7 +8,7 @@\n \n \n @cache.memoize(timeout=60)\n-def get_standings(count=None, admin=False):\n+def get_standings(count=None, admin=False, fields=[]):\n \"\"\"\n Get standings as a list of tuples containing account_id, name, and score e.g. [(account_id, team_name, score)].\n \n@@ -86,6 +86,7 @@\n Model.hidden,\n Model.banned,\n sumscores.columns.score,\n+ *fields,\n )\n .join(sumscores, Model.id == sumscores.columns.account_id)\n .order_by(sumscores.columns.score.desc(), sumscores.columns.id)\n@@ -97,6 +98,7 @@\n Model.oauth_id.label(\"oauth_id\"),\n Model.name.label(\"name\"),\n sumscores.columns.score,\n+ *fields,\n )\n .join(sumscores, Model.id == sumscores.columns.account_id)\n .filter(Model.banned == False, Model.hidden == False)\n@@ -115,7 +117,7 @@\n \n \n @cache.memoize(timeout=60)\n-def get_team_standings(count=None, admin=False):\n+def get_team_standings(count=None, admin=False, fields=[]):\n scores = (\n db.session.query(\n Solves.team_id.label(\"team_id\"),\n@@ -166,6 +168,7 @@\n Teams.hidden,\n Teams.banned,\n sumscores.columns.score,\n+ *fields,\n )\n .join(sumscores, Teams.id == sumscores.columns.team_id)\n .order_by(sumscores.columns.score.desc(), sumscores.columns.id)\n@@ -177,6 +180,7 @@\n Teams.oauth_id.label(\"oauth_id\"),\n Teams.name.label(\"name\"),\n sumscores.columns.score,\n+ *fields,\n )\n .join(sumscores, Teams.id == sumscores.columns.team_id)\n .filter(Teams.banned == False)\n@@ -193,7 +197,7 @@\n \n \n @cache.memoize(timeout=60)\n-def get_user_standings(count=None, admin=False):\n+def get_user_standings(count=None, admin=False, fields=[]):\n scores = (\n db.session.query(\n Solves.user_id.label(\"user_id\"),\n@@ -244,6 +248,7 @@\n Users.hidden,\n Users.banned,\n sumscores.columns.score,\n+ *fields,\n )\n .join(sumscores, Users.id == sumscores.columns.user_id)\n .order_by(sumscores.columns.score.desc(), sumscores.columns.id)\n@@ -255,6 +260,7 @@\n Users.oauth_id.label(\"oauth_id\"),\n Users.name.label(\"name\"),\n sumscores.columns.score,\n+ *fields,\n )\n .join(sumscores, Users.id == sumscores.columns.user_id)\n .filter(Users.banned == False, Users.hidden == False)\n", "issue": "get_standings should return more flexible data\nget_standings is a little inflexible because the data is constrained. \n", "before_files": [{"content": "from sqlalchemy.sql.expression import union_all\n\nfrom CTFd.cache import cache\nfrom CTFd.models import Awards, Challenges, Solves, Teams, Users, db\nfrom CTFd.utils import get_config\nfrom CTFd.utils.dates import unix_time_to_utc\nfrom CTFd.utils.modes import get_model\n\n\[email protected](timeout=60)\ndef get_standings(count=None, admin=False):\n \"\"\"\n Get standings as a list of tuples containing account_id, name, and score e.g. [(account_id, team_name, score)].\n\n Ties are broken by who reached a given score first based on the solve ID. Two users can have the same score but one\n user will have a solve ID that is before the others. 
That user will be considered the tie-winner.\n\n Challenges & Awards with a value of zero are filtered out of the calculations to avoid incorrect tie breaks.\n \"\"\"\n Model = get_model()\n\n scores = (\n db.session.query(\n Solves.account_id.label(\"account_id\"),\n db.func.sum(Challenges.value).label(\"score\"),\n db.func.max(Solves.id).label(\"id\"),\n db.func.max(Solves.date).label(\"date\"),\n )\n .join(Challenges)\n .filter(Challenges.value != 0)\n .group_by(Solves.account_id)\n )\n\n awards = (\n db.session.query(\n Awards.account_id.label(\"account_id\"),\n db.func.sum(Awards.value).label(\"score\"),\n db.func.max(Awards.id).label(\"id\"),\n db.func.max(Awards.date).label(\"date\"),\n )\n .filter(Awards.value != 0)\n .group_by(Awards.account_id)\n )\n\n \"\"\"\n Filter out solves and awards that are before a specific time point.\n \"\"\"\n freeze = get_config(\"freeze\")\n if not admin and freeze:\n scores = scores.filter(Solves.date < unix_time_to_utc(freeze))\n awards = awards.filter(Awards.date < unix_time_to_utc(freeze))\n\n \"\"\"\n Combine awards and solves with a union. They should have the same amount of columns\n \"\"\"\n results = union_all(scores, awards).alias(\"results\")\n\n \"\"\"\n Sum each of the results by the team id to get their score.\n \"\"\"\n sumscores = (\n db.session.query(\n results.columns.account_id,\n db.func.sum(results.columns.score).label(\"score\"),\n db.func.max(results.columns.id).label(\"id\"),\n db.func.max(results.columns.date).label(\"date\"),\n )\n .group_by(results.columns.account_id)\n .subquery()\n )\n\n \"\"\"\n Admins can see scores for all users but the public cannot see banned users.\n\n Filters out banned users.\n Properly resolves value ties by ID.\n\n Different databases treat time precision differently so resolve by the row ID instead.\n \"\"\"\n if admin:\n standings_query = (\n db.session.query(\n Model.id.label(\"account_id\"),\n Model.oauth_id.label(\"oauth_id\"),\n Model.name.label(\"name\"),\n Model.hidden,\n Model.banned,\n sumscores.columns.score,\n )\n .join(sumscores, Model.id == sumscores.columns.account_id)\n .order_by(sumscores.columns.score.desc(), sumscores.columns.id)\n )\n else:\n standings_query = (\n db.session.query(\n Model.id.label(\"account_id\"),\n Model.oauth_id.label(\"oauth_id\"),\n Model.name.label(\"name\"),\n sumscores.columns.score,\n )\n .join(sumscores, Model.id == sumscores.columns.account_id)\n .filter(Model.banned == False, Model.hidden == False)\n .order_by(sumscores.columns.score.desc(), sumscores.columns.id)\n )\n\n \"\"\"\n Only select a certain amount of users if asked.\n \"\"\"\n if count is None:\n standings = standings_query.all()\n else:\n standings = standings_query.limit(count).all()\n\n return standings\n\n\[email protected](timeout=60)\ndef get_team_standings(count=None, admin=False):\n scores = (\n db.session.query(\n Solves.team_id.label(\"team_id\"),\n db.func.sum(Challenges.value).label(\"score\"),\n db.func.max(Solves.id).label(\"id\"),\n db.func.max(Solves.date).label(\"date\"),\n )\n .join(Challenges)\n .filter(Challenges.value != 0)\n .group_by(Solves.team_id)\n )\n\n awards = (\n db.session.query(\n Awards.team_id.label(\"team_id\"),\n db.func.sum(Awards.value).label(\"score\"),\n db.func.max(Awards.id).label(\"id\"),\n db.func.max(Awards.date).label(\"date\"),\n )\n .filter(Awards.value != 0)\n .group_by(Awards.team_id)\n )\n\n freeze = get_config(\"freeze\")\n if not admin and freeze:\n scores = scores.filter(Solves.date < unix_time_to_utc(freeze))\n awards = 
awards.filter(Awards.date < unix_time_to_utc(freeze))\n\n results = union_all(scores, awards).alias(\"results\")\n\n sumscores = (\n db.session.query(\n results.columns.team_id,\n db.func.sum(results.columns.score).label(\"score\"),\n db.func.max(results.columns.id).label(\"id\"),\n db.func.max(results.columns.date).label(\"date\"),\n )\n .group_by(results.columns.team_id)\n .subquery()\n )\n\n if admin:\n standings_query = (\n db.session.query(\n Teams.id.label(\"team_id\"),\n Teams.oauth_id.label(\"oauth_id\"),\n Teams.name.label(\"name\"),\n Teams.hidden,\n Teams.banned,\n sumscores.columns.score,\n )\n .join(sumscores, Teams.id == sumscores.columns.team_id)\n .order_by(sumscores.columns.score.desc(), sumscores.columns.id)\n )\n else:\n standings_query = (\n db.session.query(\n Teams.id.label(\"team_id\"),\n Teams.oauth_id.label(\"oauth_id\"),\n Teams.name.label(\"name\"),\n sumscores.columns.score,\n )\n .join(sumscores, Teams.id == sumscores.columns.team_id)\n .filter(Teams.banned == False)\n .filter(Teams.hidden == False)\n .order_by(sumscores.columns.score.desc(), sumscores.columns.id)\n )\n\n if count is None:\n standings = standings_query.all()\n else:\n standings = standings_query.limit(count).all()\n\n return standings\n\n\[email protected](timeout=60)\ndef get_user_standings(count=None, admin=False):\n scores = (\n db.session.query(\n Solves.user_id.label(\"user_id\"),\n db.func.sum(Challenges.value).label(\"score\"),\n db.func.max(Solves.id).label(\"id\"),\n db.func.max(Solves.date).label(\"date\"),\n )\n .join(Challenges)\n .filter(Challenges.value != 0)\n .group_by(Solves.user_id)\n )\n\n awards = (\n db.session.query(\n Awards.user_id.label(\"user_id\"),\n db.func.sum(Awards.value).label(\"score\"),\n db.func.max(Awards.id).label(\"id\"),\n db.func.max(Awards.date).label(\"date\"),\n )\n .filter(Awards.value != 0)\n .group_by(Awards.user_id)\n )\n\n freeze = get_config(\"freeze\")\n if not admin and freeze:\n scores = scores.filter(Solves.date < unix_time_to_utc(freeze))\n awards = awards.filter(Awards.date < unix_time_to_utc(freeze))\n\n results = union_all(scores, awards).alias(\"results\")\n\n sumscores = (\n db.session.query(\n results.columns.user_id,\n db.func.sum(results.columns.score).label(\"score\"),\n db.func.max(results.columns.id).label(\"id\"),\n db.func.max(results.columns.date).label(\"date\"),\n )\n .group_by(results.columns.user_id)\n .subquery()\n )\n\n if admin:\n standings_query = (\n db.session.query(\n Users.id.label(\"user_id\"),\n Users.oauth_id.label(\"oauth_id\"),\n Users.name.label(\"name\"),\n Users.hidden,\n Users.banned,\n sumscores.columns.score,\n )\n .join(sumscores, Users.id == sumscores.columns.user_id)\n .order_by(sumscores.columns.score.desc(), sumscores.columns.id)\n )\n else:\n standings_query = (\n db.session.query(\n Users.id.label(\"user_id\"),\n Users.oauth_id.label(\"oauth_id\"),\n Users.name.label(\"name\"),\n sumscores.columns.score,\n )\n .join(sumscores, Users.id == sumscores.columns.user_id)\n .filter(Users.banned == False, Users.hidden == False)\n .order_by(sumscores.columns.score.desc(), sumscores.columns.id)\n )\n\n if count is None:\n standings = standings_query.all()\n else:\n standings = standings_query.limit(count).all()\n\n return standings\n", "path": "CTFd/utils/scores/__init__.py"}], "after_files": [{"content": "from sqlalchemy.sql.expression import union_all\n\nfrom CTFd.cache import cache\nfrom CTFd.models import Awards, Challenges, Solves, Teams, Users, db\nfrom CTFd.utils import get_config\nfrom 
CTFd.utils.dates import unix_time_to_utc\nfrom CTFd.utils.modes import get_model\n\n\[email protected](timeout=60)\ndef get_standings(count=None, admin=False, fields=[]):\n \"\"\"\n Get standings as a list of tuples containing account_id, name, and score e.g. [(account_id, team_name, score)].\n\n Ties are broken by who reached a given score first based on the solve ID. Two users can have the same score but one\n user will have a solve ID that is before the others. That user will be considered the tie-winner.\n\n Challenges & Awards with a value of zero are filtered out of the calculations to avoid incorrect tie breaks.\n \"\"\"\n Model = get_model()\n\n scores = (\n db.session.query(\n Solves.account_id.label(\"account_id\"),\n db.func.sum(Challenges.value).label(\"score\"),\n db.func.max(Solves.id).label(\"id\"),\n db.func.max(Solves.date).label(\"date\"),\n )\n .join(Challenges)\n .filter(Challenges.value != 0)\n .group_by(Solves.account_id)\n )\n\n awards = (\n db.session.query(\n Awards.account_id.label(\"account_id\"),\n db.func.sum(Awards.value).label(\"score\"),\n db.func.max(Awards.id).label(\"id\"),\n db.func.max(Awards.date).label(\"date\"),\n )\n .filter(Awards.value != 0)\n .group_by(Awards.account_id)\n )\n\n \"\"\"\n Filter out solves and awards that are before a specific time point.\n \"\"\"\n freeze = get_config(\"freeze\")\n if not admin and freeze:\n scores = scores.filter(Solves.date < unix_time_to_utc(freeze))\n awards = awards.filter(Awards.date < unix_time_to_utc(freeze))\n\n \"\"\"\n Combine awards and solves with a union. They should have the same amount of columns\n \"\"\"\n results = union_all(scores, awards).alias(\"results\")\n\n \"\"\"\n Sum each of the results by the team id to get their score.\n \"\"\"\n sumscores = (\n db.session.query(\n results.columns.account_id,\n db.func.sum(results.columns.score).label(\"score\"),\n db.func.max(results.columns.id).label(\"id\"),\n db.func.max(results.columns.date).label(\"date\"),\n )\n .group_by(results.columns.account_id)\n .subquery()\n )\n\n \"\"\"\n Admins can see scores for all users but the public cannot see banned users.\n\n Filters out banned users.\n Properly resolves value ties by ID.\n\n Different databases treat time precision differently so resolve by the row ID instead.\n \"\"\"\n if admin:\n standings_query = (\n db.session.query(\n Model.id.label(\"account_id\"),\n Model.oauth_id.label(\"oauth_id\"),\n Model.name.label(\"name\"),\n Model.hidden,\n Model.banned,\n sumscores.columns.score,\n *fields,\n )\n .join(sumscores, Model.id == sumscores.columns.account_id)\n .order_by(sumscores.columns.score.desc(), sumscores.columns.id)\n )\n else:\n standings_query = (\n db.session.query(\n Model.id.label(\"account_id\"),\n Model.oauth_id.label(\"oauth_id\"),\n Model.name.label(\"name\"),\n sumscores.columns.score,\n *fields,\n )\n .join(sumscores, Model.id == sumscores.columns.account_id)\n .filter(Model.banned == False, Model.hidden == False)\n .order_by(sumscores.columns.score.desc(), sumscores.columns.id)\n )\n\n \"\"\"\n Only select a certain amount of users if asked.\n \"\"\"\n if count is None:\n standings = standings_query.all()\n else:\n standings = standings_query.limit(count).all()\n\n return standings\n\n\[email protected](timeout=60)\ndef get_team_standings(count=None, admin=False, fields=[]):\n scores = (\n db.session.query(\n Solves.team_id.label(\"team_id\"),\n db.func.sum(Challenges.value).label(\"score\"),\n db.func.max(Solves.id).label(\"id\"),\n db.func.max(Solves.date).label(\"date\"),\n 
)\n .join(Challenges)\n .filter(Challenges.value != 0)\n .group_by(Solves.team_id)\n )\n\n awards = (\n db.session.query(\n Awards.team_id.label(\"team_id\"),\n db.func.sum(Awards.value).label(\"score\"),\n db.func.max(Awards.id).label(\"id\"),\n db.func.max(Awards.date).label(\"date\"),\n )\n .filter(Awards.value != 0)\n .group_by(Awards.team_id)\n )\n\n freeze = get_config(\"freeze\")\n if not admin and freeze:\n scores = scores.filter(Solves.date < unix_time_to_utc(freeze))\n awards = awards.filter(Awards.date < unix_time_to_utc(freeze))\n\n results = union_all(scores, awards).alias(\"results\")\n\n sumscores = (\n db.session.query(\n results.columns.team_id,\n db.func.sum(results.columns.score).label(\"score\"),\n db.func.max(results.columns.id).label(\"id\"),\n db.func.max(results.columns.date).label(\"date\"),\n )\n .group_by(results.columns.team_id)\n .subquery()\n )\n\n if admin:\n standings_query = (\n db.session.query(\n Teams.id.label(\"team_id\"),\n Teams.oauth_id.label(\"oauth_id\"),\n Teams.name.label(\"name\"),\n Teams.hidden,\n Teams.banned,\n sumscores.columns.score,\n *fields,\n )\n .join(sumscores, Teams.id == sumscores.columns.team_id)\n .order_by(sumscores.columns.score.desc(), sumscores.columns.id)\n )\n else:\n standings_query = (\n db.session.query(\n Teams.id.label(\"team_id\"),\n Teams.oauth_id.label(\"oauth_id\"),\n Teams.name.label(\"name\"),\n sumscores.columns.score,\n *fields,\n )\n .join(sumscores, Teams.id == sumscores.columns.team_id)\n .filter(Teams.banned == False)\n .filter(Teams.hidden == False)\n .order_by(sumscores.columns.score.desc(), sumscores.columns.id)\n )\n\n if count is None:\n standings = standings_query.all()\n else:\n standings = standings_query.limit(count).all()\n\n return standings\n\n\[email protected](timeout=60)\ndef get_user_standings(count=None, admin=False, fields=[]):\n scores = (\n db.session.query(\n Solves.user_id.label(\"user_id\"),\n db.func.sum(Challenges.value).label(\"score\"),\n db.func.max(Solves.id).label(\"id\"),\n db.func.max(Solves.date).label(\"date\"),\n )\n .join(Challenges)\n .filter(Challenges.value != 0)\n .group_by(Solves.user_id)\n )\n\n awards = (\n db.session.query(\n Awards.user_id.label(\"user_id\"),\n db.func.sum(Awards.value).label(\"score\"),\n db.func.max(Awards.id).label(\"id\"),\n db.func.max(Awards.date).label(\"date\"),\n )\n .filter(Awards.value != 0)\n .group_by(Awards.user_id)\n )\n\n freeze = get_config(\"freeze\")\n if not admin and freeze:\n scores = scores.filter(Solves.date < unix_time_to_utc(freeze))\n awards = awards.filter(Awards.date < unix_time_to_utc(freeze))\n\n results = union_all(scores, awards).alias(\"results\")\n\n sumscores = (\n db.session.query(\n results.columns.user_id,\n db.func.sum(results.columns.score).label(\"score\"),\n db.func.max(results.columns.id).label(\"id\"),\n db.func.max(results.columns.date).label(\"date\"),\n )\n .group_by(results.columns.user_id)\n .subquery()\n )\n\n if admin:\n standings_query = (\n db.session.query(\n Users.id.label(\"user_id\"),\n Users.oauth_id.label(\"oauth_id\"),\n Users.name.label(\"name\"),\n Users.hidden,\n Users.banned,\n sumscores.columns.score,\n *fields,\n )\n .join(sumscores, Users.id == sumscores.columns.user_id)\n .order_by(sumscores.columns.score.desc(), sumscores.columns.id)\n )\n else:\n standings_query = (\n db.session.query(\n Users.id.label(\"user_id\"),\n Users.oauth_id.label(\"oauth_id\"),\n Users.name.label(\"name\"),\n sumscores.columns.score,\n *fields,\n )\n .join(sumscores, Users.id == 
sumscores.columns.user_id)\n .filter(Users.banned == False, Users.hidden == False)\n .order_by(sumscores.columns.score.desc(), sumscores.columns.id)\n )\n\n if count is None:\n standings = standings_query.all()\n else:\n standings = standings_query.limit(count).all()\n\n return standings\n", "path": "CTFd/utils/scores/__init__.py"}]}
| 2,911 | 683 |
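
A hedged, caller-side sketch of the `fields` parameter introduced by the golden diff in the record above. It assumes a running CTFd application context, and `Model.website` is an illustrative column chosen for the example rather than anything specified by the record.

```python
# Hypothetical usage of the new `fields` argument added by the diff above.
# Assumes a CTFd application context; `Model.website` is an illustrative column
# and real callers may want different ones.
from CTFd.utils.modes import get_model
from CTFd.utils.scores import get_standings

Model = get_model()  # resolves to Users or Teams depending on the configured mode

# Each entry in `fields` is splatted into the SELECT list, so result rows carry it.
standings = get_standings(count=10, fields=[Model.website.label("website")])

for row in standings:
    print(row.account_id, row.name, row.score, row.website)
```
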
gh_patches_debug_10578 | rasdani/github-patches | git_diff | DDMAL__CantusDB-1167 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
We should disable the remap_user_ids command for the time being
We have changes on Staging that need to make their way to Production soon.
The `remap_user_ids` command is not working properly (#1165).
We should disable the command for now so we can deploy recent changes to Production.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `django/cantusdb_project/main_app/management/commands/remap_user_ids.py`
Content:
```
1 from main_app.models import Source, Chant
2 from django.contrib.auth import get_user_model
3 from django.core.management.base import BaseCommand
4 from sys import stdout
5 from django.db.models.query import QuerySet
6 from typing import Optional
7
8 User = get_user_model()
9
10 USER_ID_MAPPING = {
11 # Fake user accounts with sequential numbering were created on NewCantus
12 # for OldCantus Indexers. In the time since user accounts were
13 # programmatically synced, new user accounts were created on OldCantus,
14 # which duplicated these IDs. Then, we manually created new user accounts
15 # on NewCantus for these newer users, with new IDs that don't match those
16 # in OldCantus.
17 #
18 # In this dictionary:
19 # - Keys represent the IDs of users recently created on OldCantus, which collide
20 # with those of NewCantus Indexers
21 # - Values represent the IDs of manually-created users in NewCantus.
22 251610: 251660,
23 251611: 251661,
24 251612: 251662,
25 251613: 251663,
26 251614: 251664,
27 251616: 251665,
28 251617: 251666,
29 251618: 251667,
30 251619: 251668,
31 251620: 251669,
32 251621: 251670,
33 251622: 251671,
34 251623: 251672,
35 251624: 251673,
36 251625: 251674,
37 251626: 251657,
38 251627: 251675,
39 251630: 251676,
40 251632: 251678,
41 251633: 251679,
42 251638: 251656,
43 251639: 251680,
44 251640: 251681,
45 251641: 251682,
46 251642: 251683,
47 251643: 251684,
48 251645: 251685,
49 }
50
51
52 def reassign_sources() -> None:
53 CHUNK_SIZE = 1_000
54 sources: QuerySet[Source] = Source.objects.all()
55 sources_count: int = sources.count()
56 start_index: int = 0
57 while start_index <= sources_count:
58 stdout.write(f"processing chunk with {start_index=}\n")
59 chunk: QuerySet[Source] = sources[start_index : start_index + CHUNK_SIZE]
60 for source in chunk:
61 old_creator: Optional[User] = source.created_by
62
63 updated_id: Optional[int] = None
64 try:
65 updated_id: int = USER_ID_MAPPING[old_creator.id]
66 except (
67 KeyError, # old_creator.id not in USER_ID_MAPPING
68 AttributeError, # old_creator is None
69 ):
70 pass
71
72 if updated_id is None:
73 # user ID doesn't need to be remapped
74 continue
75
76 updated_creator: Optional[User] = None
77 try:
78 updated_creator = User.objects.get(id=updated_id)
79 except (
80 User.DoesNotExist,
81 AttributeError,
82 ):
83 pass
84
85 source.created_by = updated_creator
86 source.save()
87 start_index += CHUNK_SIZE
88
89
90 def reassign_chants() -> None:
91 CHUNK_SIZE = 1_000
92 chants: QuerySet[Chant] = Chant.objects.all()
93 chants_count: int = chants.count()
94 start_index: int = 0
95 while start_index <= chants_count:
96 stdout.write(f"processing chunk with {start_index=}\n")
97 chunk: QuerySet[Chant] = chants[start_index : start_index + CHUNK_SIZE]
98 for chant in chunk:
99 old_creator: Optional[User] = chant.created_by
100
101 updated_id: Optional[int] = None
102 try:
103 updated_id: int = USER_ID_MAPPING[old_creator.id]
104 except (
105 KeyError, # old_creator.id not in USER_ID_MAPPING
106 AttributeError, # old_creator is None
107 ):
108 pass
109
110 if updated_id is None:
111 # user ID doesn't need to be remapped
112 continue
113
114 updated_creator: Optional[User] = None
115 try:
116 updated_creator = User.objects.get(id=updated_id)
117 except User.DoesNotExist:
118 pass
119
120 chant.created_by = updated_creator
121 chant.save()
122 start_index += CHUNK_SIZE
123
124
125 class Command(BaseCommand):
126 def handle(self, *args, **kwargs) -> None:
127 stdout.write("\n\n==== Reassigning Sources ====\n")
128 reassign_sources()
129 stdout.write("\n== All sources successfully remapped! ==\n")
130 stdout.write("\n\n==== Reassigning Chants ====\n")
131 reassign_chants()
132 stdout.write("\n== All chants successfully remapped! ==\n")
133
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/django/cantusdb_project/main_app/management/commands/remap_user_ids.py b/django/cantusdb_project/main_app/management/commands/remap_user_ids.py
--- a/django/cantusdb_project/main_app/management/commands/remap_user_ids.py
+++ b/django/cantusdb_project/main_app/management/commands/remap_user_ids.py
@@ -124,6 +124,11 @@
class Command(BaseCommand):
def handle(self, *args, **kwargs) -> None:
+ error_message = (
+ "As of late November 2023, this command is not working. "
+ "It has been temporarily disabled until the bugs have been worked out."
+ )
+ raise NotImplementedError(error_message)
stdout.write("\n\n==== Reassigning Sources ====\n")
reassign_sources()
stdout.write("\n== All sources successfully remapped! ==\n")
|
{"golden_diff": "diff --git a/django/cantusdb_project/main_app/management/commands/remap_user_ids.py b/django/cantusdb_project/main_app/management/commands/remap_user_ids.py\n--- a/django/cantusdb_project/main_app/management/commands/remap_user_ids.py\n+++ b/django/cantusdb_project/main_app/management/commands/remap_user_ids.py\n@@ -124,6 +124,11 @@\n \n class Command(BaseCommand):\n def handle(self, *args, **kwargs) -> None:\n+ error_message = (\n+ \"As of late November 2023, this command is not working. \"\n+ \"It has been temporarily disabled until the bugs have been worked out.\"\n+ )\n+ raise NotImplementedError(error_message)\n stdout.write(\"\\n\\n==== Reassigning Sources ====\\n\")\n reassign_sources()\n stdout.write(\"\\n== All sources successfully remapped! ==\\n\")\n", "issue": "We should disable the remap_user_ids command for the time being\nWe have changes on Staging that need to make their way to Production soon.\r\n\r\nThe `remap_user_ids` command is not working properly (#1165).\r\n\r\nWe should disable the command for now so we can deploy recent changes to Production.\n", "before_files": [{"content": "from main_app.models import Source, Chant\nfrom django.contrib.auth import get_user_model\nfrom django.core.management.base import BaseCommand\nfrom sys import stdout\nfrom django.db.models.query import QuerySet\nfrom typing import Optional\n\nUser = get_user_model()\n\nUSER_ID_MAPPING = {\n # Fake user accounts with sequential numbering were created on NewCantus\n # for OldCantus Indexers. In the time since user accounts were\n # programmatically synced, new user accounts were created on OldCantus,\n # which duplicated these IDs. Then, we manually created new user accounts\n # on NewCantus for these newer users, with new IDs that don't match those\n # in OldCantus.\n #\n # In this dictionary:\n # - Keys represent the IDs of users recently created on OldCantus, which collide\n # with those of NewCantus Indexers\n # - Values represent the IDs of manually-created users in NewCantus.\n 251610: 251660,\n 251611: 251661,\n 251612: 251662,\n 251613: 251663,\n 251614: 251664,\n 251616: 251665,\n 251617: 251666,\n 251618: 251667,\n 251619: 251668,\n 251620: 251669,\n 251621: 251670,\n 251622: 251671,\n 251623: 251672,\n 251624: 251673,\n 251625: 251674,\n 251626: 251657,\n 251627: 251675,\n 251630: 251676,\n 251632: 251678,\n 251633: 251679,\n 251638: 251656,\n 251639: 251680,\n 251640: 251681,\n 251641: 251682,\n 251642: 251683,\n 251643: 251684,\n 251645: 251685,\n}\n\n\ndef reassign_sources() -> None:\n CHUNK_SIZE = 1_000\n sources: QuerySet[Source] = Source.objects.all()\n sources_count: int = sources.count()\n start_index: int = 0\n while start_index <= sources_count:\n stdout.write(f\"processing chunk with {start_index=}\\n\")\n chunk: QuerySet[Source] = sources[start_index : start_index + CHUNK_SIZE]\n for source in chunk:\n old_creator: Optional[User] = source.created_by\n\n updated_id: Optional[int] = None\n try:\n updated_id: int = USER_ID_MAPPING[old_creator.id]\n except (\n KeyError, # old_creator.id not in USER_ID_MAPPING\n AttributeError, # old_creator is None\n ):\n pass\n\n if updated_id is None:\n # user ID doesn't need to be remapped\n continue\n\n updated_creator: Optional[User] = None\n try:\n updated_creator = User.objects.get(id=updated_id)\n except (\n User.DoesNotExist,\n AttributeError,\n ):\n pass\n\n source.created_by = updated_creator\n source.save()\n start_index += CHUNK_SIZE\n\n\ndef reassign_chants() -> None:\n CHUNK_SIZE = 1_000\n chants: 
QuerySet[Chant] = Chant.objects.all()\n chants_count: int = chants.count()\n start_index: int = 0\n while start_index <= chants_count:\n stdout.write(f\"processing chunk with {start_index=}\\n\")\n chunk: QuerySet[Chant] = chants[start_index : start_index + CHUNK_SIZE]\n for chant in chunk:\n old_creator: Optional[User] = chant.created_by\n\n updated_id: Optional[int] = None\n try:\n updated_id: int = USER_ID_MAPPING[old_creator.id]\n except (\n KeyError, # old_creator.id not in USER_ID_MAPPING\n AttributeError, # old_creator is None\n ):\n pass\n\n if updated_id is None:\n # user ID doesn't need to be remapped\n continue\n\n updated_creator: Optional[User] = None\n try:\n updated_creator = User.objects.get(id=updated_id)\n except User.DoesNotExist:\n pass\n\n chant.created_by = updated_creator\n chant.save()\n start_index += CHUNK_SIZE\n\n\nclass Command(BaseCommand):\n def handle(self, *args, **kwargs) -> None:\n stdout.write(\"\\n\\n==== Reassigning Sources ====\\n\")\n reassign_sources()\n stdout.write(\"\\n== All sources successfully remapped! ==\\n\")\n stdout.write(\"\\n\\n==== Reassigning Chants ====\\n\")\n reassign_chants()\n stdout.write(\"\\n== All chants successfully remapped! ==\\n\")\n", "path": "django/cantusdb_project/main_app/management/commands/remap_user_ids.py"}], "after_files": [{"content": "from main_app.models import Source, Chant\nfrom django.contrib.auth import get_user_model\nfrom django.core.management.base import BaseCommand\nfrom sys import stdout\nfrom django.db.models.query import QuerySet\nfrom typing import Optional\n\nUser = get_user_model()\n\nUSER_ID_MAPPING = {\n # Fake user accounts with sequential numbering were created on NewCantus\n # for OldCantus Indexers. In the time since user accounts were\n # programmatically synced, new user accounts were created on OldCantus,\n # which duplicated these IDs. 
Then, we manually created new user accounts\n # on NewCantus for these newer users, with new IDs that don't match those\n # in OldCantus.\n #\n # In this dictionary:\n # - Keys represent the IDs of users recently created on OldCantus, which collide\n # with those of NewCantus Indexers\n # - Values represent the IDs of manually-created users in NewCantus.\n 251610: 251660,\n 251611: 251661,\n 251612: 251662,\n 251613: 251663,\n 251614: 251664,\n 251616: 251665,\n 251617: 251666,\n 251618: 251667,\n 251619: 251668,\n 251620: 251669,\n 251621: 251670,\n 251622: 251671,\n 251623: 251672,\n 251624: 251673,\n 251625: 251674,\n 251626: 251657,\n 251627: 251675,\n 251630: 251676,\n 251632: 251678,\n 251633: 251679,\n 251638: 251656,\n 251639: 251680,\n 251640: 251681,\n 251641: 251682,\n 251642: 251683,\n 251643: 251684,\n 251645: 251685,\n}\n\n\ndef reassign_sources() -> None:\n CHUNK_SIZE = 1_000\n sources: QuerySet[Source] = Source.objects.all()\n sources_count: int = sources.count()\n start_index: int = 0\n while start_index <= sources_count:\n stdout.write(f\"processing chunk with {start_index=}\\n\")\n chunk: QuerySet[Source] = sources[start_index : start_index + CHUNK_SIZE]\n for source in chunk:\n old_creator: Optional[User] = source.created_by\n\n updated_id: Optional[int] = None\n try:\n updated_id: int = USER_ID_MAPPING[old_creator.id]\n except (\n KeyError, # old_creator.id not in USER_ID_MAPPING\n AttributeError, # old_creator is None\n ):\n pass\n\n if updated_id is None:\n # user ID doesn't need to be remapped\n continue\n\n updated_creator: Optional[User] = None\n try:\n updated_creator = User.objects.get(id=updated_id)\n except (\n User.DoesNotExist,\n AttributeError,\n ):\n pass\n\n source.created_by = updated_creator\n source.save()\n start_index += CHUNK_SIZE\n\n\ndef reassign_chants() -> None:\n CHUNK_SIZE = 1_000\n chants: QuerySet[Chant] = Chant.objects.all()\n chants_count: int = chants.count()\n start_index: int = 0\n while start_index <= chants_count:\n stdout.write(f\"processing chunk with {start_index=}\\n\")\n chunk: QuerySet[Chant] = chants[start_index : start_index + CHUNK_SIZE]\n for chant in chunk:\n old_creator: Optional[User] = chant.created_by\n\n updated_id: Optional[int] = None\n try:\n updated_id: int = USER_ID_MAPPING[old_creator.id]\n except (\n KeyError, # old_creator.id not in USER_ID_MAPPING\n AttributeError, # old_creator is None\n ):\n pass\n\n if updated_id is None:\n # user ID doesn't need to be remapped\n continue\n\n updated_creator: Optional[User] = None\n try:\n updated_creator = User.objects.get(id=updated_id)\n except User.DoesNotExist:\n pass\n\n chant.created_by = updated_creator\n chant.save()\n start_index += CHUNK_SIZE\n\n\nclass Command(BaseCommand):\n def handle(self, *args, **kwargs) -> None:\n error_message = (\n \"As of late November 2023, this command is not working. \"\n \"It has been temporarily disabled until the bugs have been worked out.\"\n )\n raise NotImplementedError(error_message)\n stdout.write(\"\\n\\n==== Reassigning Sources ====\\n\")\n reassign_sources()\n stdout.write(\"\\n== All sources successfully remapped! ==\\n\")\n stdout.write(\"\\n\\n==== Reassigning Chants ====\\n\")\n reassign_chants()\n stdout.write(\"\\n== All chants successfully remapped! ==\\n\")\n", "path": "django/cantusdb_project/main_app/management/commands/remap_user_ids.py"}]}
| 1,909 | 207 |
gh_patches_debug_43470
|
rasdani/github-patches
|
git_diff
|
opendatacube__datacube-core-386
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
No index driver found for 'default'
Greetings!
Using the latest Datacube version and libraries, indexing and ingestion run well, but when I try to access the database from Jupyter I get the "No index driver found for 'default': 0 available: " error.
> RuntimeError Traceback (most recent call last)
> ipython-input-2-f074b535470c> in <module>()
> 1
> 2 import datacube
> ----> 3 dc = datacube.Datacube(config='E:\Datacubes\datacube-core-develop\datacube.conf')
> 4 dc
>
> E:\Datacubes\datacube-core-develop\datacube\api\core.py in __init__(self, index, config, app, env, validate_connection)
> 116 index = index_connect(normalise_config(config),
> 117 application_name=app,
> --> 118 validate_connection=validate_connection)
> 119
> 120 self.index = index
>
> E:\Datacubes\datacube-core-develop\datacube\index\_api.py in index_connect(local_config, application_name, validate_connection)
> 37 raise RuntimeError(
> 38 "No index driver found for %r. %s available: %s" % (
> ---> 39 driver_name, len(index_drivers()), ', '.join(index_drivers())
> 40 )
> 41 )
>
> RuntimeError: No index driver found for 'default'. 0 available:
I've reinstalled PostgreSQL and all libraries, to no avail.
All suggestions are welcome.
**edit:** During ingestion I get this warning
> datacube.drivers.driver_cache WARNING Failed to resolve driver datacube.plugins.io.write::s3aio
> datacube.drivers.driver_cache WARNING Failed to resolve driver datacube.plugins.io.write::s3aio_test
where do I update the driver from? Is it because of this plugin?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `datacube/drivers/driver_cache.py`
Content:
```
1 from __future__ import absolute_import, print_function
2
3 import logging
4
5 from pkg_resources import iter_entry_points
6
7 _LOG = logging.getLogger(__name__)
8
9
10 def load_drivers(group):
11 """
12 Load available drivers for a given group name.
13
14 Gracefully handles:
15
16 - Driver module not able to be imported
17 - Driver init function throwing an exception or returning None
18
19 By having driver entry_points pointing to a function, we defer loading the driver
20 module or running any code until required.
21
22 :param str group: Name of the entry point group e.g. "datacube.plugins.io.read"
23
24 :returns: Dictionary String -> Driver Object
25 """
26
27 def safe_load(ep):
28 # pylint: disable=bare-except
29 try:
30 driver_init = ep.resolve()
31 except:
32 _LOG.warning('Failed to resolve driver %s::%s', group, ep.name)
33 return None
34
35 try:
36 driver = driver_init()
37 except:
38 _LOG.warning('Exception during driver init, driver name: %s::%s', group, ep.name)
39 return None
40
41 if driver is None:
42 _LOG.warning('Driver init returned None, driver name: %s::%s', group, ep.name)
43
44 return driver
45
46 def resolve_all(group):
47 for ep in iter_entry_points(group=group, name=None):
48 driver = safe_load(ep)
49 if driver is not None:
50 yield (ep.name, driver)
51
52 return dict((name, driver) for name, driver in resolve_all(group))
53
```
Path: `datacube/drivers/indexes.py`
Content:
```
1 from __future__ import absolute_import
2
3 from .driver_cache import load_drivers
4
5
6 class IndexDriverCache(object):
7 def __init__(self, group):
8 self._drivers = load_drivers(group)
9
10 for driver in list(self._drivers.values()):
11 if hasattr(driver, 'aliases'):
12 for alias in driver.aliases:
13 self._drivers[alias] = driver
14
15 def __call__(self, name):
16 """
17 :returns: None if driver with a given name is not found
18
19 :param str name: Driver name
20 :param str fmt: Dataset format
21 :return: Returns WriterDriver
22 """
23 return self._drivers.get(name, None)
24
25 def drivers(self):
26 """ Returns list of driver names
27 """
28 return list(self._drivers.keys())
29
30
31 def index_cache():
32 """ Singleton for WriterDriverCache
33 """
34 # pylint: disable=protected-access
35 if not hasattr(index_cache, '_instance'):
36 index_cache._instance = IndexDriverCache('datacube.plugins.index')
37 return index_cache._instance
38
39
40 def index_drivers():
41 """ Returns list driver names
42 """
43 return index_cache().drivers()
44
45
46 def index_driver_by_name(name):
47 """ Lookup writer driver by name
48
49 :returns: Initialised writer driver instance
50 :returns: None if driver with this name doesn't exist
51 """
52 return index_cache()(name)
53
```
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 import versioneer
4 from setuptools import setup, find_packages
5
6 tests_require = [
7 'compliance-checker',
8 'hypothesis',
9 'mock',
10 'objgraph',
11 'pycodestyle',
12 'pylint',
13 'pytest',
14 'pytest-cov',
15 'pytest-timeout',
16 ]
17
18 extras_require = {
19 'performance': ['ciso8601', 'bottleneck'],
20 'interactive': ['matplotlib', 'fiona'],
21 'distributed': ['distributed', 'dask[distributed]'],
22 'analytics': ['scipy', 'pyparsing', 'numexpr'],
23 'doc': ['Sphinx', 'setuptools'],
24 'replicas': ['paramiko', 'sshtunnel', 'tqdm'],
25 'celery': ['celery>=4', 'redis'],
26 's3': ['boto3==1.4.3', 'SharedArray', 'pathos', 'zstandard'],
27 'test': tests_require,
28 }
29 # An 'all' option, following ipython naming conventions.
30 extras_require['all'] = sorted(set(sum(extras_require.values(), [])))
31
32 setup(
33 name='datacube',
34 version=versioneer.get_version(),
35 cmdclass=versioneer.get_cmdclass(),
36 python_requires='>=3.5.2',
37
38 url='https://github.com/opendatacube/datacube-core',
39 author='AGDC Collaboration',
40 maintainer='AGDC Collaboration',
41 maintainer_email='',
42 description='An analysis environment for satellite and other earth observation data',
43 long_description=open('README.rst').read(),
44 license='Apache License 2.0',
45 classifiers=[
46 "Development Status :: 4 - Beta",
47 "Intended Audience :: Developers",
48 "Intended Audience :: Science/Research",
49 "License :: OSI Approved :: Apache Software License",
50 "Natural Language :: English",
51 "Operating System :: MacOS :: MacOS X",
52 "Operating System :: POSIX",
53 "Operating System :: POSIX :: BSD",
54 "Operating System :: POSIX :: Linux",
55 "Operating System :: Microsoft :: Windows",
56 "Programming Language :: Python",
57 "Programming Language :: Python :: 3",
58 "Programming Language :: Python :: 3.5",
59 "Programming Language :: Python :: 3.6",
60 "Topic :: Scientific/Engineering :: GIS",
61 "Topic :: Scientific/Engineering :: Information Analysis",
62 ],
63
64 packages=find_packages(
65 exclude=('tests', 'tests.*',
66 'integration_tests', 'integration_tests.*')
67 ),
68 package_data={
69 '': ['*.yaml', '*/*.yaml'],
70 },
71 scripts=[
72 'datacube_apps/scripts/pbs_helpers.sh'
73 ],
74 setup_requires=[
75 'pytest-runner'
76 ],
77 install_requires=[
78 'affine',
79 'cachetools',
80 'click>=5.0',
81 'cloudpickle>=0.4',
82 'dask[array]',
83 'gdal>=1.9',
84 'jsonschema',
85 'netcdf4',
86 'numpy',
87 'psycopg2',
88 'pypeg2',
89 'python-dateutil',
90 'pyyaml',
91 'rasterio>=0.9a10', # required for zip reading, 0.9 gets around 1.0a ordering problems
92 'singledispatch',
93 'sqlalchemy',
94 'xarray>=0.9', # >0.9 fixes most problems with `crs` attributes being lost
95 ],
96 extras_require=extras_require,
97 tests_require=tests_require,
98
99 entry_points={
100 'console_scripts': [
101 'datacube = datacube.scripts.cli_app:cli',
102 'datacube-search = datacube.scripts.search_tool:cli',
103 'datacube-stacker = datacube_apps.stacker:main',
104 'datacube-worker = datacube.execution.worker:main',
105 'datacube-fixer = datacube_apps.stacker:fixer_main',
106 'datacube-ncml = datacube_apps.ncml:ncml_app',
107 'pixeldrill = datacube_apps.pixeldrill:main [interactive]',
108 'movie_generator = datacube_apps.movie_generator:main',
109 'datacube-simple-replica = datacube_apps.simple_replica:replicate [replicas]'
110 ],
111 'datacube.plugins.io.read': [
112 'netcdf = datacube.drivers.netcdf.driver:reader_driver_init',
113 's3aio = datacube.drivers.s3.driver:reader_driver_init',
114 's3aio_test = datacube.drivers.s3.driver:reader_test_driver_init'
115 ],
116 'datacube.plugins.io.write': [
117 'netcdf = datacube.drivers.netcdf.driver:writer_driver_init',
118 's3aio = datacube.drivers.s3.driver:writer_driver_init',
119 's3aio_test = datacube.drivers.s3.driver:writer_test_driver_init',
120 ],
121 'datacube.plugins.index': [
122 'default = datacube.index.index:index_driver_init',
123 's3aio_index = datacube.drivers.s3aio_index:index_driver_init',
124 ],
125 },
126 )
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/datacube/drivers/driver_cache.py b/datacube/drivers/driver_cache.py
--- a/datacube/drivers/driver_cache.py
+++ b/datacube/drivers/driver_cache.py
@@ -2,7 +2,7 @@
import logging
-from pkg_resources import iter_entry_points
+from pkg_resources import iter_entry_points, DistributionNotFound
_LOG = logging.getLogger(__name__)
@@ -27,7 +27,11 @@
def safe_load(ep):
# pylint: disable=bare-except
try:
- driver_init = ep.resolve()
+ driver_init = ep.load()
+ except DistributionNotFound:
+ # This happens when entry points were marked with extra features,
+ # but extra feature were not requested for installation
+ return None
except:
_LOG.warning('Failed to resolve driver %s::%s', group, ep.name)
return None
diff --git a/datacube/drivers/indexes.py b/datacube/drivers/indexes.py
--- a/datacube/drivers/indexes.py
+++ b/datacube/drivers/indexes.py
@@ -7,6 +7,10 @@
def __init__(self, group):
self._drivers = load_drivers(group)
+ if len(self._drivers) == 0:
+ from datacube.index.index import index_driver_init
+ self._drivers = dict(default=index_driver_init())
+
for driver in list(self._drivers.values()):
if hasattr(driver, 'aliases'):
for alias in driver.aliases:
@@ -17,8 +21,7 @@
:returns: None if driver with a given name is not found
:param str name: Driver name
- :param str fmt: Dataset format
- :return: Returns WriterDriver
+ :return: Returns IndexDriver
"""
return self._drivers.get(name, None)
@@ -29,7 +32,7 @@
def index_cache():
- """ Singleton for WriterDriverCache
+ """ Singleton for IndexDriverCache
"""
# pylint: disable=protected-access
if not hasattr(index_cache, '_instance'):
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -2,6 +2,7 @@
import versioneer
from setuptools import setup, find_packages
+import os
tests_require = [
'compliance-checker',
@@ -29,6 +30,22 @@
# An 'all' option, following ipython naming conventions.
extras_require['all'] = sorted(set(sum(extras_require.values(), [])))
+extra_plugins = dict(read=[], write=[], index=[])
+
+if os.name != 'nt':
+ extra_plugins['read'].extend([
+ 's3aio = datacube.drivers.s3.driver:reader_driver_init [s3]',
+ 's3aio_test = datacube.drivers.s3.driver:reader_test_driver_init [s3]',
+ ])
+ extra_plugins['write'].extend([
+ 's3aio = datacube.drivers.s3.driver:writer_driver_init [s3]',
+ 's3aio_test = datacube.drivers.s3.driver:writer_test_driver_init [s3]',
+ ])
+
+ extra_plugins['index'].extend([
+ 's3aio_index = datacube.drivers.s3aio_index:index_driver_init [s3]',
+ ])
+
setup(
name='datacube',
version=versioneer.get_version(),
@@ -110,17 +127,15 @@
],
'datacube.plugins.io.read': [
'netcdf = datacube.drivers.netcdf.driver:reader_driver_init',
- 's3aio = datacube.drivers.s3.driver:reader_driver_init',
- 's3aio_test = datacube.drivers.s3.driver:reader_test_driver_init'
+ *extra_plugins['read'],
],
'datacube.plugins.io.write': [
'netcdf = datacube.drivers.netcdf.driver:writer_driver_init',
- 's3aio = datacube.drivers.s3.driver:writer_driver_init',
- 's3aio_test = datacube.drivers.s3.driver:writer_test_driver_init',
+ *extra_plugins['write'],
],
'datacube.plugins.index': [
'default = datacube.index.index:index_driver_init',
- 's3aio_index = datacube.drivers.s3aio_index:index_driver_init',
+ *extra_plugins['index'],
],
},
)
|
{"golden_diff": "diff --git a/datacube/drivers/driver_cache.py b/datacube/drivers/driver_cache.py\n--- a/datacube/drivers/driver_cache.py\n+++ b/datacube/drivers/driver_cache.py\n@@ -2,7 +2,7 @@\n \n import logging\n \n-from pkg_resources import iter_entry_points\n+from pkg_resources import iter_entry_points, DistributionNotFound\n \n _LOG = logging.getLogger(__name__)\n \n@@ -27,7 +27,11 @@\n def safe_load(ep):\n # pylint: disable=bare-except\n try:\n- driver_init = ep.resolve()\n+ driver_init = ep.load()\n+ except DistributionNotFound:\n+ # This happens when entry points were marked with extra features,\n+ # but extra feature were not requested for installation\n+ return None\n except:\n _LOG.warning('Failed to resolve driver %s::%s', group, ep.name)\n return None\ndiff --git a/datacube/drivers/indexes.py b/datacube/drivers/indexes.py\n--- a/datacube/drivers/indexes.py\n+++ b/datacube/drivers/indexes.py\n@@ -7,6 +7,10 @@\n def __init__(self, group):\n self._drivers = load_drivers(group)\n \n+ if len(self._drivers) == 0:\n+ from datacube.index.index import index_driver_init\n+ self._drivers = dict(default=index_driver_init())\n+\n for driver in list(self._drivers.values()):\n if hasattr(driver, 'aliases'):\n for alias in driver.aliases:\n@@ -17,8 +21,7 @@\n :returns: None if driver with a given name is not found\n \n :param str name: Driver name\n- :param str fmt: Dataset format\n- :return: Returns WriterDriver\n+ :return: Returns IndexDriver\n \"\"\"\n return self._drivers.get(name, None)\n \n@@ -29,7 +32,7 @@\n \n \n def index_cache():\n- \"\"\" Singleton for WriterDriverCache\n+ \"\"\" Singleton for IndexDriverCache\n \"\"\"\n # pylint: disable=protected-access\n if not hasattr(index_cache, '_instance'):\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -2,6 +2,7 @@\n \n import versioneer\n from setuptools import setup, find_packages\n+import os\n \n tests_require = [\n 'compliance-checker',\n@@ -29,6 +30,22 @@\n # An 'all' option, following ipython naming conventions.\n extras_require['all'] = sorted(set(sum(extras_require.values(), [])))\n \n+extra_plugins = dict(read=[], write=[], index=[])\n+\n+if os.name != 'nt':\n+ extra_plugins['read'].extend([\n+ 's3aio = datacube.drivers.s3.driver:reader_driver_init [s3]',\n+ 's3aio_test = datacube.drivers.s3.driver:reader_test_driver_init [s3]',\n+ ])\n+ extra_plugins['write'].extend([\n+ 's3aio = datacube.drivers.s3.driver:writer_driver_init [s3]',\n+ 's3aio_test = datacube.drivers.s3.driver:writer_test_driver_init [s3]',\n+ ])\n+\n+ extra_plugins['index'].extend([\n+ 's3aio_index = datacube.drivers.s3aio_index:index_driver_init [s3]',\n+ ])\n+\n setup(\n name='datacube',\n version=versioneer.get_version(),\n@@ -110,17 +127,15 @@\n ],\n 'datacube.plugins.io.read': [\n 'netcdf = datacube.drivers.netcdf.driver:reader_driver_init',\n- 's3aio = datacube.drivers.s3.driver:reader_driver_init',\n- 's3aio_test = datacube.drivers.s3.driver:reader_test_driver_init'\n+ *extra_plugins['read'],\n ],\n 'datacube.plugins.io.write': [\n 'netcdf = datacube.drivers.netcdf.driver:writer_driver_init',\n- 's3aio = datacube.drivers.s3.driver:writer_driver_init',\n- 's3aio_test = datacube.drivers.s3.driver:writer_test_driver_init',\n+ *extra_plugins['write'],\n ],\n 'datacube.plugins.index': [\n 'default = datacube.index.index:index_driver_init',\n- 's3aio_index = datacube.drivers.s3aio_index:index_driver_init',\n+ *extra_plugins['index'],\n ],\n },\n )\n", "issue": "No index driver found for 'default'\nGreetings!\r\n\r\nUsing the latest 
Datacube version and libraries, indexing and ingestion runs well except when I try to access the database on Jupyter I get the \"No index driver found for 'default': 0 available: \" error.\r\n\r\n> RuntimeError Traceback (most recent call last)\r\n> ipython-input-2-f074b535470c> in <module>()\r\n> 1 \r\n> 2 import datacube\r\n> ----> 3 dc = datacube.Datacube(config='E:\\Datacubes\\datacube-core-develop\\datacube.conf')\r\n> 4 dc\r\n> \r\n> E:\\Datacubes\\datacube-core-develop\\datacube\\api\\core.py in __init__(self, index, config, app, env, validate_connection)\r\n> 116 index = index_connect(normalise_config(config),\r\n> 117 application_name=app,\r\n> --> 118 validate_connection=validate_connection)\r\n> 119 \r\n> 120 self.index = index\r\n> \r\n> E:\\Datacubes\\datacube-core-develop\\datacube\\index\\_api.py in index_connect(local_config, application_name, validate_connection)\r\n> 37 raise RuntimeError(\r\n> 38 \"No index driver found for %r. %s available: %s\" % (\r\n> ---> 39 driver_name, len(index_drivers()), ', '.join(index_drivers())\r\n> 40 )\r\n> 41 )\r\n> \r\n> RuntimeError: No index driver found for 'default'. 0 available: \r\n\r\nI've reinstalled PostgreSQL and all libraries, to no avail.\r\nAll suggestions are welcome.\r\n\r\n**edit:** While ingestion I get this warning\r\n\r\n> datacube.drivers.driver_cache WARNING Failed to resolve driver datacube.plugins.io.write::s3aio\r\n> datacube.drivers.driver_cache WARNING Failed to resolve driver datacube.plugins.io.write::s3aio_test\r\n\r\nwhere do I update the driver from? Is it because of this plugin?\n", "before_files": [{"content": "from __future__ import absolute_import, print_function\n\nimport logging\n\nfrom pkg_resources import iter_entry_points\n\n_LOG = logging.getLogger(__name__)\n\n\ndef load_drivers(group):\n \"\"\"\n Load available drivers for a given group name.\n\n Gracefully handles:\n\n - Driver module not able to be imported\n - Driver init function throwing an exception or returning None\n\n By having driver entry_points pointing to a function, we defer loading the driver\n module or running any code until required.\n\n :param str group: Name of the entry point group e.g. 
\"datacube.plugins.io.read\"\n\n :returns: Dictionary String -> Driver Object\n \"\"\"\n\n def safe_load(ep):\n # pylint: disable=bare-except\n try:\n driver_init = ep.resolve()\n except:\n _LOG.warning('Failed to resolve driver %s::%s', group, ep.name)\n return None\n\n try:\n driver = driver_init()\n except:\n _LOG.warning('Exception during driver init, driver name: %s::%s', group, ep.name)\n return None\n\n if driver is None:\n _LOG.warning('Driver init returned None, driver name: %s::%s', group, ep.name)\n\n return driver\n\n def resolve_all(group):\n for ep in iter_entry_points(group=group, name=None):\n driver = safe_load(ep)\n if driver is not None:\n yield (ep.name, driver)\n\n return dict((name, driver) for name, driver in resolve_all(group))\n", "path": "datacube/drivers/driver_cache.py"}, {"content": "from __future__ import absolute_import\n\nfrom .driver_cache import load_drivers\n\n\nclass IndexDriverCache(object):\n def __init__(self, group):\n self._drivers = load_drivers(group)\n\n for driver in list(self._drivers.values()):\n if hasattr(driver, 'aliases'):\n for alias in driver.aliases:\n self._drivers[alias] = driver\n\n def __call__(self, name):\n \"\"\"\n :returns: None if driver with a given name is not found\n\n :param str name: Driver name\n :param str fmt: Dataset format\n :return: Returns WriterDriver\n \"\"\"\n return self._drivers.get(name, None)\n\n def drivers(self):\n \"\"\" Returns list of driver names\n \"\"\"\n return list(self._drivers.keys())\n\n\ndef index_cache():\n \"\"\" Singleton for WriterDriverCache\n \"\"\"\n # pylint: disable=protected-access\n if not hasattr(index_cache, '_instance'):\n index_cache._instance = IndexDriverCache('datacube.plugins.index')\n return index_cache._instance\n\n\ndef index_drivers():\n \"\"\" Returns list driver names\n \"\"\"\n return index_cache().drivers()\n\n\ndef index_driver_by_name(name):\n \"\"\" Lookup writer driver by name\n\n :returns: Initialised writer driver instance\n :returns: None if driver with this name doesn't exist\n \"\"\"\n return index_cache()(name)\n", "path": "datacube/drivers/indexes.py"}, {"content": "#!/usr/bin/env python\n\nimport versioneer\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'compliance-checker',\n 'hypothesis',\n 'mock',\n 'objgraph',\n 'pycodestyle',\n 'pylint',\n 'pytest',\n 'pytest-cov',\n 'pytest-timeout',\n]\n\nextras_require = {\n 'performance': ['ciso8601', 'bottleneck'],\n 'interactive': ['matplotlib', 'fiona'],\n 'distributed': ['distributed', 'dask[distributed]'],\n 'analytics': ['scipy', 'pyparsing', 'numexpr'],\n 'doc': ['Sphinx', 'setuptools'],\n 'replicas': ['paramiko', 'sshtunnel', 'tqdm'],\n 'celery': ['celery>=4', 'redis'],\n 's3': ['boto3==1.4.3', 'SharedArray', 'pathos', 'zstandard'],\n 'test': tests_require,\n}\n# An 'all' option, following ipython naming conventions.\nextras_require['all'] = sorted(set(sum(extras_require.values(), [])))\n\nsetup(\n name='datacube',\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n python_requires='>=3.5.2',\n\n url='https://github.com/opendatacube/datacube-core',\n author='AGDC Collaboration',\n maintainer='AGDC Collaboration',\n maintainer_email='',\n description='An analysis environment for satellite and other earth observation data',\n long_description=open('README.rst').read(),\n license='Apache License 2.0',\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved 
:: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Topic :: Scientific/Engineering :: GIS\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n ],\n\n packages=find_packages(\n exclude=('tests', 'tests.*',\n 'integration_tests', 'integration_tests.*')\n ),\n package_data={\n '': ['*.yaml', '*/*.yaml'],\n },\n scripts=[\n 'datacube_apps/scripts/pbs_helpers.sh'\n ],\n setup_requires=[\n 'pytest-runner'\n ],\n install_requires=[\n 'affine',\n 'cachetools',\n 'click>=5.0',\n 'cloudpickle>=0.4',\n 'dask[array]',\n 'gdal>=1.9',\n 'jsonschema',\n 'netcdf4',\n 'numpy',\n 'psycopg2',\n 'pypeg2',\n 'python-dateutil',\n 'pyyaml',\n 'rasterio>=0.9a10', # required for zip reading, 0.9 gets around 1.0a ordering problems\n 'singledispatch',\n 'sqlalchemy',\n 'xarray>=0.9', # >0.9 fixes most problems with `crs` attributes being lost\n ],\n extras_require=extras_require,\n tests_require=tests_require,\n\n entry_points={\n 'console_scripts': [\n 'datacube = datacube.scripts.cli_app:cli',\n 'datacube-search = datacube.scripts.search_tool:cli',\n 'datacube-stacker = datacube_apps.stacker:main',\n 'datacube-worker = datacube.execution.worker:main',\n 'datacube-fixer = datacube_apps.stacker:fixer_main',\n 'datacube-ncml = datacube_apps.ncml:ncml_app',\n 'pixeldrill = datacube_apps.pixeldrill:main [interactive]',\n 'movie_generator = datacube_apps.movie_generator:main',\n 'datacube-simple-replica = datacube_apps.simple_replica:replicate [replicas]'\n ],\n 'datacube.plugins.io.read': [\n 'netcdf = datacube.drivers.netcdf.driver:reader_driver_init',\n 's3aio = datacube.drivers.s3.driver:reader_driver_init',\n 's3aio_test = datacube.drivers.s3.driver:reader_test_driver_init'\n ],\n 'datacube.plugins.io.write': [\n 'netcdf = datacube.drivers.netcdf.driver:writer_driver_init',\n 's3aio = datacube.drivers.s3.driver:writer_driver_init',\n 's3aio_test = datacube.drivers.s3.driver:writer_test_driver_init',\n ],\n 'datacube.plugins.index': [\n 'default = datacube.index.index:index_driver_init',\n 's3aio_index = datacube.drivers.s3aio_index:index_driver_init',\n ],\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "from __future__ import absolute_import, print_function\n\nimport logging\n\nfrom pkg_resources import iter_entry_points, DistributionNotFound\n\n_LOG = logging.getLogger(__name__)\n\n\ndef load_drivers(group):\n \"\"\"\n Load available drivers for a given group name.\n\n Gracefully handles:\n\n - Driver module not able to be imported\n - Driver init function throwing an exception or returning None\n\n By having driver entry_points pointing to a function, we defer loading the driver\n module or running any code until required.\n\n :param str group: Name of the entry point group e.g. 
\"datacube.plugins.io.read\"\n\n :returns: Dictionary String -> Driver Object\n \"\"\"\n\n def safe_load(ep):\n # pylint: disable=bare-except\n try:\n driver_init = ep.load()\n except DistributionNotFound:\n # This happens when entry points were marked with extra features,\n # but extra feature were not requested for installation\n return None\n except:\n _LOG.warning('Failed to resolve driver %s::%s', group, ep.name)\n return None\n\n try:\n driver = driver_init()\n except:\n _LOG.warning('Exception during driver init, driver name: %s::%s', group, ep.name)\n return None\n\n if driver is None:\n _LOG.warning('Driver init returned None, driver name: %s::%s', group, ep.name)\n\n return driver\n\n def resolve_all(group):\n for ep in iter_entry_points(group=group, name=None):\n driver = safe_load(ep)\n if driver is not None:\n yield (ep.name, driver)\n\n return dict((name, driver) for name, driver in resolve_all(group))\n", "path": "datacube/drivers/driver_cache.py"}, {"content": "from __future__ import absolute_import\n\nfrom .driver_cache import load_drivers\n\n\nclass IndexDriverCache(object):\n def __init__(self, group):\n self._drivers = load_drivers(group)\n\n if len(self._drivers) == 0:\n from datacube.index.index import index_driver_init\n self._drivers = dict(default=index_driver_init())\n\n for driver in list(self._drivers.values()):\n if hasattr(driver, 'aliases'):\n for alias in driver.aliases:\n self._drivers[alias] = driver\n\n def __call__(self, name):\n \"\"\"\n :returns: None if driver with a given name is not found\n\n :param str name: Driver name\n :return: Returns IndexDriver\n \"\"\"\n return self._drivers.get(name, None)\n\n def drivers(self):\n \"\"\" Returns list of driver names\n \"\"\"\n return list(self._drivers.keys())\n\n\ndef index_cache():\n \"\"\" Singleton for IndexDriverCache\n \"\"\"\n # pylint: disable=protected-access\n if not hasattr(index_cache, '_instance'):\n index_cache._instance = IndexDriverCache('datacube.plugins.index')\n return index_cache._instance\n\n\ndef index_drivers():\n \"\"\" Returns list driver names\n \"\"\"\n return index_cache().drivers()\n\n\ndef index_driver_by_name(name):\n \"\"\" Lookup writer driver by name\n\n :returns: Initialised writer driver instance\n :returns: None if driver with this name doesn't exist\n \"\"\"\n return index_cache()(name)\n", "path": "datacube/drivers/indexes.py"}, {"content": "#!/usr/bin/env python\n\nimport versioneer\nfrom setuptools import setup, find_packages\nimport os\n\ntests_require = [\n 'compliance-checker',\n 'hypothesis',\n 'mock',\n 'objgraph',\n 'pycodestyle',\n 'pylint',\n 'pytest',\n 'pytest-cov',\n 'pytest-timeout',\n]\n\nextras_require = {\n 'performance': ['ciso8601', 'bottleneck'],\n 'interactive': ['matplotlib', 'fiona'],\n 'distributed': ['distributed', 'dask[distributed]'],\n 'analytics': ['scipy', 'pyparsing', 'numexpr'],\n 'doc': ['Sphinx', 'setuptools'],\n 'replicas': ['paramiko', 'sshtunnel', 'tqdm'],\n 'celery': ['celery>=4', 'redis'],\n 's3': ['boto3==1.4.3', 'SharedArray', 'pathos', 'zstandard'],\n 'test': tests_require,\n}\n# An 'all' option, following ipython naming conventions.\nextras_require['all'] = sorted(set(sum(extras_require.values(), [])))\n\nextra_plugins = dict(read=[], write=[], index=[])\n\nif os.name != 'nt':\n extra_plugins['read'].extend([\n 's3aio = datacube.drivers.s3.driver:reader_driver_init [s3]',\n 's3aio_test = datacube.drivers.s3.driver:reader_test_driver_init [s3]',\n ])\n extra_plugins['write'].extend([\n 's3aio = 
datacube.drivers.s3.driver:writer_driver_init [s3]',\n 's3aio_test = datacube.drivers.s3.driver:writer_test_driver_init [s3]',\n ])\n\n extra_plugins['index'].extend([\n 's3aio_index = datacube.drivers.s3aio_index:index_driver_init [s3]',\n ])\n\nsetup(\n name='datacube',\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n python_requires='>=3.5.2',\n\n url='https://github.com/opendatacube/datacube-core',\n author='AGDC Collaboration',\n maintainer='AGDC Collaboration',\n maintainer_email='',\n description='An analysis environment for satellite and other earth observation data',\n long_description=open('README.rst').read(),\n license='Apache License 2.0',\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Topic :: Scientific/Engineering :: GIS\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n ],\n\n packages=find_packages(\n exclude=('tests', 'tests.*',\n 'integration_tests', 'integration_tests.*')\n ),\n package_data={\n '': ['*.yaml', '*/*.yaml'],\n },\n scripts=[\n 'datacube_apps/scripts/pbs_helpers.sh'\n ],\n setup_requires=[\n 'pytest-runner'\n ],\n install_requires=[\n 'affine',\n 'cachetools',\n 'click>=5.0',\n 'cloudpickle>=0.4',\n 'dask[array]',\n 'gdal>=1.9',\n 'jsonschema',\n 'netcdf4',\n 'numpy',\n 'psycopg2',\n 'pypeg2',\n 'python-dateutil',\n 'pyyaml',\n 'rasterio>=0.9a10', # required for zip reading, 0.9 gets around 1.0a ordering problems\n 'singledispatch',\n 'sqlalchemy',\n 'xarray>=0.9', # >0.9 fixes most problems with `crs` attributes being lost\n ],\n extras_require=extras_require,\n tests_require=tests_require,\n\n entry_points={\n 'console_scripts': [\n 'datacube = datacube.scripts.cli_app:cli',\n 'datacube-search = datacube.scripts.search_tool:cli',\n 'datacube-stacker = datacube_apps.stacker:main',\n 'datacube-worker = datacube.execution.worker:main',\n 'datacube-fixer = datacube_apps.stacker:fixer_main',\n 'datacube-ncml = datacube_apps.ncml:ncml_app',\n 'pixeldrill = datacube_apps.pixeldrill:main [interactive]',\n 'movie_generator = datacube_apps.movie_generator:main',\n 'datacube-simple-replica = datacube_apps.simple_replica:replicate [replicas]'\n ],\n 'datacube.plugins.io.read': [\n 'netcdf = datacube.drivers.netcdf.driver:reader_driver_init',\n *extra_plugins['read'],\n ],\n 'datacube.plugins.io.write': [\n 'netcdf = datacube.drivers.netcdf.driver:writer_driver_init',\n *extra_plugins['write'],\n ],\n 'datacube.plugins.index': [\n 'default = datacube.index.index:index_driver_init',\n *extra_plugins['index'],\n ],\n },\n)\n", "path": "setup.py"}]}
| 2,940 | 984 |
gh_patches_debug_5603
|
rasdani/github-patches
|
git_diff
|
netbox-community__netbox-15611
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Empty search entries are being created for device asset tags
### Deployment Type
NetBox Cloud
### NetBox Version
v3.7.4
### Python Version
3.11
### Steps to Reproduce
1. Create a new device and note its database ID
2. In the NetBox shell, inspect all search entries associated with it:
```python
ct = ContentType.objects.get_for_model(Device)
device_id = 107
entries = CachedValue.objects.filter(object_type=ct, object_id=device_id)
for entry in entries:
print(f'{entry.field}: {entry.value}')
```
### Expected Behavior
Only fields which have a meaningful value set should have search entries created.
### Observed Behavior
After creating a device with a description, I see three entries for it:
```
asset_tag: None
name: device1
description: asdasdasd
```
The value of `asset_tag` is null.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netbox/netbox/search/__init__.py`
Content:
```
1 from collections import namedtuple
2
3 from django.db import models
4
5 from ipam.fields import IPAddressField, IPNetworkField
6 from netbox.registry import registry
7
8 ObjectFieldValue = namedtuple('ObjectFieldValue', ('name', 'type', 'weight', 'value'))
9
10
11 class FieldTypes:
12 FLOAT = 'float'
13 INTEGER = 'int'
14 STRING = 'str'
15 INET = 'inet'
16 CIDR = 'cidr'
17
18
19 class LookupTypes:
20 PARTIAL = 'icontains'
21 EXACT = 'iexact'
22 STARTSWITH = 'istartswith'
23 ENDSWITH = 'iendswith'
24 REGEX = 'iregex'
25
26
27 class SearchIndex:
28 """
29 Base class for building search indexes.
30
31 Attributes:
32 model: The model class for which this index is used.
33 category: The label of the group under which this indexer is categorized (for form field display). If none,
34 the name of the model's app will be used.
35 fields: An iterable of two-tuples defining the model fields to be indexed and the weight associated with each.
36 display_attrs: An iterable of additional object attributes to include when displaying search results.
37 """
38 model = None
39 category = None
40 fields = ()
41 display_attrs = ()
42
43 @staticmethod
44 def get_field_type(instance, field_name):
45 """
46 Return the data type of the specified model field.
47 """
48 field_cls = instance._meta.get_field(field_name).__class__
49 if issubclass(field_cls, (models.FloatField, models.DecimalField)):
50 return FieldTypes.FLOAT
51 if issubclass(field_cls, IPAddressField):
52 return FieldTypes.INET
53 if issubclass(field_cls, IPNetworkField):
54 return FieldTypes.CIDR
55 if issubclass(field_cls, models.IntegerField):
56 return FieldTypes.INTEGER
57 return FieldTypes.STRING
58
59 @staticmethod
60 def get_field_value(instance, field_name):
61 """
62 Return the value of the specified model field as a string.
63 """
64 return str(getattr(instance, field_name))
65
66 @classmethod
67 def get_category(cls):
68 return cls.category or cls.model._meta.app_config.verbose_name
69
70 @classmethod
71 def to_cache(cls, instance, custom_fields=None):
72 """
73 Return a list of ObjectFieldValue representing the instance fields to be cached.
74
75 Args:
76 instance: The instance being cached.
77 custom_fields: An iterable of CustomFields to include when caching the instance. If None, all custom fields
78 defined for the model will be included. (This can also be provided during bulk caching to avoid looking
79 up the available custom fields for each instance.)
80 """
81 values = []
82
83 # Capture built-in fields
84 for name, weight in cls.fields:
85 type_ = cls.get_field_type(instance, name)
86 value = cls.get_field_value(instance, name)
87 if type_ and value:
88 values.append(
89 ObjectFieldValue(name, type_, weight, value)
90 )
91
92 # Capture custom fields
93 if getattr(instance, 'custom_field_data', None):
94 if custom_fields is None:
95 custom_fields = instance.custom_fields
96 for cf in custom_fields:
97 type_ = cf.search_type
98 value = instance.custom_field_data.get(cf.name)
99 weight = cf.search_weight
100 if type_ and value and weight:
101 values.append(
102 ObjectFieldValue(f'cf_{cf.name}', type_, weight, value)
103 )
104
105 return values
106
107
108 def get_indexer(model):
109 """
110 Get the SearchIndex class for the given model.
111 """
112 label = f'{model._meta.app_label}.{model._meta.model_name}'
113
114 return registry['search'][label]
115
116
117 def register_search(cls):
118 """
119 Decorator for registering a SearchIndex class.
120 """
121 model = cls.model
122 label = f'{model._meta.app_label}.{model._meta.model_name}'
123 registry['search'][label] = cls
124
125 return cls
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/netbox/netbox/search/__init__.py b/netbox/netbox/search/__init__.py
--- a/netbox/netbox/search/__init__.py
+++ b/netbox/netbox/search/__init__.py
@@ -59,9 +59,10 @@
@staticmethod
def get_field_value(instance, field_name):
"""
- Return the value of the specified model field as a string.
+ Return the value of the specified model field as a string (or None).
"""
- return str(getattr(instance, field_name))
+ if value := getattr(instance, field_name):
+ return str(value)
@classmethod
def get_category(cls):
|
{"golden_diff": "diff --git a/netbox/netbox/search/__init__.py b/netbox/netbox/search/__init__.py\n--- a/netbox/netbox/search/__init__.py\n+++ b/netbox/netbox/search/__init__.py\n@@ -59,9 +59,10 @@\n @staticmethod\n def get_field_value(instance, field_name):\n \"\"\"\n- Return the value of the specified model field as a string.\n+ Return the value of the specified model field as a string (or None).\n \"\"\"\n- return str(getattr(instance, field_name))\n+ if value := getattr(instance, field_name):\n+ return str(value)\n \n @classmethod\n def get_category(cls):\n", "issue": "Empty search entries are being created for device asset tags\n### Deployment Type\n\nNetBox Cloud\n\n### NetBox Version\n\nv3.7.4\n\n### Python Version\n\n3.11\n\n### Steps to Reproduce\n\n1. Create a new device and note its database ID\r\n2. In the NetBox shell, inspect all search entries associated with it:\r\n\r\n```python\r\nct = ContentType.objects.get_for_model(Device)\r\ndevice_id = 107\r\nentries = CachedValue.objects.filter(object_type=ct, object_id=device_id)\r\nfor entry in entries:\r\n print(f'{entry.field}: {entry.value}')\r\n```\n\n### Expected Behavior\n\nOnly fields which have a meaningful value set should have search entries created.\n\n### Observed Behavior\n\nAfter creating a device with a description, I see three entries for it:\r\n\r\n```\r\nasset_tag: None\r\nname: device1\r\ndescription: asdasdasd\r\n```\r\n\r\nThe value of `asset_tag` is null.\n", "before_files": [{"content": "from collections import namedtuple\n\nfrom django.db import models\n\nfrom ipam.fields import IPAddressField, IPNetworkField\nfrom netbox.registry import registry\n\nObjectFieldValue = namedtuple('ObjectFieldValue', ('name', 'type', 'weight', 'value'))\n\n\nclass FieldTypes:\n FLOAT = 'float'\n INTEGER = 'int'\n STRING = 'str'\n INET = 'inet'\n CIDR = 'cidr'\n\n\nclass LookupTypes:\n PARTIAL = 'icontains'\n EXACT = 'iexact'\n STARTSWITH = 'istartswith'\n ENDSWITH = 'iendswith'\n REGEX = 'iregex'\n\n\nclass SearchIndex:\n \"\"\"\n Base class for building search indexes.\n\n Attributes:\n model: The model class for which this index is used.\n category: The label of the group under which this indexer is categorized (for form field display). 
If none,\n the name of the model's app will be used.\n fields: An iterable of two-tuples defining the model fields to be indexed and the weight associated with each.\n display_attrs: An iterable of additional object attributes to include when displaying search results.\n \"\"\"\n model = None\n category = None\n fields = ()\n display_attrs = ()\n\n @staticmethod\n def get_field_type(instance, field_name):\n \"\"\"\n Return the data type of the specified model field.\n \"\"\"\n field_cls = instance._meta.get_field(field_name).__class__\n if issubclass(field_cls, (models.FloatField, models.DecimalField)):\n return FieldTypes.FLOAT\n if issubclass(field_cls, IPAddressField):\n return FieldTypes.INET\n if issubclass(field_cls, IPNetworkField):\n return FieldTypes.CIDR\n if issubclass(field_cls, models.IntegerField):\n return FieldTypes.INTEGER\n return FieldTypes.STRING\n\n @staticmethod\n def get_field_value(instance, field_name):\n \"\"\"\n Return the value of the specified model field as a string.\n \"\"\"\n return str(getattr(instance, field_name))\n\n @classmethod\n def get_category(cls):\n return cls.category or cls.model._meta.app_config.verbose_name\n\n @classmethod\n def to_cache(cls, instance, custom_fields=None):\n \"\"\"\n Return a list of ObjectFieldValue representing the instance fields to be cached.\n\n Args:\n instance: The instance being cached.\n custom_fields: An iterable of CustomFields to include when caching the instance. If None, all custom fields\n defined for the model will be included. (This can also be provided during bulk caching to avoid looking\n up the available custom fields for each instance.)\n \"\"\"\n values = []\n\n # Capture built-in fields\n for name, weight in cls.fields:\n type_ = cls.get_field_type(instance, name)\n value = cls.get_field_value(instance, name)\n if type_ and value:\n values.append(\n ObjectFieldValue(name, type_, weight, value)\n )\n\n # Capture custom fields\n if getattr(instance, 'custom_field_data', None):\n if custom_fields is None:\n custom_fields = instance.custom_fields\n for cf in custom_fields:\n type_ = cf.search_type\n value = instance.custom_field_data.get(cf.name)\n weight = cf.search_weight\n if type_ and value and weight:\n values.append(\n ObjectFieldValue(f'cf_{cf.name}', type_, weight, value)\n )\n\n return values\n\n\ndef get_indexer(model):\n \"\"\"\n Get the SearchIndex class for the given model.\n \"\"\"\n label = f'{model._meta.app_label}.{model._meta.model_name}'\n\n return registry['search'][label]\n\n\ndef register_search(cls):\n \"\"\"\n Decorator for registering a SearchIndex class.\n \"\"\"\n model = cls.model\n label = f'{model._meta.app_label}.{model._meta.model_name}'\n registry['search'][label] = cls\n\n return cls\n", "path": "netbox/netbox/search/__init__.py"}], "after_files": [{"content": "from collections import namedtuple\n\nfrom django.db import models\n\nfrom ipam.fields import IPAddressField, IPNetworkField\nfrom netbox.registry import registry\n\nObjectFieldValue = namedtuple('ObjectFieldValue', ('name', 'type', 'weight', 'value'))\n\n\nclass FieldTypes:\n FLOAT = 'float'\n INTEGER = 'int'\n STRING = 'str'\n INET = 'inet'\n CIDR = 'cidr'\n\n\nclass LookupTypes:\n PARTIAL = 'icontains'\n EXACT = 'iexact'\n STARTSWITH = 'istartswith'\n ENDSWITH = 'iendswith'\n REGEX = 'iregex'\n\n\nclass SearchIndex:\n \"\"\"\n Base class for building search indexes.\n\n Attributes:\n model: The model class for which this index is used.\n category: The label of the group under which this indexer is categorized 
(for form field display). If none,\n the name of the model's app will be used.\n fields: An iterable of two-tuples defining the model fields to be indexed and the weight associated with each.\n display_attrs: An iterable of additional object attributes to include when displaying search results.\n \"\"\"\n model = None\n category = None\n fields = ()\n display_attrs = ()\n\n @staticmethod\n def get_field_type(instance, field_name):\n \"\"\"\n Return the data type of the specified model field.\n \"\"\"\n field_cls = instance._meta.get_field(field_name).__class__\n if issubclass(field_cls, (models.FloatField, models.DecimalField)):\n return FieldTypes.FLOAT\n if issubclass(field_cls, IPAddressField):\n return FieldTypes.INET\n if issubclass(field_cls, IPNetworkField):\n return FieldTypes.CIDR\n if issubclass(field_cls, models.IntegerField):\n return FieldTypes.INTEGER\n return FieldTypes.STRING\n\n @staticmethod\n def get_field_value(instance, field_name):\n \"\"\"\n Return the value of the specified model field as a string (or None).\n \"\"\"\n if value := getattr(instance, field_name):\n return str(value)\n\n @classmethod\n def get_category(cls):\n return cls.category or cls.model._meta.app_config.verbose_name\n\n @classmethod\n def to_cache(cls, instance, custom_fields=None):\n \"\"\"\n Return a list of ObjectFieldValue representing the instance fields to be cached.\n\n Args:\n instance: The instance being cached.\n custom_fields: An iterable of CustomFields to include when caching the instance. If None, all custom fields\n defined for the model will be included. (This can also be provided during bulk caching to avoid looking\n up the available custom fields for each instance.)\n \"\"\"\n values = []\n\n # Capture built-in fields\n for name, weight in cls.fields:\n type_ = cls.get_field_type(instance, name)\n value = cls.get_field_value(instance, name)\n if type_ and value:\n values.append(\n ObjectFieldValue(name, type_, weight, value)\n )\n\n # Capture custom fields\n if getattr(instance, 'custom_field_data', None):\n if custom_fields is None:\n custom_fields = instance.custom_fields\n for cf in custom_fields:\n type_ = cf.search_type\n value = instance.custom_field_data.get(cf.name)\n weight = cf.search_weight\n if type_ and value and weight:\n values.append(\n ObjectFieldValue(f'cf_{cf.name}', type_, weight, value)\n )\n\n return values\n\n\ndef get_indexer(model):\n \"\"\"\n Get the SearchIndex class for the given model.\n \"\"\"\n label = f'{model._meta.app_label}.{model._meta.model_name}'\n\n return registry['search'][label]\n\n\ndef register_search(cls):\n \"\"\"\n Decorator for registering a SearchIndex class.\n \"\"\"\n model = cls.model\n label = f'{model._meta.app_label}.{model._meta.model_name}'\n registry['search'][label] = cls\n\n return cls\n", "path": "netbox/netbox/search/__init__.py"}]}
| 1,573 | 149 |
gh_patches_debug_9209
|
rasdani/github-patches
|
git_diff
|
dynaconf__dynaconf-223
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
dynaconf.contrib.flask_dynaconf.DynaconfConfig to flask.config.Config
Hello, is there a way to convert a dynaconf.contrib.flask_dynaconf.DynaconfConfig object into a flask.config.Config one?
Alternatively, is there a way to convert dynaconf.contrib.flask_dynaconf.DynaconfConfig into a dict?
I have been struggling to pass a dynaconf.contrib.flask_dynaconf.DynaconfConfig to a Flask Cache constructor as a config. With flask.config.Config it works, but with the dynaconf class it doesn't :-/.
cache = Cache().init_app(app, app.config)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dynaconf/default_settings.py`
Content:
```
1 import importlib
2 import os
3 import sys
4 import warnings
5
6 from dynaconf.utils import raw_logger
7 from dynaconf.utils import RENAMED_VARS
8 from dynaconf.utils import warn_deprecations
9 from dynaconf.utils.files import find_file
10 from dynaconf.utils.parse_conf import parse_conf_data
11
12 try:
13 from dotenv import load_dotenv
14 except ImportError: # pragma: no cover
15 load_dotenv = lambda *args, **kwargs: None # noqa
16
17
18 def try_renamed(key, value, older_key, current_key):
19 if value is None:
20 if key == current_key:
21 if older_key in os.environ:
22 warnings.warn(
23 "{0} is deprecated please use {1}".format(
24 older_key, current_key
25 ),
26 DeprecationWarning,
27 )
28 value = os.environ[older_key]
29 return value
30
31
32 def get(key, default=None):
33 value = os.environ.get(key.upper())
34
35 # compatibility with renamed variables
36 for old, new in RENAMED_VARS.items():
37 value = try_renamed(key, value, old, new)
38
39 return (
40 parse_conf_data(value, tomlfy=True) if value is not None else default
41 )
42
43
44 def start_dotenv(obj=None, root_path=None):
45 # load_from_dotenv_if_installed
46 obj = obj or {}
47 _find_file = getattr(obj, "find_file", find_file)
48 root_path = (
49 root_path
50 or getattr(obj, "_root_path", None)
51 or get("ROOT_PATH_FOR_DYNACONF")
52 )
53 raw_logger().debug(
54 "Starting Dynaconf Dotenv %s",
55 "for {0}".format(root_path) if root_path else "Base",
56 )
57
58 dotenv_path = (
59 obj.get("DOTENV_PATH_FOR_DYNACONF")
60 or get("DOTENV_PATH_FOR_DYNACONF")
61 or _find_file(".env", project_root=root_path)
62 )
63
64 load_dotenv(
65 dotenv_path,
66 verbose=obj.get("DOTENV_VERBOSE_FOR_DYNACONF", False),
67 override=obj.get("DOTENV_OVERRIDE_FOR_DYNACONF", False),
68 )
69
70 warn_deprecations(os.environ)
71
72
73 def reload(*args, **kwargs):
74 start_dotenv(*args, **kwargs)
75 importlib.reload(sys.modules[__name__])
76
77
78 # default proj root
79 # pragma: no cover
80 ROOT_PATH_FOR_DYNACONF = get("ROOT_PATH_FOR_DYNACONF", None)
81
82 # Default settings file
83 default_paths = (
84 "settings.py,.secrets.py,"
85 "settings.toml,settings.tml,.secrets.toml,.secrets.tml,"
86 "settings.yaml,settings.yml,.secrets.yaml,.secrets.yml,"
87 "settings.ini,settings.conf,settings.properties,"
88 ".secrets.ini,.secrets.conf,.secrets.properties,"
89 "settings.json,.secrets.json"
90 )
91 SETTINGS_FILE_FOR_DYNACONF = get("SETTINGS_FILE_FOR_DYNACONF", default_paths)
92
93 # # ENV SETTINGS
94 # # In dynaconf 1.0.0 `NAMESPACE` got renamed to `ENV`
95
96 # The environment variable to switch current env
97 ENV_SWITCHER_FOR_DYNACONF = get(
98 "ENV_SWITCHER_FOR_DYNACONF", "ENV_FOR_DYNACONF"
99 )
100
101 # The current env by default is DEVELOPMENT
102 # to switch is needed to `export ENV_FOR_DYNACONF=PRODUCTION`
103 # or put that value in .env file
104 # this value is used only when reading files like .toml|yaml|ini|json
105 ENV_FOR_DYNACONF = get(ENV_SWITCHER_FOR_DYNACONF, "DEVELOPMENT")
106
107 # Default values is taken from DEFAULT pseudo env
108 # this value is used only when reading files like .toml|yaml|ini|json
109 DEFAULT_ENV_FOR_DYNACONF = get("DEFAULT_ENV_FOR_DYNACONF", "DEFAULT")
110
111 # Global values are taken from DYNACONF env used for exported envvars
112 # Values here overwrites all other envs
113 # This namespace is used for files and also envvars
114 ENVVAR_PREFIX_FOR_DYNACONF = get("ENVVAR_PREFIX_FOR_DYNACONF", "DYNACONF")
115
116 # The default encoding to open settings files
117 ENCODING_FOR_DYNACONF = get("ENCODING_FOR_DYNACONF", "utf-8")
118
119 # Merge objects on load
120 MERGE_ENABLED_FOR_DYNACONF = get("MERGE_ENABLED_FOR_DYNACONF", False)
121
122 # BY default `__` is the separator for nested env vars
123 # export `DYNACONF__DATABASE__server=server.com`
124 # export `DYNACONF__DATABASE__PORT=6666`
125 # Should result in settings.DATABASE == {'server': 'server.com', 'PORT': 6666}
126 # To disable it one can set `NESTED_SEPARATOR_FOR_DYNACONF=false`
127 NESTED_SEPARATOR_FOR_DYNACONF = get("NESTED_SEPARATOR_FOR_DYNACONF", "__")
128
129 # The env var specifying settings module
130 ENVVAR_FOR_DYNACONF = get("ENVVAR_FOR_DYNACONF", "SETTINGS_FILE_FOR_DYNACONF")
131
132 # Default values for redis configs
133 default_redis = {
134 "host": get("REDIS_HOST_FOR_DYNACONF", "localhost"),
135 "port": int(get("REDIS_PORT_FOR_DYNACONF", 6379)),
136 "db": int(get("REDIS_DB_FOR_DYNACONF", 0)),
137 "decode_responses": get("REDIS_DECODE_FOR_DYNACONF", True),
138 }
139 REDIS_FOR_DYNACONF = get("REDIS_FOR_DYNACONF", default_redis)
140 REDIS_ENABLED_FOR_DYNACONF = get("REDIS_ENABLED_FOR_DYNACONF", False)
141
142 # Hashicorp Vault Project
143 vault_scheme = get("VAULT_SCHEME_FOR_DYNACONF", "http")
144 vault_host = get("VAULT_HOST_FOR_DYNACONF", "localhost")
145 vault_port = get("VAULT_PORT_FOR_DYNACONF", "8200")
146 default_vault = {
147 "url": get(
148 "VAULT_URL_FOR_DYNACONF",
149 "{}://{}:{}".format(vault_scheme, vault_host, vault_port),
150 ),
151 "token": get("VAULT_TOKEN_FOR_DYNACONF", None),
152 "cert": get("VAULT_CERT_FOR_DYNACONF", None),
153 "verify": get("VAULT_VERIFY_FOR_DYNACONF", None),
154 "timeout": get("VAULT_TIMEOUT_FOR_DYNACONF", None),
155 "proxies": get("VAULT_PROXIES_FOR_DYNACONF", None),
156 "allow_redirects": get("VAULT_ALLOW_REDIRECTS_FOR_DYNACONF", None),
157 }
158 VAULT_FOR_DYNACONF = get("VAULT_FOR_DYNACONF", default_vault)
159 VAULT_ENABLED_FOR_DYNACONF = get("VAULT_ENABLED_FOR_DYNACONF", False)
160 VAULT_PATH_FOR_DYNACONF = get("VAULT_PATH_FOR_DYNACONF", "dynaconf")
161 VAULT_ROLE_ID_FOR_DYNACONF = get("VAULT_ROLE_ID_FOR_DYNACONF", None)
162 VAULT_SECRET_ID_FOR_DYNACONF = get("VAULT_SECRET_ID_FOR_DYNACONF", None)
163
164 # Only core loaders defined on this list will be invoked
165 core_loaders = ["YAML", "TOML", "INI", "JSON", "PY"]
166 CORE_LOADERS_FOR_DYNACONF = get("CORE_LOADERS_FOR_DYNACONF", core_loaders)
167
168 # External Loaders to read vars from different data stores
169 default_loaders = [
170 "dynaconf.loaders.env_loader",
171 # 'dynaconf.loaders.redis_loader'
172 # 'dynaconf.loaders.vault_loader'
173 ]
174 LOADERS_FOR_DYNACONF = get("LOADERS_FOR_DYNACONF", default_loaders)
175
176 # Errors in loaders should be silenced?
177 SILENT_ERRORS_FOR_DYNACONF = get("SILENT_ERRORS_FOR_DYNACONF", True)
178
179 # always fresh variables
180 FRESH_VARS_FOR_DYNACONF = get("FRESH_VARS_FOR_DYNACONF", [])
181
182 # debug
183 DEBUG_LEVEL_FOR_DYNACONF = get("DEBUG_LEVEL_FOR_DYNACONF", "NOTSET")
184
185 YAML = get("YAML", None)
186 TOML = get("TOML", None)
187 JSON = get("JSON", None)
188 INI = get("INI", None)
189
190 DOTENV_PATH_FOR_DYNACONF = get("DOTENV_PATH_FOR_DYNACONF", None)
191 DOTENV_VERBOSE_FOR_DYNACONF = get("DOTENV_VERBOSE_FOR_DYNACONF", False)
192 DOTENV_OVERRIDE_FOR_DYNACONF = get("DOTENV_OVERRIDE_FOR_DYNACONF", False)
193
194 # Currently this is only used by cli. INSTANCE_FOR_DYNACONF specifies python
195 # dotted path to custom LazySettings instance. Last dotted path item should be
196 # instance of LazySettings.
197 INSTANCE_FOR_DYNACONF = get("INSTANCE_FOR_DYNACONF", None)
198
199 # https://msg.pyyaml.org/load
200 YAML_LOADER_FOR_DYNACONF = get("YAML_LOADER_FOR_DYNACONF", "full_load")
201
202 # Use commentjson? https://commentjson.readthedocs.io/en/latest/
203 COMMENTJSON_ENABLED_FOR_DYNACONF = get(
204 "COMMENTJSON_ENABLED_FOR_DYNACONF", False
205 )
206
207 # Extra file, or list of files where to look for secrets
208 # useful for CI environment like jenkins
209 # where you can export this variable pointing to a local
210 # absolute path of the secrets file.
211 SECRETS_FOR_DYNACONF = get("SECRETS_FOR_DYNACONF", None)
212
213 # To include extra paths based on envvar
214 INCLUDES_FOR_DYNACONF = get("INCLUDES_FOR_DYNACONF", [])
215
216 # Files to skip if found on search tree
217 SKIP_FILES_FOR_DYNACONF = get("SKIP_FILES_FOR_DYNACONF", [])
218
219
220 # Backwards compatibility with renamed variables
221 for old, new in RENAMED_VARS.items():
222 setattr(sys.modules[__name__], old, locals()[new])
223
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/dynaconf/default_settings.py b/dynaconf/default_settings.py
--- a/dynaconf/default_settings.py
+++ b/dynaconf/default_settings.py
@@ -182,11 +182,6 @@
# debug
DEBUG_LEVEL_FOR_DYNACONF = get("DEBUG_LEVEL_FOR_DYNACONF", "NOTSET")
-YAML = get("YAML", None)
-TOML = get("TOML", None)
-JSON = get("JSON", None)
-INI = get("INI", None)
-
DOTENV_PATH_FOR_DYNACONF = get("DOTENV_PATH_FOR_DYNACONF", None)
DOTENV_VERBOSE_FOR_DYNACONF = get("DOTENV_VERBOSE_FOR_DYNACONF", False)
DOTENV_OVERRIDE_FOR_DYNACONF = get("DOTENV_OVERRIDE_FOR_DYNACONF", False)
|
{"golden_diff": "diff --git a/dynaconf/default_settings.py b/dynaconf/default_settings.py\n--- a/dynaconf/default_settings.py\n+++ b/dynaconf/default_settings.py\n@@ -182,11 +182,6 @@\n # debug\n DEBUG_LEVEL_FOR_DYNACONF = get(\"DEBUG_LEVEL_FOR_DYNACONF\", \"NOTSET\")\n \n-YAML = get(\"YAML\", None)\n-TOML = get(\"TOML\", None)\n-JSON = get(\"JSON\", None)\n-INI = get(\"INI\", None)\n-\n DOTENV_PATH_FOR_DYNACONF = get(\"DOTENV_PATH_FOR_DYNACONF\", None)\n DOTENV_VERBOSE_FOR_DYNACONF = get(\"DOTENV_VERBOSE_FOR_DYNACONF\", False)\n DOTENV_OVERRIDE_FOR_DYNACONF = get(\"DOTENV_OVERRIDE_FOR_DYNACONF\", False)\n", "issue": "dynaconf.contrib.flask_dynaconf.DynaconfConfig to flask.config.Config\nHello, is there a way to convert a dynaconf.contrib.flask_dynaconf.DynaconfConfig object into a flask.config.Config one?\r\nOtherwise, is there a way to convert dynaconf.contrib.flask_dynaconf.DynaconfConfig into a dict?\r\n\r\nI have been struggling trying to pass a dynaconf.contrib.flask_dynaconf.DynaconfConfig to a Flask Cache constructor as a config. With flask.config.Config it works but with the dynaconf class it doesn't :-/.\r\n \r\ncache = Cache().init_app(app, app.config)\r\n\n", "before_files": [{"content": "import importlib\nimport os\nimport sys\nimport warnings\n\nfrom dynaconf.utils import raw_logger\nfrom dynaconf.utils import RENAMED_VARS\nfrom dynaconf.utils import warn_deprecations\nfrom dynaconf.utils.files import find_file\nfrom dynaconf.utils.parse_conf import parse_conf_data\n\ntry:\n from dotenv import load_dotenv\nexcept ImportError: # pragma: no cover\n load_dotenv = lambda *args, **kwargs: None # noqa\n\n\ndef try_renamed(key, value, older_key, current_key):\n if value is None:\n if key == current_key:\n if older_key in os.environ:\n warnings.warn(\n \"{0} is deprecated please use {1}\".format(\n older_key, current_key\n ),\n DeprecationWarning,\n )\n value = os.environ[older_key]\n return value\n\n\ndef get(key, default=None):\n value = os.environ.get(key.upper())\n\n # compatibility with renamed variables\n for old, new in RENAMED_VARS.items():\n value = try_renamed(key, value, old, new)\n\n return (\n parse_conf_data(value, tomlfy=True) if value is not None else default\n )\n\n\ndef start_dotenv(obj=None, root_path=None):\n # load_from_dotenv_if_installed\n obj = obj or {}\n _find_file = getattr(obj, \"find_file\", find_file)\n root_path = (\n root_path\n or getattr(obj, \"_root_path\", None)\n or get(\"ROOT_PATH_FOR_DYNACONF\")\n )\n raw_logger().debug(\n \"Starting Dynaconf Dotenv %s\",\n \"for {0}\".format(root_path) if root_path else \"Base\",\n )\n\n dotenv_path = (\n obj.get(\"DOTENV_PATH_FOR_DYNACONF\")\n or get(\"DOTENV_PATH_FOR_DYNACONF\")\n or _find_file(\".env\", project_root=root_path)\n )\n\n load_dotenv(\n dotenv_path,\n verbose=obj.get(\"DOTENV_VERBOSE_FOR_DYNACONF\", False),\n override=obj.get(\"DOTENV_OVERRIDE_FOR_DYNACONF\", False),\n )\n\n warn_deprecations(os.environ)\n\n\ndef reload(*args, **kwargs):\n start_dotenv(*args, **kwargs)\n importlib.reload(sys.modules[__name__])\n\n\n# default proj root\n# pragma: no cover\nROOT_PATH_FOR_DYNACONF = get(\"ROOT_PATH_FOR_DYNACONF\", None)\n\n# Default settings file\ndefault_paths = (\n \"settings.py,.secrets.py,\"\n \"settings.toml,settings.tml,.secrets.toml,.secrets.tml,\"\n \"settings.yaml,settings.yml,.secrets.yaml,.secrets.yml,\"\n \"settings.ini,settings.conf,settings.properties,\"\n \".secrets.ini,.secrets.conf,.secrets.properties,\"\n \"settings.json,.secrets.json\"\n)\nSETTINGS_FILE_FOR_DYNACONF = 
get(\"SETTINGS_FILE_FOR_DYNACONF\", default_paths)\n\n# # ENV SETTINGS\n# # In dynaconf 1.0.0 `NAMESPACE` got renamed to `ENV`\n\n# The environment variable to switch current env\nENV_SWITCHER_FOR_DYNACONF = get(\n \"ENV_SWITCHER_FOR_DYNACONF\", \"ENV_FOR_DYNACONF\"\n)\n\n# The current env by default is DEVELOPMENT\n# to switch is needed to `export ENV_FOR_DYNACONF=PRODUCTION`\n# or put that value in .env file\n# this value is used only when reading files like .toml|yaml|ini|json\nENV_FOR_DYNACONF = get(ENV_SWITCHER_FOR_DYNACONF, \"DEVELOPMENT\")\n\n# Default values is taken from DEFAULT pseudo env\n# this value is used only when reading files like .toml|yaml|ini|json\nDEFAULT_ENV_FOR_DYNACONF = get(\"DEFAULT_ENV_FOR_DYNACONF\", \"DEFAULT\")\n\n# Global values are taken from DYNACONF env used for exported envvars\n# Values here overwrites all other envs\n# This namespace is used for files and also envvars\nENVVAR_PREFIX_FOR_DYNACONF = get(\"ENVVAR_PREFIX_FOR_DYNACONF\", \"DYNACONF\")\n\n# The default encoding to open settings files\nENCODING_FOR_DYNACONF = get(\"ENCODING_FOR_DYNACONF\", \"utf-8\")\n\n# Merge objects on load\nMERGE_ENABLED_FOR_DYNACONF = get(\"MERGE_ENABLED_FOR_DYNACONF\", False)\n\n# BY default `__` is the separator for nested env vars\n# export `DYNACONF__DATABASE__server=server.com`\n# export `DYNACONF__DATABASE__PORT=6666`\n# Should result in settings.DATABASE == {'server': 'server.com', 'PORT': 6666}\n# To disable it one can set `NESTED_SEPARATOR_FOR_DYNACONF=false`\nNESTED_SEPARATOR_FOR_DYNACONF = get(\"NESTED_SEPARATOR_FOR_DYNACONF\", \"__\")\n\n# The env var specifying settings module\nENVVAR_FOR_DYNACONF = get(\"ENVVAR_FOR_DYNACONF\", \"SETTINGS_FILE_FOR_DYNACONF\")\n\n# Default values for redis configs\ndefault_redis = {\n \"host\": get(\"REDIS_HOST_FOR_DYNACONF\", \"localhost\"),\n \"port\": int(get(\"REDIS_PORT_FOR_DYNACONF\", 6379)),\n \"db\": int(get(\"REDIS_DB_FOR_DYNACONF\", 0)),\n \"decode_responses\": get(\"REDIS_DECODE_FOR_DYNACONF\", True),\n}\nREDIS_FOR_DYNACONF = get(\"REDIS_FOR_DYNACONF\", default_redis)\nREDIS_ENABLED_FOR_DYNACONF = get(\"REDIS_ENABLED_FOR_DYNACONF\", False)\n\n# Hashicorp Vault Project\nvault_scheme = get(\"VAULT_SCHEME_FOR_DYNACONF\", \"http\")\nvault_host = get(\"VAULT_HOST_FOR_DYNACONF\", \"localhost\")\nvault_port = get(\"VAULT_PORT_FOR_DYNACONF\", \"8200\")\ndefault_vault = {\n \"url\": get(\n \"VAULT_URL_FOR_DYNACONF\",\n \"{}://{}:{}\".format(vault_scheme, vault_host, vault_port),\n ),\n \"token\": get(\"VAULT_TOKEN_FOR_DYNACONF\", None),\n \"cert\": get(\"VAULT_CERT_FOR_DYNACONF\", None),\n \"verify\": get(\"VAULT_VERIFY_FOR_DYNACONF\", None),\n \"timeout\": get(\"VAULT_TIMEOUT_FOR_DYNACONF\", None),\n \"proxies\": get(\"VAULT_PROXIES_FOR_DYNACONF\", None),\n \"allow_redirects\": get(\"VAULT_ALLOW_REDIRECTS_FOR_DYNACONF\", None),\n}\nVAULT_FOR_DYNACONF = get(\"VAULT_FOR_DYNACONF\", default_vault)\nVAULT_ENABLED_FOR_DYNACONF = get(\"VAULT_ENABLED_FOR_DYNACONF\", False)\nVAULT_PATH_FOR_DYNACONF = get(\"VAULT_PATH_FOR_DYNACONF\", \"dynaconf\")\nVAULT_ROLE_ID_FOR_DYNACONF = get(\"VAULT_ROLE_ID_FOR_DYNACONF\", None)\nVAULT_SECRET_ID_FOR_DYNACONF = get(\"VAULT_SECRET_ID_FOR_DYNACONF\", None)\n\n# Only core loaders defined on this list will be invoked\ncore_loaders = [\"YAML\", \"TOML\", \"INI\", \"JSON\", \"PY\"]\nCORE_LOADERS_FOR_DYNACONF = get(\"CORE_LOADERS_FOR_DYNACONF\", core_loaders)\n\n# External Loaders to read vars from different data stores\ndefault_loaders = [\n \"dynaconf.loaders.env_loader\",\n # 
'dynaconf.loaders.redis_loader'\n # 'dynaconf.loaders.vault_loader'\n]\nLOADERS_FOR_DYNACONF = get(\"LOADERS_FOR_DYNACONF\", default_loaders)\n\n# Errors in loaders should be silenced?\nSILENT_ERRORS_FOR_DYNACONF = get(\"SILENT_ERRORS_FOR_DYNACONF\", True)\n\n# always fresh variables\nFRESH_VARS_FOR_DYNACONF = get(\"FRESH_VARS_FOR_DYNACONF\", [])\n\n# debug\nDEBUG_LEVEL_FOR_DYNACONF = get(\"DEBUG_LEVEL_FOR_DYNACONF\", \"NOTSET\")\n\nYAML = get(\"YAML\", None)\nTOML = get(\"TOML\", None)\nJSON = get(\"JSON\", None)\nINI = get(\"INI\", None)\n\nDOTENV_PATH_FOR_DYNACONF = get(\"DOTENV_PATH_FOR_DYNACONF\", None)\nDOTENV_VERBOSE_FOR_DYNACONF = get(\"DOTENV_VERBOSE_FOR_DYNACONF\", False)\nDOTENV_OVERRIDE_FOR_DYNACONF = get(\"DOTENV_OVERRIDE_FOR_DYNACONF\", False)\n\n# Currently this is only used by cli. INSTANCE_FOR_DYNACONF specifies python\n# dotted path to custom LazySettings instance. Last dotted path item should be\n# instance of LazySettings.\nINSTANCE_FOR_DYNACONF = get(\"INSTANCE_FOR_DYNACONF\", None)\n\n# https://msg.pyyaml.org/load\nYAML_LOADER_FOR_DYNACONF = get(\"YAML_LOADER_FOR_DYNACONF\", \"full_load\")\n\n# Use commentjson? https://commentjson.readthedocs.io/en/latest/\nCOMMENTJSON_ENABLED_FOR_DYNACONF = get(\n \"COMMENTJSON_ENABLED_FOR_DYNACONF\", False\n)\n\n# Extra file, or list of files where to look for secrets\n# useful for CI environment like jenkins\n# where you can export this variable pointing to a local\n# absolute path of the secrets file.\nSECRETS_FOR_DYNACONF = get(\"SECRETS_FOR_DYNACONF\", None)\n\n# To include extra paths based on envvar\nINCLUDES_FOR_DYNACONF = get(\"INCLUDES_FOR_DYNACONF\", [])\n\n# Files to skip if found on search tree\nSKIP_FILES_FOR_DYNACONF = get(\"SKIP_FILES_FOR_DYNACONF\", [])\n\n\n# Backwards compatibility with renamed variables\nfor old, new in RENAMED_VARS.items():\n setattr(sys.modules[__name__], old, locals()[new])\n", "path": "dynaconf/default_settings.py"}], "after_files": [{"content": "import importlib\nimport os\nimport sys\nimport warnings\n\nfrom dynaconf.utils import raw_logger\nfrom dynaconf.utils import RENAMED_VARS\nfrom dynaconf.utils import warn_deprecations\nfrom dynaconf.utils.files import find_file\nfrom dynaconf.utils.parse_conf import parse_conf_data\n\ntry:\n from dotenv import load_dotenv\nexcept ImportError: # pragma: no cover\n load_dotenv = lambda *args, **kwargs: None # noqa\n\n\ndef try_renamed(key, value, older_key, current_key):\n if value is None:\n if key == current_key:\n if older_key in os.environ:\n warnings.warn(\n \"{0} is deprecated please use {1}\".format(\n older_key, current_key\n ),\n DeprecationWarning,\n )\n value = os.environ[older_key]\n return value\n\n\ndef get(key, default=None):\n value = os.environ.get(key.upper())\n\n # compatibility with renamed variables\n for old, new in RENAMED_VARS.items():\n value = try_renamed(key, value, old, new)\n\n return (\n parse_conf_data(value, tomlfy=True) if value is not None else default\n )\n\n\ndef start_dotenv(obj=None, root_path=None):\n # load_from_dotenv_if_installed\n obj = obj or {}\n _find_file = getattr(obj, \"find_file\", find_file)\n root_path = (\n root_path\n or getattr(obj, \"_root_path\", None)\n or get(\"ROOT_PATH_FOR_DYNACONF\")\n )\n raw_logger().debug(\n \"Starting Dynaconf Dotenv %s\",\n \"for {0}\".format(root_path) if root_path else \"Base\",\n )\n\n dotenv_path = (\n obj.get(\"DOTENV_PATH_FOR_DYNACONF\")\n or get(\"DOTENV_PATH_FOR_DYNACONF\")\n or _find_file(\".env\", project_root=root_path)\n )\n\n load_dotenv(\n dotenv_path,\n 
verbose=obj.get(\"DOTENV_VERBOSE_FOR_DYNACONF\", False),\n override=obj.get(\"DOTENV_OVERRIDE_FOR_DYNACONF\", False),\n )\n\n warn_deprecations(os.environ)\n\n\ndef reload(*args, **kwargs):\n start_dotenv(*args, **kwargs)\n importlib.reload(sys.modules[__name__])\n\n\n# default proj root\n# pragma: no cover\nROOT_PATH_FOR_DYNACONF = get(\"ROOT_PATH_FOR_DYNACONF\", None)\n\n# Default settings file\ndefault_paths = (\n \"settings.py,.secrets.py,\"\n \"settings.toml,settings.tml,.secrets.toml,.secrets.tml,\"\n \"settings.yaml,settings.yml,.secrets.yaml,.secrets.yml,\"\n \"settings.ini,settings.conf,settings.properties,\"\n \".secrets.ini,.secrets.conf,.secrets.properties,\"\n \"settings.json,.secrets.json\"\n)\nSETTINGS_FILE_FOR_DYNACONF = get(\"SETTINGS_FILE_FOR_DYNACONF\", default_paths)\n\n# # ENV SETTINGS\n# # In dynaconf 1.0.0 `NAMESPACE` got renamed to `ENV`\n\n# The environment variable to switch current env\nENV_SWITCHER_FOR_DYNACONF = get(\n \"ENV_SWITCHER_FOR_DYNACONF\", \"ENV_FOR_DYNACONF\"\n)\n\n# The current env by default is DEVELOPMENT\n# to switch is needed to `export ENV_FOR_DYNACONF=PRODUCTION`\n# or put that value in .env file\n# this value is used only when reading files like .toml|yaml|ini|json\nENV_FOR_DYNACONF = get(ENV_SWITCHER_FOR_DYNACONF, \"DEVELOPMENT\")\n\n# Default values is taken from DEFAULT pseudo env\n# this value is used only when reading files like .toml|yaml|ini|json\nDEFAULT_ENV_FOR_DYNACONF = get(\"DEFAULT_ENV_FOR_DYNACONF\", \"DEFAULT\")\n\n# Global values are taken from DYNACONF env used for exported envvars\n# Values here overwrites all other envs\n# This namespace is used for files and also envvars\nENVVAR_PREFIX_FOR_DYNACONF = get(\"ENVVAR_PREFIX_FOR_DYNACONF\", \"DYNACONF\")\n\n# The default encoding to open settings files\nENCODING_FOR_DYNACONF = get(\"ENCODING_FOR_DYNACONF\", \"utf-8\")\n\n# Merge objects on load\nMERGE_ENABLED_FOR_DYNACONF = get(\"MERGE_ENABLED_FOR_DYNACONF\", False)\n\n# BY default `__` is the separator for nested env vars\n# export `DYNACONF__DATABASE__server=server.com`\n# export `DYNACONF__DATABASE__PORT=6666`\n# Should result in settings.DATABASE == {'server': 'server.com', 'PORT': 6666}\n# To disable it one can set `NESTED_SEPARATOR_FOR_DYNACONF=false`\nNESTED_SEPARATOR_FOR_DYNACONF = get(\"NESTED_SEPARATOR_FOR_DYNACONF\", \"__\")\n\n# The env var specifying settings module\nENVVAR_FOR_DYNACONF = get(\"ENVVAR_FOR_DYNACONF\", \"SETTINGS_FILE_FOR_DYNACONF\")\n\n# Default values for redis configs\ndefault_redis = {\n \"host\": get(\"REDIS_HOST_FOR_DYNACONF\", \"localhost\"),\n \"port\": int(get(\"REDIS_PORT_FOR_DYNACONF\", 6379)),\n \"db\": int(get(\"REDIS_DB_FOR_DYNACONF\", 0)),\n \"decode_responses\": get(\"REDIS_DECODE_FOR_DYNACONF\", True),\n}\nREDIS_FOR_DYNACONF = get(\"REDIS_FOR_DYNACONF\", default_redis)\nREDIS_ENABLED_FOR_DYNACONF = get(\"REDIS_ENABLED_FOR_DYNACONF\", False)\n\n# Hashicorp Vault Project\nvault_scheme = get(\"VAULT_SCHEME_FOR_DYNACONF\", \"http\")\nvault_host = get(\"VAULT_HOST_FOR_DYNACONF\", \"localhost\")\nvault_port = get(\"VAULT_PORT_FOR_DYNACONF\", \"8200\")\ndefault_vault = {\n \"url\": get(\n \"VAULT_URL_FOR_DYNACONF\",\n \"{}://{}:{}\".format(vault_scheme, vault_host, vault_port),\n ),\n \"token\": get(\"VAULT_TOKEN_FOR_DYNACONF\", None),\n \"cert\": get(\"VAULT_CERT_FOR_DYNACONF\", None),\n \"verify\": get(\"VAULT_VERIFY_FOR_DYNACONF\", None),\n \"timeout\": get(\"VAULT_TIMEOUT_FOR_DYNACONF\", None),\n \"proxies\": get(\"VAULT_PROXIES_FOR_DYNACONF\", None),\n \"allow_redirects\": 
get(\"VAULT_ALLOW_REDIRECTS_FOR_DYNACONF\", None),\n}\nVAULT_FOR_DYNACONF = get(\"VAULT_FOR_DYNACONF\", default_vault)\nVAULT_ENABLED_FOR_DYNACONF = get(\"VAULT_ENABLED_FOR_DYNACONF\", False)\nVAULT_PATH_FOR_DYNACONF = get(\"VAULT_PATH_FOR_DYNACONF\", \"dynaconf\")\nVAULT_ROLE_ID_FOR_DYNACONF = get(\"VAULT_ROLE_ID_FOR_DYNACONF\", None)\nVAULT_SECRET_ID_FOR_DYNACONF = get(\"VAULT_SECRET_ID_FOR_DYNACONF\", None)\n\n# Only core loaders defined on this list will be invoked\ncore_loaders = [\"YAML\", \"TOML\", \"INI\", \"JSON\", \"PY\"]\nCORE_LOADERS_FOR_DYNACONF = get(\"CORE_LOADERS_FOR_DYNACONF\", core_loaders)\n\n# External Loaders to read vars from different data stores\ndefault_loaders = [\n \"dynaconf.loaders.env_loader\",\n # 'dynaconf.loaders.redis_loader'\n # 'dynaconf.loaders.vault_loader'\n]\nLOADERS_FOR_DYNACONF = get(\"LOADERS_FOR_DYNACONF\", default_loaders)\n\n# Errors in loaders should be silenced?\nSILENT_ERRORS_FOR_DYNACONF = get(\"SILENT_ERRORS_FOR_DYNACONF\", True)\n\n# always fresh variables\nFRESH_VARS_FOR_DYNACONF = get(\"FRESH_VARS_FOR_DYNACONF\", [])\n\n# debug\nDEBUG_LEVEL_FOR_DYNACONF = get(\"DEBUG_LEVEL_FOR_DYNACONF\", \"NOTSET\")\n\nDOTENV_PATH_FOR_DYNACONF = get(\"DOTENV_PATH_FOR_DYNACONF\", None)\nDOTENV_VERBOSE_FOR_DYNACONF = get(\"DOTENV_VERBOSE_FOR_DYNACONF\", False)\nDOTENV_OVERRIDE_FOR_DYNACONF = get(\"DOTENV_OVERRIDE_FOR_DYNACONF\", False)\n\n# Currently this is only used by cli. INSTANCE_FOR_DYNACONF specifies python\n# dotted path to custom LazySettings instance. Last dotted path item should be\n# instance of LazySettings.\nINSTANCE_FOR_DYNACONF = get(\"INSTANCE_FOR_DYNACONF\", None)\n\n# https://msg.pyyaml.org/load\nYAML_LOADER_FOR_DYNACONF = get(\"YAML_LOADER_FOR_DYNACONF\", \"full_load\")\n\n# Use commentjson? https://commentjson.readthedocs.io/en/latest/\nCOMMENTJSON_ENABLED_FOR_DYNACONF = get(\n \"COMMENTJSON_ENABLED_FOR_DYNACONF\", False\n)\n\n# Extra file, or list of files where to look for secrets\n# useful for CI environment like jenkins\n# where you can export this variable pointing to a local\n# absolute path of the secrets file.\nSECRETS_FOR_DYNACONF = get(\"SECRETS_FOR_DYNACONF\", None)\n\n# To include extra paths based on envvar\nINCLUDES_FOR_DYNACONF = get(\"INCLUDES_FOR_DYNACONF\", [])\n\n# Files to skip if found on search tree\nSKIP_FILES_FOR_DYNACONF = get(\"SKIP_FILES_FOR_DYNACONF\", [])\n\n\n# Backwards compatibility with renamed variables\nfor old, new in RENAMED_VARS.items():\n setattr(sys.modules[__name__], old, locals()[new])\n", "path": "dynaconf/default_settings.py"}]}
| 3,264 | 192 |
gh_patches_debug_11540
|
rasdani/github-patches
|
git_diff
|
plotly__dash-1493
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] dash doesn't get imported when a file named "org.py", "dash.py", or "test.py" with specific content is present in the current directory // "AttributeError: module 'dash' has no attribute 'Dash'"
**Describe your context**
```
dash (1.9.1)
dash-core-components (1.8.1)
dash-html-components (1.0.2)
dash-renderer (1.2.4)
dash-table (4.6.1)
```
**Describe the bug**
If a file named ``org.py`` is present in the current directory with the following content:
```
import dash_core_components as dcc
```
then dash doesn't import and I get the following message:
```
>>> import dash
Dash was not successfully imported. Make sure you don't have a file named
'dash.py' in your current directory.
```
**Expected behavior**
dash should import without any error.
**Additional info**
- The org.py is never imported
- If I rename the file to a different name, dash gets imported without any problem.
- The problem is also shown with ``import dash_html_components as html``
- The problem is shown both on Windows and on Linux
- Tested with python3.4, python3.6, python3.8
**Steps to replicate the problem on Linux**
```
$ mkdir mytest
$ cd mytest
$ echo "import dash_core_components as dcc" > org.py
$ python3 -m venv venv
$ . venv/bin/activate
(venv) $ pip install dash
(venv) $ python
Python 3.4.6 (default, Mar 01 2017, 16:52:22) [GCC] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import dash
Dash was not successfully imported. Make sure you don't have a file named
'dash.py' in your current directory.
(venv) $
```
if I rename the file the import works:
```
(venv) $ mv org.py othername.py
(venv) $ python
Python 3.4.6 (default, Mar 01 2017, 16:52:22) [GCC] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import dash
>>>
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dash/__init__.py`
Content:
```
1 from .dash import Dash, no_update # noqa: F401
2 from . import dependencies # noqa: F401
3 from . import development # noqa: F401
4 from . import exceptions # noqa: F401
5 from . import resources # noqa: F401
6 from .version import __version__ # noqa: F401
7 from ._callback_context import callback_context # noqa: F401
8
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/dash/__init__.py b/dash/__init__.py
--- a/dash/__init__.py
+++ b/dash/__init__.py
@@ -1,7 +1,11 @@
-from .dash import Dash, no_update # noqa: F401
-from . import dependencies # noqa: F401
-from . import development # noqa: F401
-from . import exceptions # noqa: F401
-from . import resources # noqa: F401
-from .version import __version__ # noqa: F401
-from ._callback_context import callback_context # noqa: F401
+# pylint: disable=C0413
+# __plotly_dash is for the "make sure you don't have a dash.py" check
+# must come before any other imports.
+__plotly_dash = True
+from .dash import Dash, no_update # noqa: F401,E402
+from . import dependencies # noqa: F401,E402
+from . import development # noqa: F401,E402
+from . import exceptions # noqa: F401,E402
+from . import resources # noqa: F401,E402
+from .version import __version__ # noqa: F401,E402
+from ._callback_context import callback_context # noqa: F401,E402
|
{"golden_diff": "diff --git a/dash/__init__.py b/dash/__init__.py\n--- a/dash/__init__.py\n+++ b/dash/__init__.py\n@@ -1,7 +1,11 @@\n-from .dash import Dash, no_update # noqa: F401\n-from . import dependencies # noqa: F401\n-from . import development # noqa: F401\n-from . import exceptions # noqa: F401\n-from . import resources # noqa: F401\n-from .version import __version__ # noqa: F401\n-from ._callback_context import callback_context # noqa: F401\n+# pylint: disable=C0413\n+# __plotly_dash is for the \"make sure you don't have a dash.py\" check\n+# must come before any other imports.\n+__plotly_dash = True\n+from .dash import Dash, no_update # noqa: F401,E402\n+from . import dependencies # noqa: F401,E402\n+from . import development # noqa: F401,E402\n+from . import exceptions # noqa: F401,E402\n+from . import resources # noqa: F401,E402\n+from .version import __version__ # noqa: F401,E402\n+from ._callback_context import callback_context # noqa: F401,E402\n", "issue": "[BUG] dash doesn't get imported when a file named \"org.py\", \"dash.py\", or \"test.py\" with specific content is present in the current directory // \"AttributeError: module 'dash' has no attribute 'Dash'\"\n**Describe your context**\r\n\r\n```\r\ndash (1.9.1)\r\ndash-core-components (1.8.1)\r\ndash-html-components (1.0.2)\r\ndash-renderer (1.2.4)\r\ndash-table (4.6.1)\r\n\r\n```\r\n\r\n**Describe the bug**\r\n\r\nIf a file named ``org.py`` is present in the current directory with the following content:\r\n\r\n```\r\nimport dash_core_components as dcc\r\n```\r\n\r\nthen dash doesn't import and I get the following message:\r\n```\r\n>>> import dash\r\nDash was not successfully imported. Make sure you don't have a file named\r\n'dash.py' in your current directory.\r\n```\r\n\r\n**Expected behavior**\r\ndash should import without any error.\r\n\r\n**Additional info**\r\n- The org.py is never imported\r\n- If I rename the file to a different name dash get imported without any problem.\r\n- The problem is shown also with ``import dash_html_components as html```\r\n- The problem is shown either on Windows and in Linux\r\n- Tested with python3.4, python3.6, python3.8\r\n\r\n**Steps to replicate the problem on Linux**\r\n```\r\n$ mkdir mytest\r\n$ cd mytest\r\n$ echo \"import dash_core_components as dcc\" > org.py\r\n$ python3 -m venv venv\r\n$ . venv/bin/activate\r\n(venv) $ pip install dash\r\n(venv) $ python\r\nPython 3.4.6 (default, Mar 01 2017, 16:52:22) [GCC] on linux\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> import dash\r\nDash was not successfully imported. Make sure you don't have a file named\r\n'dash.py' in your current directory.\r\n(venv) $\r\n```\r\n\r\nif I rename the file the import works:\r\n```\r\n(venv) $ mv org.py othername.py\r\n(venv) $ python\r\nPython 3.4.6 (default, Mar 01 2017, 16:52:22) [GCC] on linux\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> import dash\r\n>>>\r\n```\r\n\n", "before_files": [{"content": "from .dash import Dash, no_update # noqa: F401\nfrom . import dependencies # noqa: F401\nfrom . import development # noqa: F401\nfrom . import exceptions # noqa: F401\nfrom . 
import resources # noqa: F401\nfrom .version import __version__ # noqa: F401\nfrom ._callback_context import callback_context # noqa: F401\n", "path": "dash/__init__.py"}], "after_files": [{"content": "# pylint: disable=C0413\n# __plotly_dash is for the \"make sure you don't have a dash.py\" check\n# must come before any other imports.\n__plotly_dash = True\nfrom .dash import Dash, no_update # noqa: F401,E402\nfrom . import dependencies # noqa: F401,E402\nfrom . import development # noqa: F401,E402\nfrom . import exceptions # noqa: F401,E402\nfrom . import resources # noqa: F401,E402\nfrom .version import __version__ # noqa: F401,E402\nfrom ._callback_context import callback_context # noqa: F401,E402\n", "path": "dash/__init__.py"}]}
| 893 | 331 |
gh_patches_debug_25606
|
rasdani/github-patches
|
git_diff
|
python-telegram-bot__python-telegram-bot-521
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BUG: all_members_are_administrators fails
I don't know if the Telegram API changed, but the parameter that tells whether all members in a group are administrators has been renamed to `all_members_are_administrators`. Chats fail to update with this parameter.
### Steps to reproduce
1. Create a group with "all members are administrators" enabled
2. Add a bot to it
3. send the bot a message
4. ` assert print(update.message.chat.all_members_are_admins)==True`
### Expected behaviour
It should pass the assert
### Actual behaviour
This fails the assert
### Way to fix
rename to `all_members_are_administrators`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `telegram/chat.py`
Content:
```
1 #!/usr/bin/env python
2 # pylint: disable=C0103,W0622
3 #
4 # A library that provides a Python interface to the Telegram Bot API
5 # Copyright (C) 2015-2016
6 # Leandro Toledo de Souza <[email protected]>
7 #
8 # This program is free software: you can redistribute it and/or modify
9 # it under the terms of the GNU Lesser Public License as published by
10 # the Free Software Foundation, either version 3 of the License, or
11 # (at your option) any later version.
12 #
13 # This program is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU Lesser Public License for more details.
17 #
18 # You should have received a copy of the GNU Lesser Public License
19 # along with this program. If not, see [http://www.gnu.org/licenses/].
20 """This module contains an object that represents a Telegram Chat."""
21
22 from telegram import TelegramObject
23
24
25 class Chat(TelegramObject):
26 """This object represents a Telegram Chat.
27
28 Attributes:
29 id (int):
30 type (str): Can be 'private', 'group', 'supergroup' or 'channel'
31 title (str): Title, for channels and group chats
32 username (str): Username, for private chats and channels if available
33 first_name (str): First name of the other party in a private chat
34 last_name (str): Last name of the other party in a private chat
35 all_members_are_admins (bool): True if a group has 'All Members Are Admins' enabled.
36
37 Args:
38 id (int):
39 type (str):
40 title (Optional[str]):
41 username(Optional[str]):
42 first_name(Optional[str]):
43 last_name(Optional[str]):
44 bot (Optional[Bot]): The Bot to use for instance methods
45 **kwargs (dict): Arbitrary keyword arguments.
46
47 """
48 PRIVATE = 'private'
49 GROUP = 'group'
50 SUPERGROUP = 'supergroup'
51 CHANNEL = 'channel'
52
53 def __init__(self,
54 id,
55 type,
56 title='',
57 username='',
58 first_name='',
59 last_name='',
60 all_members_are_admins=False,
61 bot=None,
62 **kwargs):
63 # Required
64 self.id = int(id)
65 self.type = type
66 # Optionals
67 self.title = title
68 self.username = username
69 self.first_name = first_name
70 self.last_name = last_name
71 self.all_members_are_admins = all_members_are_admins
72
73 self.bot = bot
74
75 @staticmethod
76 def de_json(data, bot):
77 """
78 Args:
79 data (dict):
80 bot (telegram.Bot):
81
82 Returns:
83 telegram.Chat:
84 """
85 if not data:
86 return None
87
88 return Chat(bot=bot, **data)
89
90 def send_action(self, *args, **kwargs):
91 """Shortcut for ``bot.sendChatAction(update.message.chat.id, *args, **kwargs)``"""
92 return self.bot.sendChatAction(self.id, *args, **kwargs)
93
94 def leave(self, *args, **kwargs):
95 """Shortcut for ``bot.leaveChat(update.message.chat.id, *args, **kwargs)``"""
96 return self.bot.leaveChat(self.id, *args, **kwargs)
97
98 def get_administrators(self, *args, **kwargs):
99 """Shortcut for ``bot.getChatAdministrators(update.message.chat.id, *args, **kwargs)``"""
100 return self.bot.getChatAdministrators(self.id, *args, **kwargs)
101
102 def get_members_count(self, *args, **kwargs):
103 """Shortcut for ``bot.getChatMembersCount(update.message.chat.id, *args, **kwargs)``"""
104 return self.bot.getChatMembersCount(self.id, *args, **kwargs)
105
106 def get_member(self, *args, **kwargs):
107 """Shortcut for ``bot.getChatMember(update.message.chat.id, *args, **kwargs)``"""
108 return self.bot.getChatMember(self.id, *args, **kwargs)
109
110 def kick_member(self, *args, **kwargs):
111 """Shortcut for ``bot.kickChatMember(update.message.chat.id, *args, **kwargs)``"""
112 return self.bot.kickChatMember(self.id, *args, **kwargs)
113
114 def unban_member(self, *args, **kwargs):
115 """Shortcut for ``bot.unbanChatMember(update.message.chat.id, *args, **kwargs)``"""
116 return self.bot.unbanChatMember(self.id, *args, **kwargs)
117
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/telegram/chat.py b/telegram/chat.py
--- a/telegram/chat.py
+++ b/telegram/chat.py
@@ -32,7 +32,7 @@
username (str): Username, for private chats and channels if available
first_name (str): First name of the other party in a private chat
last_name (str): Last name of the other party in a private chat
- all_members_are_admins (bool): True if a group has 'All Members Are Admins' enabled.
+ all_members_are_administrators (bool): True if group has 'All Members Are Administrators'
Args:
id (int):
@@ -57,7 +57,7 @@
username='',
first_name='',
last_name='',
- all_members_are_admins=False,
+ all_members_are_administrators=False,
bot=None,
**kwargs):
# Required
@@ -68,7 +68,7 @@
self.username = username
self.first_name = first_name
self.last_name = last_name
- self.all_members_are_admins = all_members_are_admins
+ self.all_members_are_administrators = all_members_are_administrators
self.bot = bot
|
{"golden_diff": "diff --git a/telegram/chat.py b/telegram/chat.py\n--- a/telegram/chat.py\n+++ b/telegram/chat.py\n@@ -32,7 +32,7 @@\n username (str): Username, for private chats and channels if available\n first_name (str): First name of the other party in a private chat\n last_name (str): Last name of the other party in a private chat\n- all_members_are_admins (bool): True if a group has 'All Members Are Admins' enabled.\n+ all_members_are_administrators (bool): True if group has 'All Members Are Administrators'\n \n Args:\n id (int):\n@@ -57,7 +57,7 @@\n username='',\n first_name='',\n last_name='',\n- all_members_are_admins=False,\n+ all_members_are_administrators=False,\n bot=None,\n **kwargs):\n # Required\n@@ -68,7 +68,7 @@\n self.username = username\n self.first_name = first_name\n self.last_name = last_name\n- self.all_members_are_admins = all_members_are_admins\n+ self.all_members_are_administrators = all_members_are_administrators\n \n self.bot = bot\n", "issue": "BUG: all_members_are_administrators fails\nI don;t know if telegram api changed but the parameter to tell if all administrators in a group are admin has changed: to `all_members_are_administrators` Chat's fail to update with this parameter\r\n\r\n### Steps to reproduce\r\n1. Create a group with \"all members are administrators enabled\r\n2. Add a bot to it\r\n3. send the bot a message\r\n4. ` assert print(update.message.chat.all_members_are_admins)==True`\r\n\r\n### Expected behaviour\r\nIt should pass the assert\r\n\r\n### Actual behaviour\r\nThis failes the assert\r\n\r\n### Way to fix\r\nrename to `all_members_are_administrators`\n", "before_files": [{"content": "#!/usr/bin/env python\n# pylint: disable=C0103,W0622\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2016\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. 
If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains an object that represents a Telegram Chat.\"\"\"\n\nfrom telegram import TelegramObject\n\n\nclass Chat(TelegramObject):\n \"\"\"This object represents a Telegram Chat.\n\n Attributes:\n id (int):\n type (str): Can be 'private', 'group', 'supergroup' or 'channel'\n title (str): Title, for channels and group chats\n username (str): Username, for private chats and channels if available\n first_name (str): First name of the other party in a private chat\n last_name (str): Last name of the other party in a private chat\n all_members_are_admins (bool): True if a group has 'All Members Are Admins' enabled.\n\n Args:\n id (int):\n type (str):\n title (Optional[str]):\n username(Optional[str]):\n first_name(Optional[str]):\n last_name(Optional[str]):\n bot (Optional[Bot]): The Bot to use for instance methods\n **kwargs (dict): Arbitrary keyword arguments.\n\n \"\"\"\n PRIVATE = 'private'\n GROUP = 'group'\n SUPERGROUP = 'supergroup'\n CHANNEL = 'channel'\n\n def __init__(self,\n id,\n type,\n title='',\n username='',\n first_name='',\n last_name='',\n all_members_are_admins=False,\n bot=None,\n **kwargs):\n # Required\n self.id = int(id)\n self.type = type\n # Optionals\n self.title = title\n self.username = username\n self.first_name = first_name\n self.last_name = last_name\n self.all_members_are_admins = all_members_are_admins\n\n self.bot = bot\n\n @staticmethod\n def de_json(data, bot):\n \"\"\"\n Args:\n data (dict):\n bot (telegram.Bot):\n\n Returns:\n telegram.Chat:\n \"\"\"\n if not data:\n return None\n\n return Chat(bot=bot, **data)\n\n def send_action(self, *args, **kwargs):\n \"\"\"Shortcut for ``bot.sendChatAction(update.message.chat.id, *args, **kwargs)``\"\"\"\n return self.bot.sendChatAction(self.id, *args, **kwargs)\n\n def leave(self, *args, **kwargs):\n \"\"\"Shortcut for ``bot.leaveChat(update.message.chat.id, *args, **kwargs)``\"\"\"\n return self.bot.leaveChat(self.id, *args, **kwargs)\n\n def get_administrators(self, *args, **kwargs):\n \"\"\"Shortcut for ``bot.getChatAdministrators(update.message.chat.id, *args, **kwargs)``\"\"\"\n return self.bot.getChatAdministrators(self.id, *args, **kwargs)\n\n def get_members_count(self, *args, **kwargs):\n \"\"\"Shortcut for ``bot.getChatMembersCount(update.message.chat.id, *args, **kwargs)``\"\"\"\n return self.bot.getChatMembersCount(self.id, *args, **kwargs)\n\n def get_member(self, *args, **kwargs):\n \"\"\"Shortcut for ``bot.getChatMember(update.message.chat.id, *args, **kwargs)``\"\"\"\n return self.bot.getChatMember(self.id, *args, **kwargs)\n\n def kick_member(self, *args, **kwargs):\n \"\"\"Shortcut for ``bot.kickChatMember(update.message.chat.id, *args, **kwargs)``\"\"\"\n return self.bot.kickChatMember(self.id, *args, **kwargs)\n\n def unban_member(self, *args, **kwargs):\n \"\"\"Shortcut for ``bot.unbanChatMember(update.message.chat.id, *args, **kwargs)``\"\"\"\n return self.bot.unbanChatMember(self.id, *args, **kwargs)\n", "path": "telegram/chat.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# pylint: disable=C0103,W0622\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2016\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is 
distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains an object that represents a Telegram Chat.\"\"\"\n\nfrom telegram import TelegramObject\n\n\nclass Chat(TelegramObject):\n \"\"\"This object represents a Telegram Chat.\n\n Attributes:\n id (int):\n type (str): Can be 'private', 'group', 'supergroup' or 'channel'\n title (str): Title, for channels and group chats\n username (str): Username, for private chats and channels if available\n first_name (str): First name of the other party in a private chat\n last_name (str): Last name of the other party in a private chat\n all_members_are_administrators (bool): True if group has 'All Members Are Administrators'\n\n Args:\n id (int):\n type (str):\n title (Optional[str]):\n username(Optional[str]):\n first_name(Optional[str]):\n last_name(Optional[str]):\n bot (Optional[Bot]): The Bot to use for instance methods\n **kwargs (dict): Arbitrary keyword arguments.\n\n \"\"\"\n PRIVATE = 'private'\n GROUP = 'group'\n SUPERGROUP = 'supergroup'\n CHANNEL = 'channel'\n\n def __init__(self,\n id,\n type,\n title='',\n username='',\n first_name='',\n last_name='',\n all_members_are_administrators=False,\n bot=None,\n **kwargs):\n # Required\n self.id = int(id)\n self.type = type\n # Optionals\n self.title = title\n self.username = username\n self.first_name = first_name\n self.last_name = last_name\n self.all_members_are_administrators = all_members_are_administrators\n\n self.bot = bot\n\n @staticmethod\n def de_json(data, bot):\n \"\"\"\n Args:\n data (dict):\n bot (telegram.Bot):\n\n Returns:\n telegram.Chat:\n \"\"\"\n if not data:\n return None\n\n return Chat(bot=bot, **data)\n\n def send_action(self, *args, **kwargs):\n \"\"\"Shortcut for ``bot.sendChatAction(update.message.chat.id, *args, **kwargs)``\"\"\"\n return self.bot.sendChatAction(self.id, *args, **kwargs)\n\n def leave(self, *args, **kwargs):\n \"\"\"Shortcut for ``bot.leaveChat(update.message.chat.id, *args, **kwargs)``\"\"\"\n return self.bot.leaveChat(self.id, *args, **kwargs)\n\n def get_administrators(self, *args, **kwargs):\n \"\"\"Shortcut for ``bot.getChatAdministrators(update.message.chat.id, *args, **kwargs)``\"\"\"\n return self.bot.getChatAdministrators(self.id, *args, **kwargs)\n\n def get_members_count(self, *args, **kwargs):\n \"\"\"Shortcut for ``bot.getChatMembersCount(update.message.chat.id, *args, **kwargs)``\"\"\"\n return self.bot.getChatMembersCount(self.id, *args, **kwargs)\n\n def get_member(self, *args, **kwargs):\n \"\"\"Shortcut for ``bot.getChatMember(update.message.chat.id, *args, **kwargs)``\"\"\"\n return self.bot.getChatMember(self.id, *args, **kwargs)\n\n def kick_member(self, *args, **kwargs):\n \"\"\"Shortcut for ``bot.kickChatMember(update.message.chat.id, *args, **kwargs)``\"\"\"\n return self.bot.kickChatMember(self.id, *args, **kwargs)\n\n def unban_member(self, *args, **kwargs):\n \"\"\"Shortcut for ``bot.unbanChatMember(update.message.chat.id, *args, **kwargs)``\"\"\"\n return self.bot.unbanChatMember(self.id, *args, **kwargs)\n", "path": "telegram/chat.py"}]}
| 1,635 | 270 |
gh_patches_debug_13165
|
rasdani/github-patches
|
git_diff
|
wright-group__WrightTools-1027
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Zero
>>> wt.units.convert(0, "wn", "nm")
ZeroDivisionError: division by zero
>>> wt.units.convert(0, "nm", "wn")
ZeroDivisionError: division by zero
Should return inf
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `WrightTools/units.py`
Content:
```
1 """Unit and label handling in WrightTools."""
2
3
4 # --- import --------------------------------------------------------------------------------------
5
6
7 import warnings
8
9 import pint
10
11
12 # --- define --------------------------------------------------------------------------------------
13
14 # Thise "blessed" units are here primarily for backwards compatibility, in particular
15 # to enable the behavior of `data.convert` which will convert freely between the energy units
16 # but does not go to time (where delay will)
17 # Since both of these context can convert to [length] units, they are interconvertible, but we
18 # do not want them to automatically do so.
19 # This list is (at creation time) purely reflective of historical units supported pre pint
20 # There is nothing preventing other units from being used and converted to, only to enable
21 # expected behavior
22 # 2021-01-29 KFS
23 blessed_units = (
24 # angle
25 "rad",
26 "deg",
27 # delay
28 "fs",
29 "ps",
30 "ns",
31 "mm_delay",
32 # energy
33 "nm",
34 "wn",
35 "eV",
36 "meV",
37 "Hz",
38 "THz",
39 "GHz",
40 # optical density
41 "mOD",
42 # position
43 "nm_p",
44 "um",
45 "mm",
46 "cm",
47 "in",
48 # absolute temperature
49 "K",
50 "deg_C",
51 "deg_F",
52 "deg_R",
53 # time
54 "fs_t",
55 "ps_t",
56 "ns_t",
57 "us_t",
58 "ns_t",
59 "s_t",
60 "m_t",
61 "h_t",
62 "d_t",
63 )
64
65 ureg = pint.UnitRegistry()
66 ureg.define("[fluence] = [energy] / [area]")
67
68 ureg.define("OD = [] ")
69
70 ureg.define("wavenumber = 1 / cm = cm^{-1} = wn")
71
72
73 # Aliases for backwards compatability
74 ureg.define("@alias s = s_t")
75 ureg.define("@alias min = m_t")
76 ureg.define("@alias hour = h_t")
77 ureg.define("@alias d = d_t")
78
79 ureg.define("@alias degC = deg_C")
80 ureg.define("@alias degF = deg_F")
81 ureg.define("@alias degR = deg_R")
82
83 ureg.define("@alias m = m_delay")
84
85 delay = pint.Context("delay", defaults={"n": 1, "num_pass": 2})
86 delay.add_transformation(
87 "[length]", "[time]", lambda ureg, x, n=1, num_pass=2: num_pass * x / ureg.speed_of_light * n
88 )
89 delay.add_transformation(
90 "[time]", "[length]", lambda ureg, x, n=1, num_pass=2: x / num_pass * ureg.speed_of_light / n
91 )
92 ureg.enable_contexts("spectroscopy", delay)
93
94 # --- functions -----------------------------------------------------------------------------------
95
96
97 def converter(val, current_unit, destination_unit):
98 """Convert from one unit to another.
99
100 Parameters
101 ----------
102 val : number
103 Number to convert.
104 current_unit : string
105 Current unit.
106 destination_unit : string
107 Destination unit.
108
109 Returns
110 -------
111 number
112 Converted value.
113 """
114 try:
115 val = ureg.Quantity(val, current_unit).to(destination_unit).magnitude
116 except (pint.errors.DimensionalityError, pint.errors.UndefinedUnitError, AttributeError):
117 warnings.warn(
118 "conversion {0} to {1} not valid: returning input".format(
119 current_unit, destination_unit
120 )
121 )
122 return val
123
124
125 convert = converter
126
127
128 def get_symbol(units) -> str:
129 """Get default symbol type.
130
131 Parameters
132 ----------
133 units_str : string
134 Units.
135
136 Returns
137 -------
138 string
139 LaTeX formatted symbol.
140 """
141 quantity = ureg.Quantity(1, ureg[units])
142 if quantity.check("[length]"):
143 return r"\lambda"
144 elif quantity.check("1 / [length]"):
145 return r"\bar\nu"
146 elif quantity.check("[energy]"):
147 return r"\hslash\omega"
148 elif quantity.check("1 / [time]"):
149 return "f"
150 elif quantity.check("[time]"):
151 return r"\tau"
152 elif quantity.check("[fluence]"):
153 return r"\mathcal{F}"
154 elif quantity.check("[temperature]"):
155 return "T"
156 elif ureg[units] in (ureg.deg, ureg.radian):
157 return r"\omega"
158 else:
159 return None
160
161
162 def get_valid_conversions(units, options=blessed_units) -> tuple:
163 return tuple(i for i in options if is_valid_conversion(units, i) and units != i)
164
165
166 def is_valid_conversion(a, b, blessed=True) -> bool:
167 if a is None:
168 return b is None
169 if blessed and a in blessed_units and b in blessed_units:
170 blessed_energy_units = {"nm", "wn", "eV", "meV", "Hz", "THz", "GHz"}
171 if a in blessed_energy_units:
172 return b in blessed_energy_units
173 blessed_delay_units = {"fs", "ps", "ns", "mm_delay"}
174 if a in blessed_delay_units:
175 return b in blessed_delay_units
176 return ureg.Unit(a).dimensionality == ureg.Unit(b).dimensionality
177 try:
178 return ureg.Unit(a).is_compatible_with(b, "spectroscopy")
179 except pint.UndefinedUnitError:
180 return False
181
182
183 def kind(units):
184 """Find the dimensionality of given units.
185
186 Parameters
187 ----------
188 units : string
189 The units of interest
190
191 Returns
192 -------
193 string
194 The kind of the given units. If no match is found, returns None.
195 """
196 if units is None:
197 return None
198 return str(ureg.Unit(units).dimensionality)
199
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/WrightTools/units.py b/WrightTools/units.py
--- a/WrightTools/units.py
+++ b/WrightTools/units.py
@@ -115,10 +115,13 @@
val = ureg.Quantity(val, current_unit).to(destination_unit).magnitude
except (pint.errors.DimensionalityError, pint.errors.UndefinedUnitError, AttributeError):
warnings.warn(
- "conversion {0} to {1} not valid: returning input".format(
- current_unit, destination_unit
- )
+ f"conversion {current_unit} to {destination_unit} not valid: returning input"
)
+ except ZeroDivisionError:
+ warnings.warn(
+ f"conversion {current_unit} to {destination_unit} resulted in ZeroDivisionError: returning inf"
+ )
+ return float("inf")
return val
|
{"golden_diff": "diff --git a/WrightTools/units.py b/WrightTools/units.py\n--- a/WrightTools/units.py\n+++ b/WrightTools/units.py\n@@ -115,10 +115,13 @@\n val = ureg.Quantity(val, current_unit).to(destination_unit).magnitude\n except (pint.errors.DimensionalityError, pint.errors.UndefinedUnitError, AttributeError):\n warnings.warn(\n- \"conversion {0} to {1} not valid: returning input\".format(\n- current_unit, destination_unit\n- )\n+ f\"conversion {current_unit} to {destination_unit} not valid: returning input\"\n )\n+ except ZeroDivisionError:\n+ warnings.warn(\n+ f\"conversion {current_unit} to {destination_unit} resulted in ZeroDivisionError: returning inf\"\n+ )\n+ return float(\"inf\")\n return val\n", "issue": "Zero\n>>> wt.units.convert(0, \"wn\", \"nm\")\r\nZeroDivisionError: division by zero\r\n>>> wt.units.convert(0, \"nm\", \"wn\")\r\nZeroDivisionError: division by zero\r\n\r\nShould return inf\n", "before_files": [{"content": "\"\"\"Unit and label handling in WrightTools.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport warnings\n\nimport pint\n\n\n# --- define --------------------------------------------------------------------------------------\n\n# Thise \"blessed\" units are here primarily for backwards compatibility, in particular\n# to enable the behavior of `data.convert` which will convert freely between the energy units\n# but does not go to time (where delay will)\n# Since both of these context can convert to [length] units, they are interconvertible, but we\n# do not want them to automatically do so.\n# This list is (at creation time) purely reflective of historical units supported pre pint\n# There is nothing preventing other units from being used and converted to, only to enable\n# expected behavior\n# 2021-01-29 KFS\nblessed_units = (\n # angle\n \"rad\",\n \"deg\",\n # delay\n \"fs\",\n \"ps\",\n \"ns\",\n \"mm_delay\",\n # energy\n \"nm\",\n \"wn\",\n \"eV\",\n \"meV\",\n \"Hz\",\n \"THz\",\n \"GHz\",\n # optical density\n \"mOD\",\n # position\n \"nm_p\",\n \"um\",\n \"mm\",\n \"cm\",\n \"in\",\n # absolute temperature\n \"K\",\n \"deg_C\",\n \"deg_F\",\n \"deg_R\",\n # time\n \"fs_t\",\n \"ps_t\",\n \"ns_t\",\n \"us_t\",\n \"ns_t\",\n \"s_t\",\n \"m_t\",\n \"h_t\",\n \"d_t\",\n)\n\nureg = pint.UnitRegistry()\nureg.define(\"[fluence] = [energy] / [area]\")\n\nureg.define(\"OD = [] \")\n\nureg.define(\"wavenumber = 1 / cm = cm^{-1} = wn\")\n\n\n# Aliases for backwards compatability\nureg.define(\"@alias s = s_t\")\nureg.define(\"@alias min = m_t\")\nureg.define(\"@alias hour = h_t\")\nureg.define(\"@alias d = d_t\")\n\nureg.define(\"@alias degC = deg_C\")\nureg.define(\"@alias degF = deg_F\")\nureg.define(\"@alias degR = deg_R\")\n\nureg.define(\"@alias m = m_delay\")\n\ndelay = pint.Context(\"delay\", defaults={\"n\": 1, \"num_pass\": 2})\ndelay.add_transformation(\n \"[length]\", \"[time]\", lambda ureg, x, n=1, num_pass=2: num_pass * x / ureg.speed_of_light * n\n)\ndelay.add_transformation(\n \"[time]\", \"[length]\", lambda ureg, x, n=1, num_pass=2: x / num_pass * ureg.speed_of_light / n\n)\nureg.enable_contexts(\"spectroscopy\", delay)\n\n# --- functions -----------------------------------------------------------------------------------\n\n\ndef converter(val, current_unit, destination_unit):\n \"\"\"Convert from one unit to another.\n\n Parameters\n ----------\n val : number\n Number to convert.\n current_unit : string\n Current unit.\n destination_unit : string\n Destination 
unit.\n\n Returns\n -------\n number\n Converted value.\n \"\"\"\n try:\n val = ureg.Quantity(val, current_unit).to(destination_unit).magnitude\n except (pint.errors.DimensionalityError, pint.errors.UndefinedUnitError, AttributeError):\n warnings.warn(\n \"conversion {0} to {1} not valid: returning input\".format(\n current_unit, destination_unit\n )\n )\n return val\n\n\nconvert = converter\n\n\ndef get_symbol(units) -> str:\n \"\"\"Get default symbol type.\n\n Parameters\n ----------\n units_str : string\n Units.\n\n Returns\n -------\n string\n LaTeX formatted symbol.\n \"\"\"\n quantity = ureg.Quantity(1, ureg[units])\n if quantity.check(\"[length]\"):\n return r\"\\lambda\"\n elif quantity.check(\"1 / [length]\"):\n return r\"\\bar\\nu\"\n elif quantity.check(\"[energy]\"):\n return r\"\\hslash\\omega\"\n elif quantity.check(\"1 / [time]\"):\n return \"f\"\n elif quantity.check(\"[time]\"):\n return r\"\\tau\"\n elif quantity.check(\"[fluence]\"):\n return r\"\\mathcal{F}\"\n elif quantity.check(\"[temperature]\"):\n return \"T\"\n elif ureg[units] in (ureg.deg, ureg.radian):\n return r\"\\omega\"\n else:\n return None\n\n\ndef get_valid_conversions(units, options=blessed_units) -> tuple:\n return tuple(i for i in options if is_valid_conversion(units, i) and units != i)\n\n\ndef is_valid_conversion(a, b, blessed=True) -> bool:\n if a is None:\n return b is None\n if blessed and a in blessed_units and b in blessed_units:\n blessed_energy_units = {\"nm\", \"wn\", \"eV\", \"meV\", \"Hz\", \"THz\", \"GHz\"}\n if a in blessed_energy_units:\n return b in blessed_energy_units\n blessed_delay_units = {\"fs\", \"ps\", \"ns\", \"mm_delay\"}\n if a in blessed_delay_units:\n return b in blessed_delay_units\n return ureg.Unit(a).dimensionality == ureg.Unit(b).dimensionality\n try:\n return ureg.Unit(a).is_compatible_with(b, \"spectroscopy\")\n except pint.UndefinedUnitError:\n return False\n\n\ndef kind(units):\n \"\"\"Find the dimensionality of given units.\n\n Parameters\n ----------\n units : string\n The units of interest\n\n Returns\n -------\n string\n The kind of the given units. 
If no match is found, returns None.\n \"\"\"\n if units is None:\n return None\n return str(ureg.Unit(units).dimensionality)\n", "path": "WrightTools/units.py"}], "after_files": [{"content": "\"\"\"Unit and label handling in WrightTools.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport warnings\n\nimport pint\n\n\n# --- define --------------------------------------------------------------------------------------\n\n# Thise \"blessed\" units are here primarily for backwards compatibility, in particular\n# to enable the behavior of `data.convert` which will convert freely between the energy units\n# but does not go to time (where delay will)\n# Since both of these context can convert to [length] units, they are interconvertible, but we\n# do not want them to automatically do so.\n# This list is (at creation time) purely reflective of historical units supported pre pint\n# There is nothing preventing other units from being used and converted to, only to enable\n# expected behavior\n# 2021-01-29 KFS\nblessed_units = (\n # angle\n \"rad\",\n \"deg\",\n # delay\n \"fs\",\n \"ps\",\n \"ns\",\n \"mm_delay\",\n # energy\n \"nm\",\n \"wn\",\n \"eV\",\n \"meV\",\n \"Hz\",\n \"THz\",\n \"GHz\",\n # optical density\n \"mOD\",\n # position\n \"nm_p\",\n \"um\",\n \"mm\",\n \"cm\",\n \"in\",\n # absolute temperature\n \"K\",\n \"deg_C\",\n \"deg_F\",\n \"deg_R\",\n # time\n \"fs_t\",\n \"ps_t\",\n \"ns_t\",\n \"us_t\",\n \"ns_t\",\n \"s_t\",\n \"m_t\",\n \"h_t\",\n \"d_t\",\n)\n\nureg = pint.UnitRegistry()\nureg.define(\"[fluence] = [energy] / [area]\")\n\nureg.define(\"OD = [] \")\n\nureg.define(\"wavenumber = 1 / cm = cm^{-1} = wn\")\n\n\n# Aliases for backwards compatability\nureg.define(\"@alias s = s_t\")\nureg.define(\"@alias min = m_t\")\nureg.define(\"@alias hour = h_t\")\nureg.define(\"@alias d = d_t\")\n\nureg.define(\"@alias degC = deg_C\")\nureg.define(\"@alias degF = deg_F\")\nureg.define(\"@alias degR = deg_R\")\n\nureg.define(\"@alias m = m_delay\")\n\ndelay = pint.Context(\"delay\", defaults={\"n\": 1, \"num_pass\": 2})\ndelay.add_transformation(\n \"[length]\", \"[time]\", lambda ureg, x, n=1, num_pass=2: num_pass * x / ureg.speed_of_light * n\n)\ndelay.add_transformation(\n \"[time]\", \"[length]\", lambda ureg, x, n=1, num_pass=2: x / num_pass * ureg.speed_of_light / n\n)\nureg.enable_contexts(\"spectroscopy\", delay)\n\n# --- functions -----------------------------------------------------------------------------------\n\n\ndef converter(val, current_unit, destination_unit):\n \"\"\"Convert from one unit to another.\n\n Parameters\n ----------\n val : number\n Number to convert.\n current_unit : string\n Current unit.\n destination_unit : string\n Destination unit.\n\n Returns\n -------\n number\n Converted value.\n \"\"\"\n try:\n val = ureg.Quantity(val, current_unit).to(destination_unit).magnitude\n except (pint.errors.DimensionalityError, pint.errors.UndefinedUnitError, AttributeError):\n warnings.warn(\n f\"conversion {current_unit} to {destination_unit} not valid: returning input\"\n )\n except ZeroDivisionError:\n warnings.warn(\n f\"conversion {current_unit} to {destination_unit} resulted in ZeroDivisionError: returning inf\"\n )\n return float(\"inf\")\n return val\n\n\nconvert = converter\n\n\ndef get_symbol(units) -> str:\n \"\"\"Get default symbol type.\n\n Parameters\n ----------\n units_str : string\n Units.\n\n Returns\n -------\n string\n LaTeX formatted symbol.\n \"\"\"\n quantity = 
ureg.Quantity(1, ureg[units])\n if quantity.check(\"[length]\"):\n return r\"\\lambda\"\n elif quantity.check(\"1 / [length]\"):\n return r\"\\bar\\nu\"\n elif quantity.check(\"[energy]\"):\n return r\"\\hslash\\omega\"\n elif quantity.check(\"1 / [time]\"):\n return \"f\"\n elif quantity.check(\"[time]\"):\n return r\"\\tau\"\n elif quantity.check(\"[fluence]\"):\n return r\"\\mathcal{F}\"\n elif quantity.check(\"[temperature]\"):\n return \"T\"\n elif ureg[units] in (ureg.deg, ureg.radian):\n return r\"\\omega\"\n else:\n return None\n\n\ndef get_valid_conversions(units, options=blessed_units) -> tuple:\n return tuple(i for i in options if is_valid_conversion(units, i) and units != i)\n\n\ndef is_valid_conversion(a, b, blessed=True) -> bool:\n if a is None:\n return b is None\n if blessed and a in blessed_units and b in blessed_units:\n blessed_energy_units = {\"nm\", \"wn\", \"eV\", \"meV\", \"Hz\", \"THz\", \"GHz\"}\n if a in blessed_energy_units:\n return b in blessed_energy_units\n blessed_delay_units = {\"fs\", \"ps\", \"ns\", \"mm_delay\"}\n if a in blessed_delay_units:\n return b in blessed_delay_units\n return ureg.Unit(a).dimensionality == ureg.Unit(b).dimensionality\n try:\n return ureg.Unit(a).is_compatible_with(b, \"spectroscopy\")\n except pint.UndefinedUnitError:\n return False\n\n\ndef kind(units):\n \"\"\"Find the dimensionality of given units.\n\n Parameters\n ----------\n units : string\n The units of interest\n\n Returns\n -------\n string\n The kind of the given units. If no match is found, returns None.\n \"\"\"\n if units is None:\n return None\n return str(ureg.Unit(units).dimensionality)\n", "path": "WrightTools/units.py"}]}
| 2,072 | 194 |
gh_patches_debug_14699 | rasdani/github-patches | git_diff | OpenCTI-Platform__connectors-608 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Riskiq Connector throwing errors
## Description
RiskIQ connector is not working as expected with the correct credentials defined.
## Environment
1. OS - Ubuntu
2. OpenCTI version: 5.1.3
## Riskiq Connector Logs:
INFO:root:Listing Threat-Actors with filters null.,
INFO:root:Connector registered with ID: c455a3a4-cc8f-4133-9f8d-4098fa984de8,
INFO:root:Starting ping alive thread,
INFO:riskiq.client:URL: https://api.riskiq.net/pt/v2,
INFO:root:Starting RiskIQ connector...,
INFO:root:Running RiskIQ connector...,
INFO:root:Connector interval sec: 60,
INFO:root:[RiskIQ] loaded state: {},
INFO:root:RiskIQ connector clean run,
INFO:root:Initiate work for c455a3a4-cc8f-4133-9f8d-4098fa984de8,
INFO:root:[RiskIQ] workid opencti-work--2c314a8c-484e-4a68-9b31-bb782b3b22ed initiated,
INFO:root:[RiskIQ] last run: None,
**ERROR:root:Parser must be a string or character stream, not NoneType **

++Config File++
connector-riskiq:
image: opencti/connector-riskiq:5.1.3
environment:
- OPENCTI_URL=http://opencti:8080
- OPENCTI_TOKEN=c9dc7053-6bdf-44ca-9dfd-c0e3ff249eb8
- CONNECTOR_ID=c455a3a4-cc8f-4133-9f8d-4098fa984de8
- CONNECTOR_TYPE=EXTERNAL_IMPORT
- CONNECTOR_NAME=RISKIQ
- CONNECTOR_SCOPE=riskiq
- CONNECTOR_CONFIDENCE_LEVEL=15 # From 0 (Unknown) to 100 (Fully trusted)
- CONNECTOR_LOG_LEVEL=info
- RISKIQ_BASE_URL=https://api.riskiq.net/pt/v2
- [email protected]
- RISKIQ_PASSWORD=xxxxxxx
- RISKIQ_INTERVAL_SEC=86400
restart: always
It was working before, after a reboot the riskiq connector started logging the above error as "ERROR:root:Parser must be a string or character stream, not NoneType".
Please help to fix the same.
Thanks
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `external-import/riskiq/src/riskiq/article_importer.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """OpenCTI RiskIQ's article importer module."""
3 import datetime
4 import itertools
5 from typing import Any, Mapping, Optional
6
7 from dateutil import parser
8 from pycti import OpenCTIConnectorHelper
9 from stix2 import (
10 Bundle,
11 DomainName,
12 EmailAddress,
13 File,
14 Identity,
15 Indicator,
16 IPv4Address,
17 Mutex,
18 Report,
19 TLP_AMBER,
20 TLP_WHITE,
21 URL,
22 X509Certificate,
23 utils,
24 )
25 from stix2.v21 import _Observable
26
27 from .utils import datetime_to_timestamp
28
29
30 class ArticleImporter:
31 """Article importer class."""
32
33 _LATEST_ARTICLE_TIMESTAMP = "latest_article_timestamp"
34
35 def __init__(
36 self, helper: OpenCTIConnectorHelper, article: dict[str, Any], author: Identity
37 ):
38 """Initialization of the article importer."""
39 self.helper = helper
40 self.article = article
41 self.author = author
42 self.work_id: Optional[str] = None
43 # Use custom properties to set the author and the confidence level of the object.
44 self.custom_props = {
45 "x_opencti_created_by_ref": self.author["id"],
46 }
47
48 def _process_indicator(self, indicator: Indicator) -> list[_Observable]:
49 """
50 Process the indicator depending on its type.
51
52 Parameters
53 ----------
54 indicator : Indicator
55 One indicator from an article.
56
57 Returns
58 -------
59 List of Observable
60 A list of Observable depending on the indicator type.
61 """
62 indicator_type = indicator["type"]
63 values = indicator["values"]
64 tlp_marking = TLP_WHITE if indicator["source"] == "public" else TLP_AMBER
65
66 if indicator_type == "hash_md5":
67 return [
68 File(
69 type="file",
70 hashes={"MD5": v},
71 object_marking_refs=tlp_marking,
72 custom_properties=self.custom_props,
73 )
74 for v in values
75 ]
76
77 if indicator_type in ["hash_sha1", "sha1"]:
78 return [
79 File(
80 type="file",
81 hashes={"SHA-1": v},
82 object_marking_refs=tlp_marking,
83 custom_properties=self.custom_props,
84 )
85 for v in values
86 ]
87
88 if indicator_type in ["sha256", "hash_sha256"]:
89 return [
90 File(
91 type="file",
92 hashes={"SHA-256": v},
93 object_marking_refs=tlp_marking,
94 custom_properties=self.custom_props,
95 )
96 for v in values
97 ]
98
99 if indicator_type == "domain":
100 return [
101 DomainName(
102 type="domain-name",
103 value=v,
104 object_marking_refs=tlp_marking,
105 custom_properties=self.custom_props,
106 )
107 for v in values
108 ]
109
110 if indicator_type in ["email", "emails"]:
111 return [
112 EmailAddress(
113 type="email-addr",
114 value=v,
115 object_marking_refs=tlp_marking,
116 custom_properties=self.custom_props,
117 )
118 for v in values
119 ]
120
121 if indicator_type in ["filename", "filepath"]:
122 return [
123 File(
124 type="file",
125 name=v,
126 object_marking_refs=tlp_marking,
127 custom_properties=self.custom_props,
128 )
129 for v in values
130 ]
131
132 if indicator_type == "ip":
133 return [
134 IPv4Address(
135 type="ipv4-addr",
136 value=v,
137 object_marking_refs=tlp_marking,
138 custom_properties=self.custom_props,
139 )
140 for v in values
141 ]
142
143 if indicator_type in ["proces_mutex", "process_mutex", "mutex"]:
144 return [
145 Mutex(
146 type="mutex",
147 name=v,
148 object_marking_refs=tlp_marking,
149 custom_properties=self.custom_props,
150 )
151 for v in values
152 ]
153
154 if indicator_type == "url":
155 return [
156 URL(
157 type="url",
158 value=v,
159 object_marking_refs=tlp_marking,
160 defanged=False,
161 custom_properties=self.custom_props,
162 )
163 for v in values
164 ]
165
166 if indicator_type == "certificate_sha1":
167 return [
168 X509Certificate(
169 type="x509-certificate",
170 hashes={"SHA-1": v},
171 object_marking_refs=tlp_marking,
172 custom_properties=self.custom_props,
173 )
174 for v in values
175 ]
176
177 if indicator_type in [
178 "certificate_issuerorganizationname",
179 "certificate_issuercommonname",
180 ]:
181 return [
182 X509Certificate(
183 type="x509-certificate",
184 issuer=v,
185 object_marking_refs=tlp_marking,
186 custom_properties=self.custom_props,
187 )
188 for v in values
189 ]
190
191 if indicator_type in [
192 "certificate_subjectorganizationname",
193 "certificate_subjectcountry",
194 "certificate_subjectcommonname",
195 ]:
196 return [
197 X509Certificate(
198 type="x509-certificate",
199 subject=v,
200 object_marking_refs=tlp_marking,
201 custom_properties=self.custom_props,
202 )
203 for v in values
204 ]
205
206 if indicator_type in ["certificate_serialnumber", "code_certificate_serial"]:
207 return [
208 X509Certificate(
209 type="x509-certificate",
210 serial_number=v,
211 object_marking_refs=tlp_marking,
212 custom_properties=self.custom_props,
213 )
214 for v in values
215 ]
216
217 self.helper.log_warning(
218 f"[RiskIQ] indicator with key {indicator_type} not supported. (Values: {values})"
219 )
220 return []
221
222 def run(self, work_id: str, state: Mapping[str, Any]) -> Mapping[str, Any]:
223 """Run the importation of the article."""
224 self.work_id = work_id
225 published = parser.parse(self.article["publishedDate"])
226 created = parser.parse(self.article["createdDate"])
227
228 indicators = itertools.chain(
229 *[
230 self._process_indicator(indicator)
231 for indicator in self.article["indicators"]
232 ]
233 )
234
235 indicators = utils.deduplicate(list(indicators))
236 # Return the initial state if we don't have any indicators.
237 if not indicators:
238 self.helper.log_info("No indicator in article, report will not be created.")
239 return state
240
241 self.helper.log_debug(f"Number of indicators: {len(indicators)}")
242
243 # Check if all indicators' TLP marking are `TLP_WHITE`.
244 report_tlp = TLP_WHITE
245 if TLP_AMBER in [i["object_marking_refs"][0] for i in indicators]:
246 report_tlp = TLP_AMBER
247
248 report = Report(
249 type="report",
250 name=self.article.get("title", "RiskIQ Threat Report"),
251 description=self.article["summary"],
252 report_types=["threat-report"],
253 created_by_ref=self.author,
254 created=created,
255 published=published,
256 lang="en",
257 labels=self.article["tags"],
258 object_refs=indicators,
259 object_marking_refs=report_tlp,
260 external_references=[
261 {
262 "source_name": "riskiq",
263 "url": self.article["link"],
264 "external_id": self.article["guid"],
265 }
266 ],
267 allow_custom=True,
268 )
269 self.helper.log_debug(f"[RiskIQ] Report = {report}")
270
271 bundle = Bundle(objects=indicators + [report, self.author], allow_custom=True)
272 self.helper.log_info("[RiskIQ] Sending report STIX2 bundle")
273 self._send_bundle(bundle)
274
275 return self._create_state(created)
276
277 @classmethod
278 def _create_state(
279 cls, latest_datetime: Optional[datetime.datetime]
280 ) -> Mapping[str, Any]:
281 if latest_datetime is None:
282 return {}
283
284 return {cls._LATEST_ARTICLE_TIMESTAMP: datetime_to_timestamp(latest_datetime)}
285
286 def _send_bundle(self, bundle: Bundle) -> None:
287 serialized_bundle = bundle.serialize()
288 self.helper.send_stix2_bundle(serialized_bundle, work_id=self.work_id)
289
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/external-import/riskiq/src/riskiq/article_importer.py b/external-import/riskiq/src/riskiq/article_importer.py
--- a/external-import/riskiq/src/riskiq/article_importer.py
+++ b/external-import/riskiq/src/riskiq/article_importer.py
@@ -222,8 +222,14 @@
def run(self, work_id: str, state: Mapping[str, Any]) -> Mapping[str, Any]:
"""Run the importation of the article."""
self.work_id = work_id
- published = parser.parse(self.article["publishedDate"])
created = parser.parse(self.article["createdDate"])
+ # RisIQ API does not always provide the `publishedDate`.
+ # If it does not exist, take the value of the `createdDate` instead.
+ published = (
+ parser.parse(self.article["publishedDate"])
+ if self.article["publishedDate"] is not None
+ else created
+ )
indicators = itertools.chain(
*[
|
{"golden_diff": "diff --git a/external-import/riskiq/src/riskiq/article_importer.py b/external-import/riskiq/src/riskiq/article_importer.py\n--- a/external-import/riskiq/src/riskiq/article_importer.py\n+++ b/external-import/riskiq/src/riskiq/article_importer.py\n@@ -222,8 +222,14 @@\n def run(self, work_id: str, state: Mapping[str, Any]) -> Mapping[str, Any]:\n \"\"\"Run the importation of the article.\"\"\"\n self.work_id = work_id\n- published = parser.parse(self.article[\"publishedDate\"])\n created = parser.parse(self.article[\"createdDate\"])\n+ # RisIQ API does not always provide the `publishedDate`.\n+ # If it does not exist, take the value of the `createdDate` instead.\n+ published = (\n+ parser.parse(self.article[\"publishedDate\"])\n+ if self.article[\"publishedDate\"] is not None\n+ else created\n+ )\n \n indicators = itertools.chain(\n *[\n", "issue": "Riskiq Connector throwing errors\n## Description\r\n\r\nRiskIQ connector is not working as expected with the correct credentials defined.\r\n\r\n## Environment\r\n\r\n1. OS - Ubuntu \r\n2. OpenCTI version: 5.1.3\r\n\r\n## Riskiq Connector Logs:\r\nINFO:root:Listing Threat-Actors with filters null.,\r\nINFO:root:Connector registered with ID: c455a3a4-cc8f-4133-9f8d-4098fa984de8,\r\nINFO:root:Starting ping alive thread,\r\nINFO:riskiq.client:URL: https://api.riskiq.net/pt/v2,\r\nINFO:root:Starting RiskIQ connector...,\r\nINFO:root:Running RiskIQ connector...,\r\nINFO:root:Connector interval sec: 60,\r\nINFO:root:[RiskIQ] loaded state: {},\r\nINFO:root:RiskIQ connector clean run,\r\nINFO:root:Initiate work for c455a3a4-cc8f-4133-9f8d-4098fa984de8,\r\nINFO:root:[RiskIQ] workid opencti-work--2c314a8c-484e-4a68-9b31-bb782b3b22ed initiated,\r\nINFO:root:[RiskIQ] last run: None,\r\n**ERROR:root:Parser must be a string or character stream, not NoneType **\r\n\r\n\r\n\r\n++Config File++\r\n\r\n connector-riskiq:\r\n image: opencti/connector-riskiq:5.1.3\r\n environment:\r\n - OPENCTI_URL=http://opencti:8080\r\n - OPENCTI_TOKEN=c9dc7053-6bdf-44ca-9dfd-c0e3ff249eb8\r\n - CONNECTOR_ID=c455a3a4-cc8f-4133-9f8d-4098fa984de8\r\n - CONNECTOR_TYPE=EXTERNAL_IMPORT\r\n - CONNECTOR_NAME=RISKIQ\r\n - CONNECTOR_SCOPE=riskiq\r\n - CONNECTOR_CONFIDENCE_LEVEL=15 # From 0 (Unknown) to 100 (Fully trusted)\r\n - CONNECTOR_LOG_LEVEL=info\r\n - RISKIQ_BASE_URL=https://api.riskiq.net/pt/v2\r\n - [email protected]\r\n - RISKIQ_PASSWORD=xxxxxxx\r\n - RISKIQ_INTERVAL_SEC=86400\r\n restart: always\r\n\r\n\r\nIt was working before, after a reboot the riskiq connector started logging the above error as \"ERROR:root:Parser must be a string or character stream, not NoneType\".\r\n\r\nPlease help to fix the same.\r\n\r\nThanks\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"OpenCTI RiskIQ's article importer module.\"\"\"\nimport datetime\nimport itertools\nfrom typing import Any, Mapping, Optional\n\nfrom dateutil import parser\nfrom pycti import OpenCTIConnectorHelper\nfrom stix2 import (\n Bundle,\n DomainName,\n EmailAddress,\n File,\n Identity,\n Indicator,\n IPv4Address,\n Mutex,\n Report,\n TLP_AMBER,\n TLP_WHITE,\n URL,\n X509Certificate,\n utils,\n)\nfrom stix2.v21 import _Observable\n\nfrom .utils import datetime_to_timestamp\n\n\nclass ArticleImporter:\n \"\"\"Article importer class.\"\"\"\n\n _LATEST_ARTICLE_TIMESTAMP = \"latest_article_timestamp\"\n\n def __init__(\n self, helper: OpenCTIConnectorHelper, article: dict[str, Any], author: Identity\n ):\n \"\"\"Initialization of the article importer.\"\"\"\n self.helper = helper\n 
self.article = article\n self.author = author\n self.work_id: Optional[str] = None\n # Use custom properties to set the author and the confidence level of the object.\n self.custom_props = {\n \"x_opencti_created_by_ref\": self.author[\"id\"],\n }\n\n def _process_indicator(self, indicator: Indicator) -> list[_Observable]:\n \"\"\"\n Process the indicator depending on its type.\n\n Parameters\n ----------\n indicator : Indicator\n One indicator from an article.\n\n Returns\n -------\n List of Observable\n A list of Observable depending on the indicator type.\n \"\"\"\n indicator_type = indicator[\"type\"]\n values = indicator[\"values\"]\n tlp_marking = TLP_WHITE if indicator[\"source\"] == \"public\" else TLP_AMBER\n\n if indicator_type == \"hash_md5\":\n return [\n File(\n type=\"file\",\n hashes={\"MD5\": v},\n object_marking_refs=tlp_marking,\n custom_properties=self.custom_props,\n )\n for v in values\n ]\n\n if indicator_type in [\"hash_sha1\", \"sha1\"]:\n return [\n File(\n type=\"file\",\n hashes={\"SHA-1\": v},\n object_marking_refs=tlp_marking,\n custom_properties=self.custom_props,\n )\n for v in values\n ]\n\n if indicator_type in [\"sha256\", \"hash_sha256\"]:\n return [\n File(\n type=\"file\",\n hashes={\"SHA-256\": v},\n object_marking_refs=tlp_marking,\n custom_properties=self.custom_props,\n )\n for v in values\n ]\n\n if indicator_type == \"domain\":\n return [\n DomainName(\n type=\"domain-name\",\n value=v,\n object_marking_refs=tlp_marking,\n custom_properties=self.custom_props,\n )\n for v in values\n ]\n\n if indicator_type in [\"email\", \"emails\"]:\n return [\n EmailAddress(\n type=\"email-addr\",\n value=v,\n object_marking_refs=tlp_marking,\n custom_properties=self.custom_props,\n )\n for v in values\n ]\n\n if indicator_type in [\"filename\", \"filepath\"]:\n return [\n File(\n type=\"file\",\n name=v,\n object_marking_refs=tlp_marking,\n custom_properties=self.custom_props,\n )\n for v in values\n ]\n\n if indicator_type == \"ip\":\n return [\n IPv4Address(\n type=\"ipv4-addr\",\n value=v,\n object_marking_refs=tlp_marking,\n custom_properties=self.custom_props,\n )\n for v in values\n ]\n\n if indicator_type in [\"proces_mutex\", \"process_mutex\", \"mutex\"]:\n return [\n Mutex(\n type=\"mutex\",\n name=v,\n object_marking_refs=tlp_marking,\n custom_properties=self.custom_props,\n )\n for v in values\n ]\n\n if indicator_type == \"url\":\n return [\n URL(\n type=\"url\",\n value=v,\n object_marking_refs=tlp_marking,\n defanged=False,\n custom_properties=self.custom_props,\n )\n for v in values\n ]\n\n if indicator_type == \"certificate_sha1\":\n return [\n X509Certificate(\n type=\"x509-certificate\",\n hashes={\"SHA-1\": v},\n object_marking_refs=tlp_marking,\n custom_properties=self.custom_props,\n )\n for v in values\n ]\n\n if indicator_type in [\n \"certificate_issuerorganizationname\",\n \"certificate_issuercommonname\",\n ]:\n return [\n X509Certificate(\n type=\"x509-certificate\",\n issuer=v,\n object_marking_refs=tlp_marking,\n custom_properties=self.custom_props,\n )\n for v in values\n ]\n\n if indicator_type in [\n \"certificate_subjectorganizationname\",\n \"certificate_subjectcountry\",\n \"certificate_subjectcommonname\",\n ]:\n return [\n X509Certificate(\n type=\"x509-certificate\",\n subject=v,\n object_marking_refs=tlp_marking,\n custom_properties=self.custom_props,\n )\n for v in values\n ]\n\n if indicator_type in [\"certificate_serialnumber\", \"code_certificate_serial\"]:\n return [\n X509Certificate(\n type=\"x509-certificate\",\n 
serial_number=v,\n object_marking_refs=tlp_marking,\n custom_properties=self.custom_props,\n )\n for v in values\n ]\n\n self.helper.log_warning(\n f\"[RiskIQ] indicator with key {indicator_type} not supported. (Values: {values})\"\n )\n return []\n\n def run(self, work_id: str, state: Mapping[str, Any]) -> Mapping[str, Any]:\n \"\"\"Run the importation of the article.\"\"\"\n self.work_id = work_id\n published = parser.parse(self.article[\"publishedDate\"])\n created = parser.parse(self.article[\"createdDate\"])\n\n indicators = itertools.chain(\n *[\n self._process_indicator(indicator)\n for indicator in self.article[\"indicators\"]\n ]\n )\n\n indicators = utils.deduplicate(list(indicators))\n # Return the initial state if we don't have any indicators.\n if not indicators:\n self.helper.log_info(\"No indicator in article, report will not be created.\")\n return state\n\n self.helper.log_debug(f\"Number of indicators: {len(indicators)}\")\n\n # Check if all indicators' TLP marking are `TLP_WHITE`.\n report_tlp = TLP_WHITE\n if TLP_AMBER in [i[\"object_marking_refs\"][0] for i in indicators]:\n report_tlp = TLP_AMBER\n\n report = Report(\n type=\"report\",\n name=self.article.get(\"title\", \"RiskIQ Threat Report\"),\n description=self.article[\"summary\"],\n report_types=[\"threat-report\"],\n created_by_ref=self.author,\n created=created,\n published=published,\n lang=\"en\",\n labels=self.article[\"tags\"],\n object_refs=indicators,\n object_marking_refs=report_tlp,\n external_references=[\n {\n \"source_name\": \"riskiq\",\n \"url\": self.article[\"link\"],\n \"external_id\": self.article[\"guid\"],\n }\n ],\n allow_custom=True,\n )\n self.helper.log_debug(f\"[RiskIQ] Report = {report}\")\n\n bundle = Bundle(objects=indicators + [report, self.author], allow_custom=True)\n self.helper.log_info(\"[RiskIQ] Sending report STIX2 bundle\")\n self._send_bundle(bundle)\n\n return self._create_state(created)\n\n @classmethod\n def _create_state(\n cls, latest_datetime: Optional[datetime.datetime]\n ) -> Mapping[str, Any]:\n if latest_datetime is None:\n return {}\n\n return {cls._LATEST_ARTICLE_TIMESTAMP: datetime_to_timestamp(latest_datetime)}\n\n def _send_bundle(self, bundle: Bundle) -> None:\n serialized_bundle = bundle.serialize()\n self.helper.send_stix2_bundle(serialized_bundle, work_id=self.work_id)\n", "path": "external-import/riskiq/src/riskiq/article_importer.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"OpenCTI RiskIQ's article importer module.\"\"\"\nimport datetime\nimport itertools\nfrom typing import Any, Mapping, Optional\n\nfrom dateutil import parser\nfrom pycti import OpenCTIConnectorHelper\nfrom stix2 import (\n Bundle,\n DomainName,\n EmailAddress,\n File,\n Identity,\n Indicator,\n IPv4Address,\n Mutex,\n Report,\n TLP_AMBER,\n TLP_WHITE,\n URL,\n X509Certificate,\n utils,\n)\nfrom stix2.v21 import _Observable\n\nfrom .utils import datetime_to_timestamp\n\n\nclass ArticleImporter:\n \"\"\"Article importer class.\"\"\"\n\n _LATEST_ARTICLE_TIMESTAMP = \"latest_article_timestamp\"\n\n def __init__(\n self, helper: OpenCTIConnectorHelper, article: dict[str, Any], author: Identity\n ):\n \"\"\"Initialization of the article importer.\"\"\"\n self.helper = helper\n self.article = article\n self.author = author\n self.work_id: Optional[str] = None\n # Use custom properties to set the author and the confidence level of the object.\n self.custom_props = {\n \"x_opencti_created_by_ref\": self.author[\"id\"],\n }\n\n def _process_indicator(self, indicator: 
Indicator) -> list[_Observable]:\n \"\"\"\n Process the indicator depending on its type.\n\n Parameters\n ----------\n indicator : Indicator\n One indicator from an article.\n\n Returns\n -------\n List of Observable\n A list of Observable depending on the indicator type.\n \"\"\"\n indicator_type = indicator[\"type\"]\n values = indicator[\"values\"]\n tlp_marking = TLP_WHITE if indicator[\"source\"] == \"public\" else TLP_AMBER\n\n if indicator_type == \"hash_md5\":\n return [\n File(\n type=\"file\",\n hashes={\"MD5\": v},\n object_marking_refs=tlp_marking,\n custom_properties=self.custom_props,\n )\n for v in values\n ]\n\n if indicator_type in [\"hash_sha1\", \"sha1\"]:\n return [\n File(\n type=\"file\",\n hashes={\"SHA-1\": v},\n object_marking_refs=tlp_marking,\n custom_properties=self.custom_props,\n )\n for v in values\n ]\n\n if indicator_type in [\"sha256\", \"hash_sha256\"]:\n return [\n File(\n type=\"file\",\n hashes={\"SHA-256\": v},\n object_marking_refs=tlp_marking,\n custom_properties=self.custom_props,\n )\n for v in values\n ]\n\n if indicator_type == \"domain\":\n return [\n DomainName(\n type=\"domain-name\",\n value=v,\n object_marking_refs=tlp_marking,\n custom_properties=self.custom_props,\n )\n for v in values\n ]\n\n if indicator_type in [\"email\", \"emails\"]:\n return [\n EmailAddress(\n type=\"email-addr\",\n value=v,\n object_marking_refs=tlp_marking,\n custom_properties=self.custom_props,\n )\n for v in values\n ]\n\n if indicator_type in [\"filename\", \"filepath\"]:\n return [\n File(\n type=\"file\",\n name=v,\n object_marking_refs=tlp_marking,\n custom_properties=self.custom_props,\n )\n for v in values\n ]\n\n if indicator_type == \"ip\":\n return [\n IPv4Address(\n type=\"ipv4-addr\",\n value=v,\n object_marking_refs=tlp_marking,\n custom_properties=self.custom_props,\n )\n for v in values\n ]\n\n if indicator_type in [\"proces_mutex\", \"process_mutex\", \"mutex\"]:\n return [\n Mutex(\n type=\"mutex\",\n name=v,\n object_marking_refs=tlp_marking,\n custom_properties=self.custom_props,\n )\n for v in values\n ]\n\n if indicator_type == \"url\":\n return [\n URL(\n type=\"url\",\n value=v,\n object_marking_refs=tlp_marking,\n defanged=False,\n custom_properties=self.custom_props,\n )\n for v in values\n ]\n\n if indicator_type == \"certificate_sha1\":\n return [\n X509Certificate(\n type=\"x509-certificate\",\n hashes={\"SHA-1\": v},\n object_marking_refs=tlp_marking,\n custom_properties=self.custom_props,\n )\n for v in values\n ]\n\n if indicator_type in [\n \"certificate_issuerorganizationname\",\n \"certificate_issuercommonname\",\n ]:\n return [\n X509Certificate(\n type=\"x509-certificate\",\n issuer=v,\n object_marking_refs=tlp_marking,\n custom_properties=self.custom_props,\n )\n for v in values\n ]\n\n if indicator_type in [\n \"certificate_subjectorganizationname\",\n \"certificate_subjectcountry\",\n \"certificate_subjectcommonname\",\n ]:\n return [\n X509Certificate(\n type=\"x509-certificate\",\n subject=v,\n object_marking_refs=tlp_marking,\n custom_properties=self.custom_props,\n )\n for v in values\n ]\n\n if indicator_type in [\"certificate_serialnumber\", \"code_certificate_serial\"]:\n return [\n X509Certificate(\n type=\"x509-certificate\",\n serial_number=v,\n object_marking_refs=tlp_marking,\n custom_properties=self.custom_props,\n )\n for v in values\n ]\n\n self.helper.log_warning(\n f\"[RiskIQ] indicator with key {indicator_type} not supported. 
(Values: {values})\"\n )\n return []\n\n def run(self, work_id: str, state: Mapping[str, Any]) -> Mapping[str, Any]:\n \"\"\"Run the importation of the article.\"\"\"\n self.work_id = work_id\n created = parser.parse(self.article[\"createdDate\"])\n # RisIQ API does not always provide the `publishedDate`.\n # If it does not exist, take the value of the `createdDate` instead.\n published = (\n parser.parse(self.article[\"publishedDate\"])\n if self.article[\"publishedDate\"] is not None\n else created\n )\n\n indicators = itertools.chain(\n *[\n self._process_indicator(indicator)\n for indicator in self.article[\"indicators\"]\n ]\n )\n\n indicators = utils.deduplicate(list(indicators))\n # Return the initial state if we don't have any indicators.\n if not indicators:\n self.helper.log_info(\"No indicator in article, report will not be created.\")\n return state\n\n self.helper.log_debug(f\"Number of indicators: {len(indicators)}\")\n\n # Check if all indicators' TLP marking are `TLP_WHITE`.\n report_tlp = TLP_WHITE\n if TLP_AMBER in [i[\"object_marking_refs\"][0] for i in indicators]:\n report_tlp = TLP_AMBER\n\n report = Report(\n type=\"report\",\n name=self.article.get(\"title\", \"RiskIQ Threat Report\"),\n description=self.article[\"summary\"],\n report_types=[\"threat-report\"],\n created_by_ref=self.author,\n created=created,\n published=published,\n lang=\"en\",\n labels=self.article[\"tags\"],\n object_refs=indicators,\n object_marking_refs=report_tlp,\n external_references=[\n {\n \"source_name\": \"riskiq\",\n \"url\": self.article[\"link\"],\n \"external_id\": self.article[\"guid\"],\n }\n ],\n allow_custom=True,\n )\n self.helper.log_debug(f\"[RiskIQ] Report = {report}\")\n\n bundle = Bundle(objects=indicators + [report, self.author], allow_custom=True)\n self.helper.log_info(\"[RiskIQ] Sending report STIX2 bundle\")\n self._send_bundle(bundle)\n\n return self._create_state(created)\n\n @classmethod\n def _create_state(\n cls, latest_datetime: Optional[datetime.datetime]\n ) -> Mapping[str, Any]:\n if latest_datetime is None:\n return {}\n\n return {cls._LATEST_ARTICLE_TIMESTAMP: datetime_to_timestamp(latest_datetime)}\n\n def _send_bundle(self, bundle: Bundle) -> None:\n serialized_bundle = bundle.serialize()\n self.helper.send_stix2_bundle(serialized_bundle, work_id=self.work_id)\n", "path": "external-import/riskiq/src/riskiq/article_importer.py"}]}
| 3,452 | 228 |
gh_patches_debug_32416 | rasdani/github-patches | git_diff | linz__geostore-1651 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use latest of each STAC extension version
### Enabler
So that we don't have to manually update the code to use the latest version, we want to automatically use the latest version available in the relevant Git submodule.
Need to check what happens when a file is submitted that references and old version of a stac schema
#### Acceptance Criteria
- [ ] Dependabot PRs for any of the STAC submodules run tests with the latest version of all the extensions in that submodule
- [ ] Add a note to the release documentation about notifying users which STAC extension versions are supported
#### Additional context
This avoids manual work like [this PR to use the latest LINZ STAC extensions](https://github.com/linz/geostore/pull/1444).
Caveat: We currently only support one version of each extension. When extensions release breaking changes this could affect our existing users, and we need to notify them.
#### Tasks
<!-- Tasks needed to complete this enabler -->
- [ ] ...
- [ ] ...
#### Definition of Ready
- [ ] This story is **ready** to work on
- [ ] Negotiable (team can decide how to design and implement)
- [ ] Valuable (from a user perspective)
- [ ] Estimate value applied (agreed by team)
- [ ] Small (so as to fit within an iteration)
- [ ] Testable (in principle, even if there isn't a test for it yet)
- [ ] Environments are ready to meet definition of done
- [ ] Resources required to implement will be ready
- [ ] Everyone understands and agrees with the tasks to complete the story
- [ ] Release value (e.g. Iteration 3) applied
- [ ] Sprint value (e.g. Aug 1 - Aug 15) applied
#### Definition of Done
- [ ] This story is **done**:
- [ ] Acceptance criteria completed
- [ ] Automated tests are passing
- [ ] Code is peer reviewed and pushed to master
- [ ] Deployed successfully to test environment
- [ ] Checked against [CODING guidelines](https://github.com/linz/geostore/blob/master/CODING.md)
- [ ] Relevant new tasks are added to backlog and communicated to the team
- [ ] Important decisions recorded in the issue ticket
- [ ] Readme/Changelog/Diagrams are updated
- [ ] Product Owner has approved acceptance criteria as complete
- [ ] Meets non-functional requirements:
- [ ] Scalability (data): Can scale to 300TB of data and 100,000,000 files and ability to
increase 10% every year
- [ ] Scability (users): Can scale to 100 concurrent users
- [ ] Cost: Data can be stored at < 0.5 NZD per GB per year
- [ ] Performance: A large dataset (500 GB and 50,000 files - e.g. Akl aerial imagery) can be
validated, imported and stored within 24 hours
- [ ] Accessibility: Can be used from LINZ networks and the public internet
- [ ] Availability: System available 24 hours a day and 7 days a week, this does not include
maintenance windows < 4 hours and does not include operational support
- [ ] Recoverability: RPO of fully imported datasets < 4 hours, RTO of a single 3 TB dataset <
12 hours
<!-- Please add one or more of these labels: 'spike', 'refactor', 'architecture', 'infrastructure', 'compliance' -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `geostore/check_stac_metadata/stac_validators.py`
Content:
```
1 from functools import cached_property
2 from json import load
3 from os.path import dirname, join
4
5 from jsonschema import Draft7Validator, FormatChecker, RefResolver
6 from jsonschema._utils import URIDict
7 from jsonschema.validators import extend
8
9 from ..stac_format import LINZ_STAC_EXTENSIONS_LOCAL_PATH
10 from ..types import JsonObject
11
12
13 class Schema:
14 def __init__(self, path: str):
15 self.path = path
16
17 @cached_property
18 def as_dict(self) -> JsonObject:
19 with open(join(dirname(__file__), self.path), encoding="utf-8") as file_pointer:
20 result: JsonObject = load(file_pointer)
21 return result
22
23 @cached_property
24 def schema_id(self) -> str:
25 id_: str = self.as_dict["$id"]
26 return id_
27
28 @cached_property
29 def uri(self) -> str:
30 uri_: str = URIDict().normalize(self.schema_id)
31 return uri_
32
33
34 FILE_STAC_SCHEMA_PATH = "file/v2.0.0/schema.json"
35 PROJECTION_STAC_SCHEMA_PATH = "projection/v1.0.0/schema.json"
36 VERSION_STAC_SCHEMA_PATH = "version/v1.0.0/schema.json"
37 FILE_SCHEMA = Schema(FILE_STAC_SCHEMA_PATH)
38
39 STAC_VERSION = "1.0.0"
40 STAC_SPEC_PATH = f"stac-spec/v{STAC_VERSION}"
41 CATALOG_SCHEMA = Schema(f"{STAC_SPEC_PATH}/catalog-spec/json-schema/catalog.json")
42 LINZ_STAC_EXTENSIONS_URL_PATH = "v0.0.14"
43 LINZ_SCHEMA_URL_DIRECTORY = f"{LINZ_STAC_EXTENSIONS_URL_PATH}/linz"
44 LINZ_SCHEMA_URL_PATH = f"{LINZ_SCHEMA_URL_DIRECTORY}/schema.json"
45 LINZ_SCHEMA = Schema(join(LINZ_STAC_EXTENSIONS_LOCAL_PATH, LINZ_SCHEMA_URL_PATH))
46 STAC_ITEM_SPEC_PATH = f"{STAC_SPEC_PATH}/item-spec/json-schema"
47 ITEM_SCHEMA = Schema(f"{STAC_ITEM_SPEC_PATH}/item.json")
48 QUALITY_SCHEMA_PATH = f"{LINZ_STAC_EXTENSIONS_URL_PATH}/quality/schema.json"
49
50 schema_store = {}
51 for schema in [
52 CATALOG_SCHEMA,
53 Schema(f"{STAC_SPEC_PATH}/collection-spec/json-schema/collection.json"),
54 FILE_SCHEMA,
55 Schema("geojson-spec/Feature.json"),
56 Schema("geojson-spec/Geometry.json"),
57 ITEM_SCHEMA,
58 Schema(f"{STAC_ITEM_SPEC_PATH}/basics.json"),
59 Schema(f"{STAC_ITEM_SPEC_PATH}/datetime.json"),
60 Schema(f"{STAC_ITEM_SPEC_PATH}/instrument.json"),
61 Schema(f"{STAC_ITEM_SPEC_PATH}/licensing.json"),
62 Schema(f"{STAC_ITEM_SPEC_PATH}/provider.json"),
63 LINZ_SCHEMA,
64 Schema(PROJECTION_STAC_SCHEMA_PATH),
65 Schema(VERSION_STAC_SCHEMA_PATH),
66 Schema(join(LINZ_STAC_EXTENSIONS_LOCAL_PATH, QUALITY_SCHEMA_PATH)),
67 ]:
68 # Normalize URLs the same way as jsonschema does
69 schema_store[schema.uri] = schema.as_dict
70
71 BaseSTACValidator = extend(Draft7Validator)
72 BaseSTACValidator.format_checker = FormatChecker()
73
74 STACCatalogSchemaValidator = extend(BaseSTACValidator)(
75 resolver=RefResolver.from_schema(CATALOG_SCHEMA.as_dict, store=schema_store),
76 schema=CATALOG_SCHEMA.as_dict,
77 )
78
79 STACCollectionSchemaValidator = extend(BaseSTACValidator)(
80 resolver=RefResolver.from_schema(LINZ_SCHEMA.as_dict, store=schema_store),
81 schema=LINZ_SCHEMA.as_dict,
82 )
83
84 STACItemSchemaValidator = extend(BaseSTACValidator)(
85 resolver=RefResolver.from_schema(LINZ_SCHEMA.as_dict, store=schema_store),
86 schema=LINZ_SCHEMA.as_dict,
87 )
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/geostore/check_stac_metadata/stac_validators.py b/geostore/check_stac_metadata/stac_validators.py
--- a/geostore/check_stac_metadata/stac_validators.py
+++ b/geostore/check_stac_metadata/stac_validators.py
@@ -1,6 +1,9 @@
-from functools import cached_property
+from distutils.version import StrictVersion
+from functools import cached_property, lru_cache
from json import load
+from os import scandir
from os.path import dirname, join
+from re import fullmatch
from jsonschema import Draft7Validator, FormatChecker, RefResolver
from jsonschema._utils import URIDict
@@ -31,15 +34,28 @@
return uri_
+@lru_cache
+def get_latest_extension_schema_version(extension_path: str) -> str:
+ directories = scandir(join(dirname(__file__), extension_path))
+ versions = []
+ for directory in directories:
+ if directory.is_dir() and fullmatch(r"v\d+\.\d+\.\d+", directory.name):
+ versions.append(directory.name[1:])
+ return sorted(versions, key=StrictVersion, reverse=True)[0]
+
+
FILE_STAC_SCHEMA_PATH = "file/v2.0.0/schema.json"
PROJECTION_STAC_SCHEMA_PATH = "projection/v1.0.0/schema.json"
VERSION_STAC_SCHEMA_PATH = "version/v1.0.0/schema.json"
FILE_SCHEMA = Schema(FILE_STAC_SCHEMA_PATH)
-STAC_VERSION = "1.0.0"
-STAC_SPEC_PATH = f"stac-spec/v{STAC_VERSION}"
+STAC_SPEC_EXTENSION_PATH = "stac-spec"
+STAC_VERSION = get_latest_extension_schema_version(STAC_SPEC_EXTENSION_PATH)
+STAC_SPEC_PATH = f"{STAC_SPEC_EXTENSION_PATH}/v{STAC_VERSION}"
CATALOG_SCHEMA = Schema(f"{STAC_SPEC_PATH}/catalog-spec/json-schema/catalog.json")
-LINZ_STAC_EXTENSIONS_URL_PATH = "v0.0.14"
+LINZ_STAC_EXTENSIONS_URL_PATH = (
+ f"v{get_latest_extension_schema_version(LINZ_STAC_EXTENSIONS_LOCAL_PATH)}"
+)
LINZ_SCHEMA_URL_DIRECTORY = f"{LINZ_STAC_EXTENSIONS_URL_PATH}/linz"
LINZ_SCHEMA_URL_PATH = f"{LINZ_SCHEMA_URL_DIRECTORY}/schema.json"
LINZ_SCHEMA = Schema(join(LINZ_STAC_EXTENSIONS_LOCAL_PATH, LINZ_SCHEMA_URL_PATH))
|
{"golden_diff": "diff --git a/geostore/check_stac_metadata/stac_validators.py b/geostore/check_stac_metadata/stac_validators.py\n--- a/geostore/check_stac_metadata/stac_validators.py\n+++ b/geostore/check_stac_metadata/stac_validators.py\n@@ -1,6 +1,9 @@\n-from functools import cached_property\n+from distutils.version import StrictVersion\n+from functools import cached_property, lru_cache\n from json import load\n+from os import scandir\n from os.path import dirname, join\n+from re import fullmatch\n \n from jsonschema import Draft7Validator, FormatChecker, RefResolver\n from jsonschema._utils import URIDict\n@@ -31,15 +34,28 @@\n return uri_\n \n \n+@lru_cache\n+def get_latest_extension_schema_version(extension_path: str) -> str:\n+ directories = scandir(join(dirname(__file__), extension_path))\n+ versions = []\n+ for directory in directories:\n+ if directory.is_dir() and fullmatch(r\"v\\d+\\.\\d+\\.\\d+\", directory.name):\n+ versions.append(directory.name[1:])\n+ return sorted(versions, key=StrictVersion, reverse=True)[0]\n+\n+\n FILE_STAC_SCHEMA_PATH = \"file/v2.0.0/schema.json\"\n PROJECTION_STAC_SCHEMA_PATH = \"projection/v1.0.0/schema.json\"\n VERSION_STAC_SCHEMA_PATH = \"version/v1.0.0/schema.json\"\n FILE_SCHEMA = Schema(FILE_STAC_SCHEMA_PATH)\n \n-STAC_VERSION = \"1.0.0\"\n-STAC_SPEC_PATH = f\"stac-spec/v{STAC_VERSION}\"\n+STAC_SPEC_EXTENSION_PATH = \"stac-spec\"\n+STAC_VERSION = get_latest_extension_schema_version(STAC_SPEC_EXTENSION_PATH)\n+STAC_SPEC_PATH = f\"{STAC_SPEC_EXTENSION_PATH}/v{STAC_VERSION}\"\n CATALOG_SCHEMA = Schema(f\"{STAC_SPEC_PATH}/catalog-spec/json-schema/catalog.json\")\n-LINZ_STAC_EXTENSIONS_URL_PATH = \"v0.0.14\"\n+LINZ_STAC_EXTENSIONS_URL_PATH = (\n+ f\"v{get_latest_extension_schema_version(LINZ_STAC_EXTENSIONS_LOCAL_PATH)}\"\n+)\n LINZ_SCHEMA_URL_DIRECTORY = f\"{LINZ_STAC_EXTENSIONS_URL_PATH}/linz\"\n LINZ_SCHEMA_URL_PATH = f\"{LINZ_SCHEMA_URL_DIRECTORY}/schema.json\"\n LINZ_SCHEMA = Schema(join(LINZ_STAC_EXTENSIONS_LOCAL_PATH, LINZ_SCHEMA_URL_PATH))\n", "issue": "Use latest of each STAC extension version\n### Enabler\r\n\r\nSo that we don't have to manually update the code to use the latest version, we want to automatically use the latest version available in the relevant Git submodule.\r\n\r\nNeed to check what happens when a file is submitted that references and old version of a stac schema\r\n\r\n#### Acceptance Criteria\r\n\r\n- [ ] Dependabot PRs for any of the STAC submodules run tests with the latest version of all the extensions in that submodule\r\n- [ ] Add a note to the release documentation about notifying users which STAC extension versions are supported\r\n\r\n#### Additional context\r\n\r\nThis avoids manual work like [this PR to use the latest LINZ STAC extensions](https://github.com/linz/geostore/pull/1444).\r\n\r\nCaveat: We currently only support one version of each extension. 
When extensions release breaking changes this could affect our existing users, and we need to notify them.\r\n\r\n#### Tasks\r\n\r\n<!-- Tasks needed to complete this enabler -->\r\n\r\n- [ ] ...\r\n- [ ] ...\r\n\r\n#### Definition of Ready\r\n\r\n- [ ] This story is **ready** to work on\r\n - [ ] Negotiable (team can decide how to design and implement)\r\n - [ ] Valuable (from a user perspective)\r\n - [ ] Estimate value applied (agreed by team)\r\n - [ ] Small (so as to fit within an iteration)\r\n - [ ] Testable (in principle, even if there isn't a test for it yet)\r\n - [ ] Environments are ready to meet definition of done\r\n - [ ] Resources required to implement will be ready\r\n - [ ] Everyone understands and agrees with the tasks to complete the story\r\n - [ ] Release value (e.g. Iteration 3) applied\r\n - [ ] Sprint value (e.g. Aug 1 - Aug 15) applied\r\n\r\n#### Definition of Done\r\n\r\n- [ ] This story is **done**:\r\n - [ ] Acceptance criteria completed\r\n - [ ] Automated tests are passing\r\n - [ ] Code is peer reviewed and pushed to master\r\n - [ ] Deployed successfully to test environment\r\n - [ ] Checked against [CODING guidelines](https://github.com/linz/geostore/blob/master/CODING.md)\r\n - [ ] Relevant new tasks are added to backlog and communicated to the team\r\n - [ ] Important decisions recorded in the issue ticket\r\n - [ ] Readme/Changelog/Diagrams are updated\r\n - [ ] Product Owner has approved acceptance criteria as complete\r\n - [ ] Meets non-functional requirements:\r\n - [ ] Scalability (data): Can scale to 300TB of data and 100,000,000 files and ability to\r\n increase 10% every year\r\n - [ ] Scability (users): Can scale to 100 concurrent users\r\n - [ ] Cost: Data can be stored at < 0.5 NZD per GB per year\r\n - [ ] Performance: A large dataset (500 GB and 50,000 files - e.g. 
Akl aerial imagery) can be\r\n validated, imported and stored within 24 hours\r\n - [ ] Accessibility: Can be used from LINZ networks and the public internet\r\n - [ ] Availability: System available 24 hours a day and 7 days a week, this does not include\r\n maintenance windows < 4 hours and does not include operational support\r\n - [ ] Recoverability: RPO of fully imported datasets < 4 hours, RTO of a single 3 TB dataset <\r\n 12 hours\r\n\r\n<!-- Please add one or more of these labels: 'spike', 'refactor', 'architecture', 'infrastructure', 'compliance' -->\r\n\n", "before_files": [{"content": "from functools import cached_property\nfrom json import load\nfrom os.path import dirname, join\n\nfrom jsonschema import Draft7Validator, FormatChecker, RefResolver\nfrom jsonschema._utils import URIDict\nfrom jsonschema.validators import extend\n\nfrom ..stac_format import LINZ_STAC_EXTENSIONS_LOCAL_PATH\nfrom ..types import JsonObject\n\n\nclass Schema:\n def __init__(self, path: str):\n self.path = path\n\n @cached_property\n def as_dict(self) -> JsonObject:\n with open(join(dirname(__file__), self.path), encoding=\"utf-8\") as file_pointer:\n result: JsonObject = load(file_pointer)\n return result\n\n @cached_property\n def schema_id(self) -> str:\n id_: str = self.as_dict[\"$id\"]\n return id_\n\n @cached_property\n def uri(self) -> str:\n uri_: str = URIDict().normalize(self.schema_id)\n return uri_\n\n\nFILE_STAC_SCHEMA_PATH = \"file/v2.0.0/schema.json\"\nPROJECTION_STAC_SCHEMA_PATH = \"projection/v1.0.0/schema.json\"\nVERSION_STAC_SCHEMA_PATH = \"version/v1.0.0/schema.json\"\nFILE_SCHEMA = Schema(FILE_STAC_SCHEMA_PATH)\n\nSTAC_VERSION = \"1.0.0\"\nSTAC_SPEC_PATH = f\"stac-spec/v{STAC_VERSION}\"\nCATALOG_SCHEMA = Schema(f\"{STAC_SPEC_PATH}/catalog-spec/json-schema/catalog.json\")\nLINZ_STAC_EXTENSIONS_URL_PATH = \"v0.0.14\"\nLINZ_SCHEMA_URL_DIRECTORY = f\"{LINZ_STAC_EXTENSIONS_URL_PATH}/linz\"\nLINZ_SCHEMA_URL_PATH = f\"{LINZ_SCHEMA_URL_DIRECTORY}/schema.json\"\nLINZ_SCHEMA = Schema(join(LINZ_STAC_EXTENSIONS_LOCAL_PATH, LINZ_SCHEMA_URL_PATH))\nSTAC_ITEM_SPEC_PATH = f\"{STAC_SPEC_PATH}/item-spec/json-schema\"\nITEM_SCHEMA = Schema(f\"{STAC_ITEM_SPEC_PATH}/item.json\")\nQUALITY_SCHEMA_PATH = f\"{LINZ_STAC_EXTENSIONS_URL_PATH}/quality/schema.json\"\n\nschema_store = {}\nfor schema in [\n CATALOG_SCHEMA,\n Schema(f\"{STAC_SPEC_PATH}/collection-spec/json-schema/collection.json\"),\n FILE_SCHEMA,\n Schema(\"geojson-spec/Feature.json\"),\n Schema(\"geojson-spec/Geometry.json\"),\n ITEM_SCHEMA,\n Schema(f\"{STAC_ITEM_SPEC_PATH}/basics.json\"),\n Schema(f\"{STAC_ITEM_SPEC_PATH}/datetime.json\"),\n Schema(f\"{STAC_ITEM_SPEC_PATH}/instrument.json\"),\n Schema(f\"{STAC_ITEM_SPEC_PATH}/licensing.json\"),\n Schema(f\"{STAC_ITEM_SPEC_PATH}/provider.json\"),\n LINZ_SCHEMA,\n Schema(PROJECTION_STAC_SCHEMA_PATH),\n Schema(VERSION_STAC_SCHEMA_PATH),\n Schema(join(LINZ_STAC_EXTENSIONS_LOCAL_PATH, QUALITY_SCHEMA_PATH)),\n]:\n # Normalize URLs the same way as jsonschema does\n schema_store[schema.uri] = schema.as_dict\n\nBaseSTACValidator = extend(Draft7Validator)\nBaseSTACValidator.format_checker = FormatChecker()\n\nSTACCatalogSchemaValidator = extend(BaseSTACValidator)(\n resolver=RefResolver.from_schema(CATALOG_SCHEMA.as_dict, store=schema_store),\n schema=CATALOG_SCHEMA.as_dict,\n)\n\nSTACCollectionSchemaValidator = extend(BaseSTACValidator)(\n resolver=RefResolver.from_schema(LINZ_SCHEMA.as_dict, store=schema_store),\n schema=LINZ_SCHEMA.as_dict,\n)\n\nSTACItemSchemaValidator = 
extend(BaseSTACValidator)(\n resolver=RefResolver.from_schema(LINZ_SCHEMA.as_dict, store=schema_store),\n schema=LINZ_SCHEMA.as_dict,\n)\n", "path": "geostore/check_stac_metadata/stac_validators.py"}], "after_files": [{"content": "from distutils.version import StrictVersion\nfrom functools import cached_property, lru_cache\nfrom json import load\nfrom os import scandir\nfrom os.path import dirname, join\nfrom re import fullmatch\n\nfrom jsonschema import Draft7Validator, FormatChecker, RefResolver\nfrom jsonschema._utils import URIDict\nfrom jsonschema.validators import extend\n\nfrom ..stac_format import LINZ_STAC_EXTENSIONS_LOCAL_PATH\nfrom ..types import JsonObject\n\n\nclass Schema:\n def __init__(self, path: str):\n self.path = path\n\n @cached_property\n def as_dict(self) -> JsonObject:\n with open(join(dirname(__file__), self.path), encoding=\"utf-8\") as file_pointer:\n result: JsonObject = load(file_pointer)\n return result\n\n @cached_property\n def schema_id(self) -> str:\n id_: str = self.as_dict[\"$id\"]\n return id_\n\n @cached_property\n def uri(self) -> str:\n uri_: str = URIDict().normalize(self.schema_id)\n return uri_\n\n\n@lru_cache\ndef get_latest_extension_schema_version(extension_path: str) -> str:\n directories = scandir(join(dirname(__file__), extension_path))\n versions = []\n for directory in directories:\n if directory.is_dir() and fullmatch(r\"v\\d+\\.\\d+\\.\\d+\", directory.name):\n versions.append(directory.name[1:])\n return sorted(versions, key=StrictVersion, reverse=True)[0]\n\n\nFILE_STAC_SCHEMA_PATH = \"file/v2.0.0/schema.json\"\nPROJECTION_STAC_SCHEMA_PATH = \"projection/v1.0.0/schema.json\"\nVERSION_STAC_SCHEMA_PATH = \"version/v1.0.0/schema.json\"\nFILE_SCHEMA = Schema(FILE_STAC_SCHEMA_PATH)\n\nSTAC_SPEC_EXTENSION_PATH = \"stac-spec\"\nSTAC_VERSION = get_latest_extension_schema_version(STAC_SPEC_EXTENSION_PATH)\nSTAC_SPEC_PATH = f\"{STAC_SPEC_EXTENSION_PATH}/v{STAC_VERSION}\"\nCATALOG_SCHEMA = Schema(f\"{STAC_SPEC_PATH}/catalog-spec/json-schema/catalog.json\")\nLINZ_STAC_EXTENSIONS_URL_PATH = (\n f\"v{get_latest_extension_schema_version(LINZ_STAC_EXTENSIONS_LOCAL_PATH)}\"\n)\nLINZ_SCHEMA_URL_DIRECTORY = f\"{LINZ_STAC_EXTENSIONS_URL_PATH}/linz\"\nLINZ_SCHEMA_URL_PATH = f\"{LINZ_SCHEMA_URL_DIRECTORY}/schema.json\"\nLINZ_SCHEMA = Schema(join(LINZ_STAC_EXTENSIONS_LOCAL_PATH, LINZ_SCHEMA_URL_PATH))\nSTAC_ITEM_SPEC_PATH = f\"{STAC_SPEC_PATH}/item-spec/json-schema\"\nITEM_SCHEMA = Schema(f\"{STAC_ITEM_SPEC_PATH}/item.json\")\nQUALITY_SCHEMA_PATH = f\"{LINZ_STAC_EXTENSIONS_URL_PATH}/quality/schema.json\"\n\nschema_store = {}\nfor schema in [\n CATALOG_SCHEMA,\n Schema(f\"{STAC_SPEC_PATH}/collection-spec/json-schema/collection.json\"),\n FILE_SCHEMA,\n Schema(\"geojson-spec/Feature.json\"),\n Schema(\"geojson-spec/Geometry.json\"),\n ITEM_SCHEMA,\n Schema(f\"{STAC_ITEM_SPEC_PATH}/basics.json\"),\n Schema(f\"{STAC_ITEM_SPEC_PATH}/datetime.json\"),\n Schema(f\"{STAC_ITEM_SPEC_PATH}/instrument.json\"),\n Schema(f\"{STAC_ITEM_SPEC_PATH}/licensing.json\"),\n Schema(f\"{STAC_ITEM_SPEC_PATH}/provider.json\"),\n LINZ_SCHEMA,\n Schema(PROJECTION_STAC_SCHEMA_PATH),\n Schema(VERSION_STAC_SCHEMA_PATH),\n Schema(join(LINZ_STAC_EXTENSIONS_LOCAL_PATH, QUALITY_SCHEMA_PATH)),\n]:\n # Normalize URLs the same way as jsonschema does\n schema_store[schema.uri] = schema.as_dict\n\nBaseSTACValidator = extend(Draft7Validator)\nBaseSTACValidator.format_checker = FormatChecker()\n\nSTACCatalogSchemaValidator = extend(BaseSTACValidator)(\n 
resolver=RefResolver.from_schema(CATALOG_SCHEMA.as_dict, store=schema_store),\n schema=CATALOG_SCHEMA.as_dict,\n)\n\nSTACCollectionSchemaValidator = extend(BaseSTACValidator)(\n resolver=RefResolver.from_schema(LINZ_SCHEMA.as_dict, store=schema_store),\n schema=LINZ_SCHEMA.as_dict,\n)\n\nSTACItemSchemaValidator = extend(BaseSTACValidator)(\n resolver=RefResolver.from_schema(LINZ_SCHEMA.as_dict, store=schema_store),\n schema=LINZ_SCHEMA.as_dict,\n)\n", "path": "geostore/check_stac_metadata/stac_validators.py"}]}
| 2,018 | 529 |
gh_patches_debug_22602 | rasdani/github-patches | git_diff | yt-dlp__yt-dlp-9612 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Jiosaavn] Extract more metadata
### DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE
- [X] I understand that I will be **blocked** if I *intentionally* remove or skip any mandatory\* field
### Checklist
- [X] I'm requesting a site-specific feature
- [X] I've verified that I have **updated yt-dlp to nightly or master** ([update instructions](https://github.com/yt-dlp/yt-dlp#update-channels))
- [X] I've checked that all provided URLs are playable in a browser with the same IP and same login details
- [X] I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766) and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates
- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
- [X] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required
### Region
India
### Example URLs
https://www.jiosaavn.com/song/love-me-again/OgFZRER2A3c
### Provide a description that is worded well enough to be understood
Please add the option to include metadata when songs are downloaded froom Jiosaavn. I am able to add metadata when downloading from youtube music. Please do something similar for Jiosaavn
### Provide verbose output that clearly demonstrates the problem
- [ ] Run **your** yt-dlp command with **-vU** flag added (`yt-dlp -vU <your command line>`)
- [X] If using API, add `'verbose': True` to `YoutubeDL` params instead
- [X] Copy the WHOLE output (starting with `[debug] Command-line config`) and insert it below
### Complete Verbose Output
```shell
[debug] Command-line config: ['--verbose', 'https://www.jiosaavn.com/song/love-me-again/OgFZRER2A3c']
[debug] Encodings: locale cp1252, fs utf-8, pref cp1252, out utf-8, error utf-8, screen utf-8
[debug] yt-dlp version [email protected] from yt-dlp/yt-dlp [615a84447] (win_exe)
[debug] Python 3.8.10 (CPython AMD64 64bit) - Windows-10-10.0.19045-SP0 (OpenSSL 1.1.1k 25 Mar 2021)
[debug] exe versions: ffmpeg N-112207-g8eb094adb2-20230928 (setts), ffprobe N-112207-g8eb094adb2-20230928
[debug] Optional libraries: Cryptodome-3.20.0, brotli-1.1.0, certifi-2024.02.02, mutagen-1.47.0, requests-2.31.0, sqlite3-3.35.5, urllib3-2.2.1, websockets-12.0
[debug] Proxy map: {}
[debug] Request Handlers: urllib, requests, websockets
[debug] Loaded 1803 extractors
[JioSaavnSong] Extracting URL: https://www.jiosaavn.com/song/love-me-again/OgFZRER2A3c
[JioSaavnSong] OgFZRER2A3c: Downloading webpage
[JioSaavnSong] OgFZRER2A3c: Downloading format info for 128
[JioSaavnSong] OgFZRER2A3c: Downloading format info for 320
[debug] Formats sorted by: hasvid, ie_pref, lang, quality, res, fps, hdr:12(7), vcodec:vp9.2(10), channels, acodec, size, br, asr, proto, vext, aext, hasaud, source, id
[debug] Default format spec: bestvideo*+bestaudio/best
[info] OgFZRER2A3c: Downloading 1 format(s): 320
[debug] Invoking http downloader on "https://ac.cf.saavncdn.com/593/23d333a3c1c0f706b6c57629f756f059_320.mp4?Expires=1712173385&Signature=fheCPEBOGuUsngOaUjck3xkTBBIyGE9jHg50kaEmIuzpD5DVzGw~7RDZEUO2ZeCk7UUvxsM1N7svIPh3V7cEfi3BCLjkiYqKwxh044TorkWad-GC-1P4oOcovsfAf0GxwCVQg3syVwza3QQJLWeUrcUS36B~rhqJg5~4AzaK8z~sinByrUCG5~BENyvLNsCGUN5gVnLQH3QN9MaavJ742Vn9Ew7DrddQkQuRD25j84hvBtPQsUuD3VAUX9zg5h1~bZ3~fWdrXJbCMPUy4Wq4b6KZexmMPu7tO8IjpwGXDpTdgB94N9R2UrAqc7S7HghmXEwESbNXNiC-iX-VSBUpCw__&Key-Pair-Id=APKAJB334VX63D3WJ5ZQ"
[debug] File locking is not supported. Proceeding without locking
[download] Destination: Love Me Again [OgFZRER2A3c].mp4
[download] 100% of 9.20MiB in 00:00:01 at 5.19MiB/s
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `yt_dlp/extractor/jiosaavn.py`
Content:
```
1 from .common import InfoExtractor
2 from ..utils import (
3 int_or_none,
4 js_to_json,
5 url_or_none,
6 urlencode_postdata,
7 urljoin,
8 )
9 from ..utils.traversal import traverse_obj
10
11
12 class JioSaavnBaseIE(InfoExtractor):
13 def _extract_initial_data(self, url, audio_id):
14 webpage = self._download_webpage(url, audio_id)
15 return self._search_json(
16 r'window\.__INITIAL_DATA__\s*=', webpage,
17 'init json', audio_id, transform_source=js_to_json)
18
19
20 class JioSaavnSongIE(JioSaavnBaseIE):
21 _VALID_URL = r'https?://(?:www\.)?(?:jiosaavn\.com/song/[^/?#]+/|saavn\.com/s/song/(?:[^/?#]+/){3})(?P<id>[^/?#]+)'
22 _TESTS = [{
23 'url': 'https://www.jiosaavn.com/song/leja-re/OQsEfQFVUXk',
24 'md5': '3b84396d15ed9e083c3106f1fa589c04',
25 'info_dict': {
26 'id': 'OQsEfQFVUXk',
27 'ext': 'mp4',
28 'title': 'Leja Re',
29 'album': 'Leja Re',
30 'thumbnail': 'https://c.saavncdn.com/258/Leja-Re-Hindi-2018-20181124024539-500x500.jpg',
31 'duration': 205,
32 'view_count': int,
33 'release_year': 2018,
34 },
35 }, {
36 'url': 'https://www.saavn.com/s/song/hindi/Saathiya/O-Humdum-Suniyo-Re/KAMiazoCblU',
37 'only_matching': True,
38 }]
39
40 _VALID_BITRATES = ('16', '32', '64', '128', '320')
41
42 def _real_extract(self, url):
43 audio_id = self._match_id(url)
44 extract_bitrates = self._configuration_arg('bitrate', ['128', '320'], ie_key='JioSaavn')
45 if invalid_bitrates := [br for br in extract_bitrates if br not in self._VALID_BITRATES]:
46 raise ValueError(
47 f'Invalid bitrate(s): {", ".join(invalid_bitrates)}. '
48 + f'Valid bitrates are: {", ".join(self._VALID_BITRATES)}')
49
50 song_data = self._extract_initial_data(url, audio_id)['song']['song']
51 formats = []
52 for bitrate in extract_bitrates:
53 media_data = self._download_json(
54 'https://www.jiosaavn.com/api.php', audio_id, f'Downloading format info for {bitrate}',
55 fatal=False, data=urlencode_postdata({
56 '__call': 'song.generateAuthToken',
57 '_format': 'json',
58 'bitrate': bitrate,
59 'url': song_data['encrypted_media_url'],
60 }))
61 if not media_data.get('auth_url'):
62 self.report_warning(f'Unable to extract format info for {bitrate}')
63 continue
64 formats.append({
65 'url': media_data['auth_url'],
66 'ext': media_data.get('type'),
67 'format_id': bitrate,
68 'abr': int(bitrate),
69 'vcodec': 'none',
70 })
71
72 return {
73 'id': audio_id,
74 'formats': formats,
75 **traverse_obj(song_data, {
76 'title': ('title', 'text'),
77 'album': ('album', 'text'),
78 'thumbnail': ('image', 0, {url_or_none}),
79 'duration': ('duration', {int_or_none}),
80 'view_count': ('play_count', {int_or_none}),
81 'release_year': ('year', {int_or_none}),
82 }),
83 }
84
85
86 class JioSaavnAlbumIE(JioSaavnBaseIE):
87 _VALID_URL = r'https?://(?:www\.)?(?:jio)?saavn\.com/album/[^/?#]+/(?P<id>[^/?#]+)'
88 _TESTS = [{
89 'url': 'https://www.jiosaavn.com/album/96/buIOjYZDrNA_',
90 'info_dict': {
91 'id': 'buIOjYZDrNA_',
92 'title': '96',
93 },
94 'playlist_count': 10,
95 }]
96
97 def _real_extract(self, url):
98 album_id = self._match_id(url)
99 album_view = self._extract_initial_data(url, album_id)['albumView']
100
101 return self.playlist_from_matches(
102 traverse_obj(album_view, (
103 'modules', lambda _, x: x['key'] == 'list', 'data', ..., 'title', 'action', {str})),
104 album_id, traverse_obj(album_view, ('album', 'title', 'text', {str})), ie=JioSaavnSongIE,
105 getter=lambda x: urljoin('https://www.jiosaavn.com/', x))
106
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/yt_dlp/extractor/jiosaavn.py b/yt_dlp/extractor/jiosaavn.py
--- a/yt_dlp/extractor/jiosaavn.py
+++ b/yt_dlp/extractor/jiosaavn.py
@@ -2,6 +2,7 @@
from ..utils import (
int_or_none,
js_to_json,
+ orderedSet,
url_or_none,
urlencode_postdata,
urljoin,
@@ -31,6 +32,7 @@
'duration': 205,
'view_count': int,
'release_year': 2018,
+ 'artists': ['Sandesh Shandilya', 'Dhvani Bhanushali', 'Tanishk Bagchi', 'Rashmi Virag', 'Irshad Kamil'],
},
}, {
'url': 'https://www.saavn.com/s/song/hindi/Saathiya/O-Humdum-Suniyo-Re/KAMiazoCblU',
@@ -79,6 +81,7 @@
'duration': ('duration', {int_or_none}),
'view_count': ('play_count', {int_or_none}),
'release_year': ('year', {int_or_none}),
+ 'artists': ('artists', ..., 'name', {str}, all, {orderedSet}),
}),
}
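A quick, hedged illustration of what the added `artists` path returns: the `song_data` payload below is invented for illustration, and the sketch assumes a yt-dlp build recent enough to expose `orderedSet` in `yt_dlp.utils` and the `all` marker in `traverse_obj` paths, as the patch above does.

```python
# Sketch only: invented payload, real yt-dlp helpers assumed available.
from yt_dlp.utils import orderedSet
from yt_dlp.utils.traversal import traverse_obj

song_data = {
    'artists': [
        {'name': 'Sandesh Shandilya'},
        {'name': 'Irshad Kamil'},
        {'name': 'Irshad Kamil'},  # duplicate credit, removed by orderedSet
        {'name': None},            # non-str value, dropped by the {str} filter
    ],
}

artists = traverse_obj(song_data, ('artists', ..., 'name', {str}, all, {orderedSet}))
print(artists)  # expected: ['Sandesh Shandilya', 'Irshad Kamil']
```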
|
{"golden_diff": "diff --git a/yt_dlp/extractor/jiosaavn.py b/yt_dlp/extractor/jiosaavn.py\n--- a/yt_dlp/extractor/jiosaavn.py\n+++ b/yt_dlp/extractor/jiosaavn.py\n@@ -2,6 +2,7 @@\n from ..utils import (\n int_or_none,\n js_to_json,\n+ orderedSet,\n url_or_none,\n urlencode_postdata,\n urljoin,\n@@ -31,6 +32,7 @@\n 'duration': 205,\n 'view_count': int,\n 'release_year': 2018,\n+ 'artists': ['Sandesh Shandilya', 'Dhvani Bhanushali', 'Tanishk Bagchi', 'Rashmi Virag', 'Irshad Kamil'],\n },\n }, {\n 'url': 'https://www.saavn.com/s/song/hindi/Saathiya/O-Humdum-Suniyo-Re/KAMiazoCblU',\n@@ -79,6 +81,7 @@\n 'duration': ('duration', {int_or_none}),\n 'view_count': ('play_count', {int_or_none}),\n 'release_year': ('year', {int_or_none}),\n+ 'artists': ('artists', ..., 'name', {str}, all, {orderedSet}),\n }),\n }\n", "issue": "[Jiosaavn] Extract more metadata\n### DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE\n\n- [X] I understand that I will be **blocked** if I *intentionally* remove or skip any mandatory\\* field\n\n### Checklist\n\n- [X] I'm requesting a site-specific feature\n- [X] I've verified that I have **updated yt-dlp to nightly or master** ([update instructions](https://github.com/yt-dlp/yt-dlp#update-channels))\n- [X] I've checked that all provided URLs are playable in a browser with the same IP and same login details\n- [X] I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766) and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates\n- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)\n- [X] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required\n\n### Region\n\nIndia\n\n### Example URLs\n\nhttps://www.jiosaavn.com/song/love-me-again/OgFZRER2A3c\n\n### Provide a description that is worded well enough to be understood\n\nPlease add the option to include metadata when songs are downloaded froom Jiosaavn. I am able to add metadata when downloading from youtube music. 
Please do something similar for Jiosaavn\n\n### Provide verbose output that clearly demonstrates the problem\n\n- [ ] Run **your** yt-dlp command with **-vU** flag added (`yt-dlp -vU <your command line>`)\n- [X] If using API, add `'verbose': True` to `YoutubeDL` params instead\n- [X] Copy the WHOLE output (starting with `[debug] Command-line config`) and insert it below\n\n### Complete Verbose Output\n\n```shell\n[debug] Command-line config: ['--verbose', 'https://www.jiosaavn.com/song/love-me-again/OgFZRER2A3c']\r\n[debug] Encodings: locale cp1252, fs utf-8, pref cp1252, out utf-8, error utf-8, screen utf-8\r\n[debug] yt-dlp version [email protected] from yt-dlp/yt-dlp [615a84447] (win_exe)\r\n[debug] Python 3.8.10 (CPython AMD64 64bit) - Windows-10-10.0.19045-SP0 (OpenSSL 1.1.1k 25 Mar 2021)\r\n[debug] exe versions: ffmpeg N-112207-g8eb094adb2-20230928 (setts), ffprobe N-112207-g8eb094adb2-20230928\r\n[debug] Optional libraries: Cryptodome-3.20.0, brotli-1.1.0, certifi-2024.02.02, mutagen-1.47.0, requests-2.31.0, sqlite3-3.35.5, urllib3-2.2.1, websockets-12.0\r\n[debug] Proxy map: {}\r\n[debug] Request Handlers: urllib, requests, websockets\r\n[debug] Loaded 1803 extractors\r\n[JioSaavnSong] Extracting URL: https://www.jiosaavn.com/song/love-me-again/OgFZRER2A3c\r\n[JioSaavnSong] OgFZRER2A3c: Downloading webpage\r\n[JioSaavnSong] OgFZRER2A3c: Downloading format info for 128\r\n[JioSaavnSong] OgFZRER2A3c: Downloading format info for 320\r\n[debug] Formats sorted by: hasvid, ie_pref, lang, quality, res, fps, hdr:12(7), vcodec:vp9.2(10), channels, acodec, size, br, asr, proto, vext, aext, hasaud, source, id\r\n[debug] Default format spec: bestvideo*+bestaudio/best\r\n[info] OgFZRER2A3c: Downloading 1 format(s): 320\r\n[debug] Invoking http downloader on \"https://ac.cf.saavncdn.com/593/23d333a3c1c0f706b6c57629f756f059_320.mp4?Expires=1712173385&Signature=fheCPEBOGuUsngOaUjck3xkTBBIyGE9jHg50kaEmIuzpD5DVzGw~7RDZEUO2ZeCk7UUvxsM1N7svIPh3V7cEfi3BCLjkiYqKwxh044TorkWad-GC-1P4oOcovsfAf0GxwCVQg3syVwza3QQJLWeUrcUS36B~rhqJg5~4AzaK8z~sinByrUCG5~BENyvLNsCGUN5gVnLQH3QN9MaavJ742Vn9Ew7DrddQkQuRD25j84hvBtPQsUuD3VAUX9zg5h1~bZ3~fWdrXJbCMPUy4Wq4b6KZexmMPu7tO8IjpwGXDpTdgB94N9R2UrAqc7S7HghmXEwESbNXNiC-iX-VSBUpCw__&Key-Pair-Id=APKAJB334VX63D3WJ5ZQ\"\r\n[debug] File locking is not supported. 
Proceeding without locking\r\n[download] Destination: Love Me Again [OgFZRER2A3c].mp4\r\n[download] 100% of 9.20MiB in 00:00:01 at 5.19MiB/s\n```\n\n", "before_files": [{"content": "from .common import InfoExtractor\nfrom ..utils import (\n int_or_none,\n js_to_json,\n url_or_none,\n urlencode_postdata,\n urljoin,\n)\nfrom ..utils.traversal import traverse_obj\n\n\nclass JioSaavnBaseIE(InfoExtractor):\n def _extract_initial_data(self, url, audio_id):\n webpage = self._download_webpage(url, audio_id)\n return self._search_json(\n r'window\\.__INITIAL_DATA__\\s*=', webpage,\n 'init json', audio_id, transform_source=js_to_json)\n\n\nclass JioSaavnSongIE(JioSaavnBaseIE):\n _VALID_URL = r'https?://(?:www\\.)?(?:jiosaavn\\.com/song/[^/?#]+/|saavn\\.com/s/song/(?:[^/?#]+/){3})(?P<id>[^/?#]+)'\n _TESTS = [{\n 'url': 'https://www.jiosaavn.com/song/leja-re/OQsEfQFVUXk',\n 'md5': '3b84396d15ed9e083c3106f1fa589c04',\n 'info_dict': {\n 'id': 'OQsEfQFVUXk',\n 'ext': 'mp4',\n 'title': 'Leja Re',\n 'album': 'Leja Re',\n 'thumbnail': 'https://c.saavncdn.com/258/Leja-Re-Hindi-2018-20181124024539-500x500.jpg',\n 'duration': 205,\n 'view_count': int,\n 'release_year': 2018,\n },\n }, {\n 'url': 'https://www.saavn.com/s/song/hindi/Saathiya/O-Humdum-Suniyo-Re/KAMiazoCblU',\n 'only_matching': True,\n }]\n\n _VALID_BITRATES = ('16', '32', '64', '128', '320')\n\n def _real_extract(self, url):\n audio_id = self._match_id(url)\n extract_bitrates = self._configuration_arg('bitrate', ['128', '320'], ie_key='JioSaavn')\n if invalid_bitrates := [br for br in extract_bitrates if br not in self._VALID_BITRATES]:\n raise ValueError(\n f'Invalid bitrate(s): {\", \".join(invalid_bitrates)}. '\n + f'Valid bitrates are: {\", \".join(self._VALID_BITRATES)}')\n\n song_data = self._extract_initial_data(url, audio_id)['song']['song']\n formats = []\n for bitrate in extract_bitrates:\n media_data = self._download_json(\n 'https://www.jiosaavn.com/api.php', audio_id, f'Downloading format info for {bitrate}',\n fatal=False, data=urlencode_postdata({\n '__call': 'song.generateAuthToken',\n '_format': 'json',\n 'bitrate': bitrate,\n 'url': song_data['encrypted_media_url'],\n }))\n if not media_data.get('auth_url'):\n self.report_warning(f'Unable to extract format info for {bitrate}')\n continue\n formats.append({\n 'url': media_data['auth_url'],\n 'ext': media_data.get('type'),\n 'format_id': bitrate,\n 'abr': int(bitrate),\n 'vcodec': 'none',\n })\n\n return {\n 'id': audio_id,\n 'formats': formats,\n **traverse_obj(song_data, {\n 'title': ('title', 'text'),\n 'album': ('album', 'text'),\n 'thumbnail': ('image', 0, {url_or_none}),\n 'duration': ('duration', {int_or_none}),\n 'view_count': ('play_count', {int_or_none}),\n 'release_year': ('year', {int_or_none}),\n }),\n }\n\n\nclass JioSaavnAlbumIE(JioSaavnBaseIE):\n _VALID_URL = r'https?://(?:www\\.)?(?:jio)?saavn\\.com/album/[^/?#]+/(?P<id>[^/?#]+)'\n _TESTS = [{\n 'url': 'https://www.jiosaavn.com/album/96/buIOjYZDrNA_',\n 'info_dict': {\n 'id': 'buIOjYZDrNA_',\n 'title': '96',\n },\n 'playlist_count': 10,\n }]\n\n def _real_extract(self, url):\n album_id = self._match_id(url)\n album_view = self._extract_initial_data(url, album_id)['albumView']\n\n return self.playlist_from_matches(\n traverse_obj(album_view, (\n 'modules', lambda _, x: x['key'] == 'list', 'data', ..., 'title', 'action', {str})),\n album_id, traverse_obj(album_view, ('album', 'title', 'text', {str})), ie=JioSaavnSongIE,\n getter=lambda x: urljoin('https://www.jiosaavn.com/', x))\n", "path": 
"yt_dlp/extractor/jiosaavn.py"}], "after_files": [{"content": "from .common import InfoExtractor\nfrom ..utils import (\n int_or_none,\n js_to_json,\n orderedSet,\n url_or_none,\n urlencode_postdata,\n urljoin,\n)\nfrom ..utils.traversal import traverse_obj\n\n\nclass JioSaavnBaseIE(InfoExtractor):\n def _extract_initial_data(self, url, audio_id):\n webpage = self._download_webpage(url, audio_id)\n return self._search_json(\n r'window\\.__INITIAL_DATA__\\s*=', webpage,\n 'init json', audio_id, transform_source=js_to_json)\n\n\nclass JioSaavnSongIE(JioSaavnBaseIE):\n _VALID_URL = r'https?://(?:www\\.)?(?:jiosaavn\\.com/song/[^/?#]+/|saavn\\.com/s/song/(?:[^/?#]+/){3})(?P<id>[^/?#]+)'\n _TESTS = [{\n 'url': 'https://www.jiosaavn.com/song/leja-re/OQsEfQFVUXk',\n 'md5': '3b84396d15ed9e083c3106f1fa589c04',\n 'info_dict': {\n 'id': 'OQsEfQFVUXk',\n 'ext': 'mp4',\n 'title': 'Leja Re',\n 'album': 'Leja Re',\n 'thumbnail': 'https://c.saavncdn.com/258/Leja-Re-Hindi-2018-20181124024539-500x500.jpg',\n 'duration': 205,\n 'view_count': int,\n 'release_year': 2018,\n 'artists': ['Sandesh Shandilya', 'Dhvani Bhanushali', 'Tanishk Bagchi', 'Rashmi Virag', 'Irshad Kamil'],\n },\n }, {\n 'url': 'https://www.saavn.com/s/song/hindi/Saathiya/O-Humdum-Suniyo-Re/KAMiazoCblU',\n 'only_matching': True,\n }]\n\n _VALID_BITRATES = ('16', '32', '64', '128', '320')\n\n def _real_extract(self, url):\n audio_id = self._match_id(url)\n extract_bitrates = self._configuration_arg('bitrate', ['128', '320'], ie_key='JioSaavn')\n if invalid_bitrates := [br for br in extract_bitrates if br not in self._VALID_BITRATES]:\n raise ValueError(\n f'Invalid bitrate(s): {\", \".join(invalid_bitrates)}. '\n + f'Valid bitrates are: {\", \".join(self._VALID_BITRATES)}')\n\n song_data = self._extract_initial_data(url, audio_id)['song']['song']\n formats = []\n for bitrate in extract_bitrates:\n media_data = self._download_json(\n 'https://www.jiosaavn.com/api.php', audio_id, f'Downloading format info for {bitrate}',\n fatal=False, data=urlencode_postdata({\n '__call': 'song.generateAuthToken',\n '_format': 'json',\n 'bitrate': bitrate,\n 'url': song_data['encrypted_media_url'],\n }))\n if not media_data.get('auth_url'):\n self.report_warning(f'Unable to extract format info for {bitrate}')\n continue\n formats.append({\n 'url': media_data['auth_url'],\n 'ext': media_data.get('type'),\n 'format_id': bitrate,\n 'abr': int(bitrate),\n 'vcodec': 'none',\n })\n\n return {\n 'id': audio_id,\n 'formats': formats,\n **traverse_obj(song_data, {\n 'title': ('title', 'text'),\n 'album': ('album', 'text'),\n 'thumbnail': ('image', 0, {url_or_none}),\n 'duration': ('duration', {int_or_none}),\n 'view_count': ('play_count', {int_or_none}),\n 'release_year': ('year', {int_or_none}),\n 'artists': ('artists', ..., 'name', {str}, all, {orderedSet}),\n }),\n }\n\n\nclass JioSaavnAlbumIE(JioSaavnBaseIE):\n _VALID_URL = r'https?://(?:www\\.)?(?:jio)?saavn\\.com/album/[^/?#]+/(?P<id>[^/?#]+)'\n _TESTS = [{\n 'url': 'https://www.jiosaavn.com/album/96/buIOjYZDrNA_',\n 'info_dict': {\n 'id': 'buIOjYZDrNA_',\n 'title': '96',\n },\n 'playlist_count': 10,\n }]\n\n def _real_extract(self, url):\n album_id = self._match_id(url)\n album_view = self._extract_initial_data(url, album_id)['albumView']\n\n return self.playlist_from_matches(\n traverse_obj(album_view, (\n 'modules', lambda _, x: x['key'] == 'list', 'data', ..., 'title', 'action', {str})),\n album_id, traverse_obj(album_view, ('album', 'title', 'text', {str})), ie=JioSaavnSongIE,\n getter=lambda x: 
urljoin('https://www.jiosaavn.com/', x))\n", "path": "yt_dlp/extractor/jiosaavn.py"}]}
| 3,101 | 299 |
gh_patches_debug_14246
|
rasdani/github-patches
|
git_diff
|
pyca__cryptography-1554
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
raises_unsupported_algorithm gives very unhelpful errors
When the error tag is wrong you get errors along the lines of `assert <object object at 0xf0000000> is not <object object at 0xb0000000>`. This is not very helpful; it's not even particularly obvious that the error tag is what's wrong until you go and read the code.
Should probably generate a useful error message or somehow give the tag objects a more useful `repr` output.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cryptography/exceptions.py`
Content:
```
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7
8 class _Reasons(object):
9 BACKEND_MISSING_INTERFACE = object()
10 UNSUPPORTED_HASH = object()
11 UNSUPPORTED_CIPHER = object()
12 UNSUPPORTED_PADDING = object()
13 UNSUPPORTED_MGF = object()
14 UNSUPPORTED_PUBLIC_KEY_ALGORITHM = object()
15 UNSUPPORTED_ELLIPTIC_CURVE = object()
16 UNSUPPORTED_SERIALIZATION = object()
17 UNSUPPORTED_X509 = object()
18
19
20 class UnsupportedAlgorithm(Exception):
21 def __init__(self, message, reason=None):
22 super(UnsupportedAlgorithm, self).__init__(message)
23 self._reason = reason
24
25
26 class AlreadyFinalized(Exception):
27 pass
28
29
30 class AlreadyUpdated(Exception):
31 pass
32
33
34 class NotYetFinalized(Exception):
35 pass
36
37
38 class InvalidTag(Exception):
39 pass
40
41
42 class InvalidSignature(Exception):
43 pass
44
45
46 class InternalError(Exception):
47 pass
48
49
50 class InvalidKey(Exception):
51 pass
52
53
54 class InvalidToken(Exception):
55 pass
56
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/cryptography/exceptions.py b/src/cryptography/exceptions.py
--- a/src/cryptography/exceptions.py
+++ b/src/cryptography/exceptions.py
@@ -4,17 +4,19 @@
from __future__ import absolute_import, division, print_function
-
-class _Reasons(object):
- BACKEND_MISSING_INTERFACE = object()
- UNSUPPORTED_HASH = object()
- UNSUPPORTED_CIPHER = object()
- UNSUPPORTED_PADDING = object()
- UNSUPPORTED_MGF = object()
- UNSUPPORTED_PUBLIC_KEY_ALGORITHM = object()
- UNSUPPORTED_ELLIPTIC_CURVE = object()
- UNSUPPORTED_SERIALIZATION = object()
- UNSUPPORTED_X509 = object()
+from enum import Enum
+
+
+class _Reasons(Enum):
+ BACKEND_MISSING_INTERFACE = 0
+ UNSUPPORTED_HASH = 1
+ UNSUPPORTED_CIPHER = 2
+ UNSUPPORTED_PADDING = 3
+ UNSUPPORTED_MGF = 4
+ UNSUPPORTED_PUBLIC_KEY_ALGORITHM = 5
+ UNSUPPORTED_ELLIPTIC_CURVE = 6
+ UNSUPPORTED_SERIALIZATION = 7
+ UNSUPPORTED_X509 = 8
class UnsupportedAlgorithm(Exception):
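To make the motivation concrete, a small sketch (the class names below are invented, not taken from the library) compares the `repr` of a bare `object()` sentinel with that of an `Enum` member; the latter is what turns the opaque `assert <object object at 0x...>` failures into readable ones.

```python
# Sketch only: illustrative classes, not the library's actual _Reasons.
from enum import Enum


class _OldStyleReasons(object):
    UNSUPPORTED_HASH = object()


class _EnumReasons(Enum):
    UNSUPPORTED_HASH = 1


print(repr(_OldStyleReasons.UNSUPPORTED_HASH))  # e.g. <object object at 0x7f3a2c0d1a30>
print(repr(_EnumReasons.UNSUPPORTED_HASH))      # <_EnumReasons.UNSUPPORTED_HASH: 1>
```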
|
{"golden_diff": "diff --git a/src/cryptography/exceptions.py b/src/cryptography/exceptions.py\n--- a/src/cryptography/exceptions.py\n+++ b/src/cryptography/exceptions.py\n@@ -4,17 +4,19 @@\n \n from __future__ import absolute_import, division, print_function\n \n-\n-class _Reasons(object):\n- BACKEND_MISSING_INTERFACE = object()\n- UNSUPPORTED_HASH = object()\n- UNSUPPORTED_CIPHER = object()\n- UNSUPPORTED_PADDING = object()\n- UNSUPPORTED_MGF = object()\n- UNSUPPORTED_PUBLIC_KEY_ALGORITHM = object()\n- UNSUPPORTED_ELLIPTIC_CURVE = object()\n- UNSUPPORTED_SERIALIZATION = object()\n- UNSUPPORTED_X509 = object()\n+from enum import Enum\n+\n+\n+class _Reasons(Enum):\n+ BACKEND_MISSING_INTERFACE = 0\n+ UNSUPPORTED_HASH = 1\n+ UNSUPPORTED_CIPHER = 2\n+ UNSUPPORTED_PADDING = 3\n+ UNSUPPORTED_MGF = 4\n+ UNSUPPORTED_PUBLIC_KEY_ALGORITHM = 5\n+ UNSUPPORTED_ELLIPTIC_CURVE = 6\n+ UNSUPPORTED_SERIALIZATION = 7\n+ UNSUPPORTED_X509 = 8\n \n \n class UnsupportedAlgorithm(Exception):\n", "issue": "raises_unsupported_algorithm gives very unhelpful errors\nWhen the error tag is wrong you get errors along the lines of `assert <object object at 0xf0000000> is not <object object as 0xb0000000>`. This is not very helpful, it's not even particularly obvious that the error tag is actually what's wrong until you go and read the code.\n\nShould probably generate a useful error message or somehow give the tag objects a more useful `repr` output.\n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\n\nclass _Reasons(object):\n BACKEND_MISSING_INTERFACE = object()\n UNSUPPORTED_HASH = object()\n UNSUPPORTED_CIPHER = object()\n UNSUPPORTED_PADDING = object()\n UNSUPPORTED_MGF = object()\n UNSUPPORTED_PUBLIC_KEY_ALGORITHM = object()\n UNSUPPORTED_ELLIPTIC_CURVE = object()\n UNSUPPORTED_SERIALIZATION = object()\n UNSUPPORTED_X509 = object()\n\n\nclass UnsupportedAlgorithm(Exception):\n def __init__(self, message, reason=None):\n super(UnsupportedAlgorithm, self).__init__(message)\n self._reason = reason\n\n\nclass AlreadyFinalized(Exception):\n pass\n\n\nclass AlreadyUpdated(Exception):\n pass\n\n\nclass NotYetFinalized(Exception):\n pass\n\n\nclass InvalidTag(Exception):\n pass\n\n\nclass InvalidSignature(Exception):\n pass\n\n\nclass InternalError(Exception):\n pass\n\n\nclass InvalidKey(Exception):\n pass\n\n\nclass InvalidToken(Exception):\n pass\n", "path": "src/cryptography/exceptions.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom enum import Enum\n\n\nclass _Reasons(Enum):\n BACKEND_MISSING_INTERFACE = 0\n UNSUPPORTED_HASH = 1\n UNSUPPORTED_CIPHER = 2\n UNSUPPORTED_PADDING = 3\n UNSUPPORTED_MGF = 4\n UNSUPPORTED_PUBLIC_KEY_ALGORITHM = 5\n UNSUPPORTED_ELLIPTIC_CURVE = 6\n UNSUPPORTED_SERIALIZATION = 7\n UNSUPPORTED_X509 = 8\n\n\nclass UnsupportedAlgorithm(Exception):\n def __init__(self, message, reason=None):\n super(UnsupportedAlgorithm, self).__init__(message)\n self._reason = reason\n\n\nclass AlreadyFinalized(Exception):\n pass\n\n\nclass AlreadyUpdated(Exception):\n pass\n\n\nclass NotYetFinalized(Exception):\n pass\n\n\nclass InvalidTag(Exception):\n pass\n\n\nclass InvalidSignature(Exception):\n pass\n\n\nclass InternalError(Exception):\n pass\n\n\nclass InvalidKey(Exception):\n pass\n\n\nclass InvalidToken(Exception):\n pass\n", "path": "src/cryptography/exceptions.py"}]}
| 737 | 275 |
gh_patches_debug_3149
|
rasdani/github-patches
|
git_diff
|
huggingface__dataset-viewer-479
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use main instead of master to load the datasets
The main branch in https://github.com/huggingface/datasets is now `main`, not `master` anymore. Note that it's backward compatible, so no need to hurry
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `services/worker/src/worker/constants.py`
Content:
```
1 from typing import Optional
2
3 DEFAULT_ASSETS_BASE_URL: str = "assets"
4 DEFAULT_ASSETS_DIRECTORY: None = None
5 DEFAULT_DATASETS_REVISION: str = "master"
6 DEFAULT_HF_TOKEN: Optional[str] = None
7 DEFAULT_LOG_LEVEL: str = "INFO"
8 DEFAULT_MAX_JOB_RETRIES: int = 3
9 DEFAULT_MAX_JOBS_PER_DATASET: int = 1
10 DEFAULT_MAX_LOAD_PCT: int = 70
11 DEFAULT_MAX_MEMORY_PCT: int = 80
12 DEFAULT_MAX_SIZE_FALLBACK: int = 100_000_000
13 DEFAULT_MIN_CELL_BYTES: int = 100
14 DEFAULT_MONGO_CACHE_DATABASE: str = "datasets_server_cache"
15 DEFAULT_MONGO_QUEUE_DATABASE: str = "datasets_server_queue"
16 DEFAULT_MONGO_URL: str = "mongodb://localhost:27018"
17 DEFAULT_ROWS_MAX_BYTES: int = 1_000_000
18 DEFAULT_ROWS_MAX_NUMBER: int = 100
19 DEFAULT_ROWS_MIN_NUMBER: int = 10
20 DEFAULT_WORKER_SLEEP_SECONDS: int = 15
21 DEFAULT_WORKER_QUEUE: str = "datasets"
22
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/services/worker/src/worker/constants.py b/services/worker/src/worker/constants.py
--- a/services/worker/src/worker/constants.py
+++ b/services/worker/src/worker/constants.py
@@ -2,7 +2,7 @@
DEFAULT_ASSETS_BASE_URL: str = "assets"
DEFAULT_ASSETS_DIRECTORY: None = None
-DEFAULT_DATASETS_REVISION: str = "master"
+DEFAULT_DATASETS_REVISION: str = "main"
DEFAULT_HF_TOKEN: Optional[str] = None
DEFAULT_LOG_LEVEL: str = "INFO"
DEFAULT_MAX_JOB_RETRIES: int = 3
|
{"golden_diff": "diff --git a/services/worker/src/worker/constants.py b/services/worker/src/worker/constants.py\n--- a/services/worker/src/worker/constants.py\n+++ b/services/worker/src/worker/constants.py\n@@ -2,7 +2,7 @@\n \n DEFAULT_ASSETS_BASE_URL: str = \"assets\"\n DEFAULT_ASSETS_DIRECTORY: None = None\n-DEFAULT_DATASETS_REVISION: str = \"master\"\n+DEFAULT_DATASETS_REVISION: str = \"main\"\n DEFAULT_HF_TOKEN: Optional[str] = None\n DEFAULT_LOG_LEVEL: str = \"INFO\"\n DEFAULT_MAX_JOB_RETRIES: int = 3\n", "issue": "Use main instead of master to load the datasets\nThe main branch in https://github.com/huggingface/datasets is now `main`, not `master` anymore. Note that it's backward compatible, so no need to hurry\n", "before_files": [{"content": "from typing import Optional\n\nDEFAULT_ASSETS_BASE_URL: str = \"assets\"\nDEFAULT_ASSETS_DIRECTORY: None = None\nDEFAULT_DATASETS_REVISION: str = \"master\"\nDEFAULT_HF_TOKEN: Optional[str] = None\nDEFAULT_LOG_LEVEL: str = \"INFO\"\nDEFAULT_MAX_JOB_RETRIES: int = 3\nDEFAULT_MAX_JOBS_PER_DATASET: int = 1\nDEFAULT_MAX_LOAD_PCT: int = 70\nDEFAULT_MAX_MEMORY_PCT: int = 80\nDEFAULT_MAX_SIZE_FALLBACK: int = 100_000_000\nDEFAULT_MIN_CELL_BYTES: int = 100\nDEFAULT_MONGO_CACHE_DATABASE: str = \"datasets_server_cache\"\nDEFAULT_MONGO_QUEUE_DATABASE: str = \"datasets_server_queue\"\nDEFAULT_MONGO_URL: str = \"mongodb://localhost:27018\"\nDEFAULT_ROWS_MAX_BYTES: int = 1_000_000\nDEFAULT_ROWS_MAX_NUMBER: int = 100\nDEFAULT_ROWS_MIN_NUMBER: int = 10\nDEFAULT_WORKER_SLEEP_SECONDS: int = 15\nDEFAULT_WORKER_QUEUE: str = \"datasets\"\n", "path": "services/worker/src/worker/constants.py"}], "after_files": [{"content": "from typing import Optional\n\nDEFAULT_ASSETS_BASE_URL: str = \"assets\"\nDEFAULT_ASSETS_DIRECTORY: None = None\nDEFAULT_DATASETS_REVISION: str = \"main\"\nDEFAULT_HF_TOKEN: Optional[str] = None\nDEFAULT_LOG_LEVEL: str = \"INFO\"\nDEFAULT_MAX_JOB_RETRIES: int = 3\nDEFAULT_MAX_JOBS_PER_DATASET: int = 1\nDEFAULT_MAX_LOAD_PCT: int = 70\nDEFAULT_MAX_MEMORY_PCT: int = 80\nDEFAULT_MAX_SIZE_FALLBACK: int = 100_000_000\nDEFAULT_MIN_CELL_BYTES: int = 100\nDEFAULT_MONGO_CACHE_DATABASE: str = \"datasets_server_cache\"\nDEFAULT_MONGO_QUEUE_DATABASE: str = \"datasets_server_queue\"\nDEFAULT_MONGO_URL: str = \"mongodb://localhost:27018\"\nDEFAULT_ROWS_MAX_BYTES: int = 1_000_000\nDEFAULT_ROWS_MAX_NUMBER: int = 100\nDEFAULT_ROWS_MIN_NUMBER: int = 10\nDEFAULT_WORKER_SLEEP_SECONDS: int = 15\nDEFAULT_WORKER_QUEUE: str = \"datasets\"\n", "path": "services/worker/src/worker/constants.py"}]}
| 590 | 132 |
gh_patches_debug_24387
|
rasdani/github-patches
|
git_diff
|
napari__napari-4401
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Screenshot test failing on main (due to vispy 0.10?)
## 🐛 Bug
The test `napari/_tests/test_with_screenshot.py:test_z_order_image_points_after_ndisplay` is failing on main:
https://github.com/napari/napari/runs/6069251907?check_suite_focus=true#step:7:294
I suspect that this is due to the VisPy 0.10 release, which happened in the last 24h or so.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `napari/_vispy/visuals/volume.py`
Content:
```
1 from vispy.scene.visuals import Volume as BaseVolume
2
3 FUNCTION_DEFINITIONS = """
4 // the tolerance for testing equality of floats with floatEqual and floatNotEqual
5 const float equality_tolerance = 1e-8;
6
7 bool floatNotEqual(float val1, float val2)
8 {
9 // check if val1 and val2 are not equal
10 bool not_equal = abs(val1 - val2) > equality_tolerance;
11
12 return not_equal;
13 }
14
15 bool floatEqual(float val1, float val2)
16 {
17 // check if val1 and val2 are equal
18 bool equal = abs(val1 - val2) < equality_tolerance;
19
20 return equal;
21 }
22
23
24 // the background value for the iso_categorical shader
25 const float categorical_bg_value = 0;
26
27 int detectAdjacentBackground(float val_neg, float val_pos)
28 {
29 // determine if the adjacent voxels along an axis are both background
30 int adjacent_bg = int( floatEqual(val_neg, categorical_bg_value) );
31 adjacent_bg = adjacent_bg * int( floatEqual(val_pos, categorical_bg_value) );
32 return adjacent_bg;
33 }
34
35 vec4 calculateCategoricalColor(vec4 betterColor, vec3 loc, vec3 step)
36 {
37 // Calculate color by incorporating ambient and diffuse lighting
38 vec4 color0 = $sample(u_volumetex, loc);
39 vec4 color1;
40 vec4 color2;
41 float val0 = colorToVal(color0);
42 float val1 = 0;
43 float val2 = 0;
44 int n_bg_borders = 0;
45
46 // View direction
47 vec3 V = normalize(view_ray);
48
49 // calculate normal vector from gradient
50 vec3 N; // normal
51 color1 = $sample( u_volumetex, loc+vec3(-step[0],0.0,0.0) );
52 color2 = $sample( u_volumetex, loc+vec3(step[0],0.0,0.0) );
53 val1 = colorToVal(color1);
54 val2 = colorToVal(color2);
55 N[0] = val1 - val2;
56 n_bg_borders += detectAdjacentBackground(val1, val2);
57
58 color1 = $sample( u_volumetex, loc+vec3(0.0,-step[1],0.0) );
59 color2 = $sample( u_volumetex, loc+vec3(0.0,step[1],0.0) );
60 val1 = colorToVal(color1);
61 val2 = colorToVal(color2);
62 N[1] = val1 - val2;
63 n_bg_borders += detectAdjacentBackground(val1, val2);
64
65 color1 = $sample( u_volumetex, loc+vec3(0.0,0.0,-step[2]) );
66 color2 = $sample( u_volumetex, loc+vec3(0.0,0.0,step[2]) );
67 val1 = colorToVal(color1);
68 val2 = colorToVal(color2);
69 N[2] = val1 - val2;
70 n_bg_borders += detectAdjacentBackground(val1, val2);
71
72 // Normalize and flip normal so it points towards viewer
73 N = normalize(N);
74 float Nselect = float(dot(N,V) > 0.0);
75 N = (2.0*Nselect - 1.0) * N; // == Nselect * N - (1.0-Nselect)*N;
76
77 // Init colors
78 vec4 ambient_color = vec4(0.0, 0.0, 0.0, 0.0);
79 vec4 diffuse_color = vec4(0.0, 0.0, 0.0, 0.0);
80 vec4 final_color;
81
82 // todo: allow multiple light, define lights on viewvox or subscene
83 int nlights = 1;
84 for (int i=0; i<nlights; i++)
85 {
86 // Get light direction (make sure to prevent zero devision)
87 vec3 L = normalize(view_ray); //lightDirs[i];
88 float lightEnabled = float( length(L) > 0.0 );
89 L = normalize(L+(1.0-lightEnabled));
90
91 // Calculate lighting properties
92 float lambertTerm = clamp( dot(N,L), 0.0, 1.0 );
93 if (n_bg_borders > 0) {
94 // to fix dim pixels due to poor normal estimation,
95 // we give a default lambda to pixels surrounded by background
96 lambertTerm = 0.5;
97 }
98
99 // Calculate mask
100 float mask1 = lightEnabled;
101
102 // Calculate colors
103 ambient_color += mask1 * u_ambient; // * gl_LightSource[i].ambient;
104 diffuse_color += mask1 * lambertTerm;
105 }
106
107 // Calculate final color by componing different components
108 final_color = betterColor * ( ambient_color + diffuse_color);
109 final_color.a = betterColor.a;
110
111 // Done
112 return final_color;
113 }
114 """
115
116 ISO_CATEGORICAL_SNIPPETS = dict(
117 before_loop="""
118 vec4 color3 = vec4(0.0); // final color
119 vec3 dstep = 1.5 / u_shape; // step to sample derivative, set to match iso shader
120 gl_FragColor = vec4(0.0);
121 """,
122 in_loop="""
123 // check if value is different from the background value
124 if ( floatNotEqual(val, categorical_bg_value) ) {
125 // Take the last interval in smaller steps
126 vec3 iloc = loc - step;
127 for (int i=0; i<10; i++) {
128 color = $sample(u_volumetex, iloc);
129 if (floatNotEqual(color.g, categorical_bg_value) ) {
130 // when the non-background value is reached
131 // calculate the color (apply lighting effects)
132 color = applyColormap(color.g);
133 color = calculateCategoricalColor(color, iloc, dstep);
134 gl_FragColor = color;
135
136 // set the variables for the depth buffer
137 surface_point = iloc * u_shape;
138 surface_found = true;
139
140 iter = nsteps;
141 break;
142 }
143 iloc += step * 0.1;
144 }
145 }
146 """,
147 after_loop="""
148 if (surface_found == false) {
149 discard;
150 }
151 """,
152 )
153
154 shaders = BaseVolume._shaders.copy()
155 before, after = shaders['fragment'].split('void main()')
156 shaders['fragment'] = before + FUNCTION_DEFINITIONS + 'void main()' + after
157
158 rendering_methods = BaseVolume._rendering_methods.copy()
159 rendering_methods['iso_categorical'] = ISO_CATEGORICAL_SNIPPETS
160
161
162 class Volume(BaseVolume):
163 # add the new rendering method to the snippets dict
164 _shaders = shaders
165 _rendering_methods = rendering_methods
166
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/napari/_vispy/visuals/volume.py b/napari/_vispy/visuals/volume.py
--- a/napari/_vispy/visuals/volume.py
+++ b/napari/_vispy/visuals/volume.py
@@ -118,6 +118,7 @@
vec4 color3 = vec4(0.0); // final color
vec3 dstep = 1.5 / u_shape; // step to sample derivative, set to match iso shader
gl_FragColor = vec4(0.0);
+ bool discard_fragment = true;
""",
in_loop="""
// check if value is different from the background value
@@ -134,8 +135,8 @@
gl_FragColor = color;
// set the variables for the depth buffer
- surface_point = iloc * u_shape;
- surface_found = true;
+ frag_depth_point = iloc * u_shape;
+ discard_fragment = false;
iter = nsteps;
break;
@@ -145,9 +146,8 @@
}
""",
after_loop="""
- if (surface_found == false) {
+ if (discard_fragment)
discard;
- }
""",
)
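A related defensive pattern, sketched here as an assumption rather than as part of the patch: since the snippet variables changed across vispy releases, a project that patches vendored shader templates can fail fast on an incompatible vispy instead of surfacing the problem as a failing screenshot test. The 0.10 boundary below is inferred from the issue, not from napari's actual dependency pins.

```python
# Sketch only: version guard for the assumed vispy >= 0.10 template variables.
import vispy
from packaging.version import Version

if Version(vispy.__version__) < Version("0.10"):
    raise RuntimeError(
        "iso_categorical snippets target the vispy >= 0.10 volume shader "
        f"template; found vispy {vispy.__version__}"
    )
```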
|
{"golden_diff": "diff --git a/napari/_vispy/visuals/volume.py b/napari/_vispy/visuals/volume.py\n--- a/napari/_vispy/visuals/volume.py\n+++ b/napari/_vispy/visuals/volume.py\n@@ -118,6 +118,7 @@\n vec4 color3 = vec4(0.0); // final color\n vec3 dstep = 1.5 / u_shape; // step to sample derivative, set to match iso shader\n gl_FragColor = vec4(0.0);\n+ bool discard_fragment = true;\n \"\"\",\n in_loop=\"\"\"\n // check if value is different from the background value\n@@ -134,8 +135,8 @@\n gl_FragColor = color;\n \n // set the variables for the depth buffer\n- surface_point = iloc * u_shape;\n- surface_found = true;\n+ frag_depth_point = iloc * u_shape;\n+ discard_fragment = false;\n \n iter = nsteps;\n break;\n@@ -145,9 +146,8 @@\n }\n \"\"\",\n after_loop=\"\"\"\n- if (surface_found == false) {\n+ if (discard_fragment)\n discard;\n- }\n \"\"\",\n )\n", "issue": "Screenshot test failing on main (due to vispy 0.10?)\n## \ud83d\udc1b Bug\r\n\r\nThe test `napari/_tests/test_with_screenshot.py:test_z_order_image_points_after_ndisplay` is failing on main:\r\n\r\nhttps://github.com/napari/napari/runs/6069251907?check_suite_focus=true#step:7:294\r\n\r\nI suspect that this is due to the VisPy 0.10 release, which happened in the last 24h or so.\n", "before_files": [{"content": "from vispy.scene.visuals import Volume as BaseVolume\n\nFUNCTION_DEFINITIONS = \"\"\"\n// the tolerance for testing equality of floats with floatEqual and floatNotEqual\nconst float equality_tolerance = 1e-8;\n\nbool floatNotEqual(float val1, float val2)\n{\n // check if val1 and val2 are not equal\n bool not_equal = abs(val1 - val2) > equality_tolerance;\n\n return not_equal;\n}\n\nbool floatEqual(float val1, float val2)\n{\n // check if val1 and val2 are equal\n bool equal = abs(val1 - val2) < equality_tolerance;\n\n return equal;\n}\n\n\n// the background value for the iso_categorical shader\nconst float categorical_bg_value = 0;\n\nint detectAdjacentBackground(float val_neg, float val_pos)\n{\n // determine if the adjacent voxels along an axis are both background\n int adjacent_bg = int( floatEqual(val_neg, categorical_bg_value) );\n adjacent_bg = adjacent_bg * int( floatEqual(val_pos, categorical_bg_value) );\n return adjacent_bg;\n}\n\nvec4 calculateCategoricalColor(vec4 betterColor, vec3 loc, vec3 step)\n{\n // Calculate color by incorporating ambient and diffuse lighting\n vec4 color0 = $sample(u_volumetex, loc);\n vec4 color1;\n vec4 color2;\n float val0 = colorToVal(color0);\n float val1 = 0;\n float val2 = 0;\n int n_bg_borders = 0;\n\n // View direction\n vec3 V = normalize(view_ray);\n\n // calculate normal vector from gradient\n vec3 N; // normal\n color1 = $sample( u_volumetex, loc+vec3(-step[0],0.0,0.0) );\n color2 = $sample( u_volumetex, loc+vec3(step[0],0.0,0.0) );\n val1 = colorToVal(color1);\n val2 = colorToVal(color2);\n N[0] = val1 - val2;\n n_bg_borders += detectAdjacentBackground(val1, val2);\n\n color1 = $sample( u_volumetex, loc+vec3(0.0,-step[1],0.0) );\n color2 = $sample( u_volumetex, loc+vec3(0.0,step[1],0.0) );\n val1 = colorToVal(color1);\n val2 = colorToVal(color2);\n N[1] = val1 - val2;\n n_bg_borders += detectAdjacentBackground(val1, val2);\n\n color1 = $sample( u_volumetex, loc+vec3(0.0,0.0,-step[2]) );\n color2 = $sample( u_volumetex, loc+vec3(0.0,0.0,step[2]) );\n val1 = colorToVal(color1);\n val2 = colorToVal(color2);\n N[2] = val1 - val2;\n n_bg_borders += detectAdjacentBackground(val1, val2);\n\n // Normalize and flip normal so it points towards viewer\n N = normalize(N);\n float Nselect 
= float(dot(N,V) > 0.0);\n N = (2.0*Nselect - 1.0) * N; // == Nselect * N - (1.0-Nselect)*N;\n\n // Init colors\n vec4 ambient_color = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 diffuse_color = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 final_color;\n\n // todo: allow multiple light, define lights on viewvox or subscene\n int nlights = 1;\n for (int i=0; i<nlights; i++)\n {\n // Get light direction (make sure to prevent zero devision)\n vec3 L = normalize(view_ray); //lightDirs[i];\n float lightEnabled = float( length(L) > 0.0 );\n L = normalize(L+(1.0-lightEnabled));\n\n // Calculate lighting properties\n float lambertTerm = clamp( dot(N,L), 0.0, 1.0 );\n if (n_bg_borders > 0) {\n // to fix dim pixels due to poor normal estimation,\n // we give a default lambda to pixels surrounded by background\n lambertTerm = 0.5;\n }\n\n // Calculate mask\n float mask1 = lightEnabled;\n\n // Calculate colors\n ambient_color += mask1 * u_ambient; // * gl_LightSource[i].ambient;\n diffuse_color += mask1 * lambertTerm;\n }\n\n // Calculate final color by componing different components\n final_color = betterColor * ( ambient_color + diffuse_color);\n final_color.a = betterColor.a;\n\n // Done\n return final_color;\n}\n\"\"\"\n\nISO_CATEGORICAL_SNIPPETS = dict(\n before_loop=\"\"\"\n vec4 color3 = vec4(0.0); // final color\n vec3 dstep = 1.5 / u_shape; // step to sample derivative, set to match iso shader\n gl_FragColor = vec4(0.0);\n \"\"\",\n in_loop=\"\"\"\n // check if value is different from the background value\n if ( floatNotEqual(val, categorical_bg_value) ) {\n // Take the last interval in smaller steps\n vec3 iloc = loc - step;\n for (int i=0; i<10; i++) {\n color = $sample(u_volumetex, iloc);\n if (floatNotEqual(color.g, categorical_bg_value) ) {\n // when the non-background value is reached\n // calculate the color (apply lighting effects)\n color = applyColormap(color.g);\n color = calculateCategoricalColor(color, iloc, dstep);\n gl_FragColor = color;\n\n // set the variables for the depth buffer\n surface_point = iloc * u_shape;\n surface_found = true;\n\n iter = nsteps;\n break;\n }\n iloc += step * 0.1;\n }\n }\n \"\"\",\n after_loop=\"\"\"\n if (surface_found == false) {\n discard;\n }\n \"\"\",\n)\n\nshaders = BaseVolume._shaders.copy()\nbefore, after = shaders['fragment'].split('void main()')\nshaders['fragment'] = before + FUNCTION_DEFINITIONS + 'void main()' + after\n\nrendering_methods = BaseVolume._rendering_methods.copy()\nrendering_methods['iso_categorical'] = ISO_CATEGORICAL_SNIPPETS\n\n\nclass Volume(BaseVolume):\n # add the new rendering method to the snippets dict\n _shaders = shaders\n _rendering_methods = rendering_methods\n", "path": "napari/_vispy/visuals/volume.py"}], "after_files": [{"content": "from vispy.scene.visuals import Volume as BaseVolume\n\nFUNCTION_DEFINITIONS = \"\"\"\n// the tolerance for testing equality of floats with floatEqual and floatNotEqual\nconst float equality_tolerance = 1e-8;\n\nbool floatNotEqual(float val1, float val2)\n{\n // check if val1 and val2 are not equal\n bool not_equal = abs(val1 - val2) > equality_tolerance;\n\n return not_equal;\n}\n\nbool floatEqual(float val1, float val2)\n{\n // check if val1 and val2 are equal\n bool equal = abs(val1 - val2) < equality_tolerance;\n\n return equal;\n}\n\n\n// the background value for the iso_categorical shader\nconst float categorical_bg_value = 0;\n\nint detectAdjacentBackground(float val_neg, float val_pos)\n{\n // determine if the adjacent voxels along an axis are both background\n int adjacent_bg = int( 
floatEqual(val_neg, categorical_bg_value) );\n adjacent_bg = adjacent_bg * int( floatEqual(val_pos, categorical_bg_value) );\n return adjacent_bg;\n}\n\nvec4 calculateCategoricalColor(vec4 betterColor, vec3 loc, vec3 step)\n{\n // Calculate color by incorporating ambient and diffuse lighting\n vec4 color0 = $sample(u_volumetex, loc);\n vec4 color1;\n vec4 color2;\n float val0 = colorToVal(color0);\n float val1 = 0;\n float val2 = 0;\n int n_bg_borders = 0;\n\n // View direction\n vec3 V = normalize(view_ray);\n\n // calculate normal vector from gradient\n vec3 N; // normal\n color1 = $sample( u_volumetex, loc+vec3(-step[0],0.0,0.0) );\n color2 = $sample( u_volumetex, loc+vec3(step[0],0.0,0.0) );\n val1 = colorToVal(color1);\n val2 = colorToVal(color2);\n N[0] = val1 - val2;\n n_bg_borders += detectAdjacentBackground(val1, val2);\n\n color1 = $sample( u_volumetex, loc+vec3(0.0,-step[1],0.0) );\n color2 = $sample( u_volumetex, loc+vec3(0.0,step[1],0.0) );\n val1 = colorToVal(color1);\n val2 = colorToVal(color2);\n N[1] = val1 - val2;\n n_bg_borders += detectAdjacentBackground(val1, val2);\n\n color1 = $sample( u_volumetex, loc+vec3(0.0,0.0,-step[2]) );\n color2 = $sample( u_volumetex, loc+vec3(0.0,0.0,step[2]) );\n val1 = colorToVal(color1);\n val2 = colorToVal(color2);\n N[2] = val1 - val2;\n n_bg_borders += detectAdjacentBackground(val1, val2);\n\n // Normalize and flip normal so it points towards viewer\n N = normalize(N);\n float Nselect = float(dot(N,V) > 0.0);\n N = (2.0*Nselect - 1.0) * N; // == Nselect * N - (1.0-Nselect)*N;\n\n // Init colors\n vec4 ambient_color = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 diffuse_color = vec4(0.0, 0.0, 0.0, 0.0);\n vec4 final_color;\n\n // todo: allow multiple light, define lights on viewvox or subscene\n int nlights = 1;\n for (int i=0; i<nlights; i++)\n {\n // Get light direction (make sure to prevent zero devision)\n vec3 L = normalize(view_ray); //lightDirs[i];\n float lightEnabled = float( length(L) > 0.0 );\n L = normalize(L+(1.0-lightEnabled));\n\n // Calculate lighting properties\n float lambertTerm = clamp( dot(N,L), 0.0, 1.0 );\n if (n_bg_borders > 0) {\n // to fix dim pixels due to poor normal estimation,\n // we give a default lambda to pixels surrounded by background\n lambertTerm = 0.5;\n }\n\n // Calculate mask\n float mask1 = lightEnabled;\n\n // Calculate colors\n ambient_color += mask1 * u_ambient; // * gl_LightSource[i].ambient;\n diffuse_color += mask1 * lambertTerm;\n }\n\n // Calculate final color by componing different components\n final_color = betterColor * ( ambient_color + diffuse_color);\n final_color.a = betterColor.a;\n\n // Done\n return final_color;\n}\n\"\"\"\n\nISO_CATEGORICAL_SNIPPETS = dict(\n before_loop=\"\"\"\n vec4 color3 = vec4(0.0); // final color\n vec3 dstep = 1.5 / u_shape; // step to sample derivative, set to match iso shader\n gl_FragColor = vec4(0.0);\n bool discard_fragment = true;\n \"\"\",\n in_loop=\"\"\"\n // check if value is different from the background value\n if ( floatNotEqual(val, categorical_bg_value) ) {\n // Take the last interval in smaller steps\n vec3 iloc = loc - step;\n for (int i=0; i<10; i++) {\n color = $sample(u_volumetex, iloc);\n if (floatNotEqual(color.g, categorical_bg_value) ) {\n // when the non-background value is reached\n // calculate the color (apply lighting effects)\n color = applyColormap(color.g);\n color = calculateCategoricalColor(color, iloc, dstep);\n gl_FragColor = color;\n\n // set the variables for the depth buffer\n frag_depth_point = iloc * u_shape;\n 
discard_fragment = false;\n\n iter = nsteps;\n break;\n }\n iloc += step * 0.1;\n }\n }\n \"\"\",\n after_loop=\"\"\"\n if (discard_fragment)\n discard;\n \"\"\",\n)\n\nshaders = BaseVolume._shaders.copy()\nbefore, after = shaders['fragment'].split('void main()')\nshaders['fragment'] = before + FUNCTION_DEFINITIONS + 'void main()' + after\n\nrendering_methods = BaseVolume._rendering_methods.copy()\nrendering_methods['iso_categorical'] = ISO_CATEGORICAL_SNIPPETS\n\n\nclass Volume(BaseVolume):\n # add the new rendering method to the snippets dict\n _shaders = shaders\n _rendering_methods = rendering_methods\n", "path": "napari/_vispy/visuals/volume.py"}]}
| 2,291 | 291 |
gh_patches_debug_11689
|
rasdani/github-patches
|
git_diff
|
python-poetry__poetry-1140
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Poetry fails to install p4python due to read-only files
<!--
Hi there! Thank you for discovering and submitting an issue.
Before you submit this; let's make sure of a few things.
Please make sure the following boxes are ticked if they are correct.
If not, please try and fulfill these first.
-->
<!-- Checked checkbox should look like this: [x] -->
- [x] I am on the [latest](https://github.com/sdispater/poetry/releases/latest) Poetry version.
- [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate.
- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).
<!--
Once those are done, if you're able to fill in the following list with your information,
it'd be very helpful to whoever handles the issue.
-->
- **OS version and name**: Windows 10
- **Poetry version**: poetry 0.12.2
- **Link of a [pyproject.toml Gist](https://gist.github.com/epage/5f28e3b1e5eeb9a30697363e369a5fde)**
- **Link of a [backtrace Gist](https://gist.github.com/epage/2584ad981ff5d9f175d55212b0192987)**
## Issue
In digging into the problem, it seems that p4python's files are all marked read-only, causing Windows to error when trying to delete them via `shutil.rmtree`, which is invoked by Poetry's custom temp directory handling.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `poetry/utils/helpers.py`
Content:
```
1 import os
2 import re
3 import shutil
4 import stat
5 import tempfile
6
7 from contextlib import contextmanager
8 from typing import List
9 from typing import Optional
10
11 from poetry.config.config import Config
12 from poetry.utils._compat import Path
13 from poetry.version import Version
14
15
16 try:
17 from collections.abc import Mapping
18 except ImportError:
19 from collections import Mapping
20
21
22 _canonicalize_regex = re.compile("[-_]+")
23
24
25 def canonicalize_name(name): # type: (str) -> str
26 return _canonicalize_regex.sub("-", name).lower()
27
28
29 def module_name(name): # type: (str) -> str
30 return canonicalize_name(name).replace(".", "_").replace("-", "_")
31
32
33 def normalize_version(version): # type: (str) -> str
34 return str(Version(version))
35
36
37 @contextmanager
38 def temporary_directory(*args, **kwargs):
39 try:
40 from tempfile import TemporaryDirectory
41
42 with TemporaryDirectory(*args, **kwargs) as name:
43 yield name
44 except ImportError:
45 name = tempfile.mkdtemp(*args, **kwargs)
46
47 yield name
48
49 shutil.rmtree(name)
50
51
52 def parse_requires(requires): # type: (str) -> List[str]
53 lines = requires.split("\n")
54
55 requires_dist = []
56 in_section = False
57 current_marker = None
58 for line in lines:
59 line = line.strip()
60 if not line:
61 if in_section:
62 in_section = False
63
64 continue
65
66 if line.startswith("["):
67 # extras or conditional dependencies
68 marker = line.lstrip("[").rstrip("]")
69 if ":" not in marker:
70 extra, marker = marker, None
71 else:
72 extra, marker = marker.split(":")
73
74 if extra:
75 if marker:
76 marker = '{} and extra == "{}"'.format(marker, extra)
77 else:
78 marker = 'extra == "{}"'.format(extra)
79
80 if marker:
81 current_marker = marker
82
83 continue
84
85 if current_marker:
86 line = "{} ; {}".format(line, current_marker)
87
88 requires_dist.append(line)
89
90 return requires_dist
91
92
93 def get_cert(config, repository_name): # type: (Config, str) -> Optional[Path]
94 cert = config.get("certificates.{}.cert".format(repository_name))
95 if cert:
96 return Path(cert)
97 else:
98 return None
99
100
101 def get_client_cert(config, repository_name): # type: (Config, str) -> Optional[Path]
102 client_cert = config.get("certificates.{}.client-cert".format(repository_name))
103 if client_cert:
104 return Path(client_cert)
105 else:
106 return None
107
108
109 def _on_rm_error(func, path, exc_info):
110 if not os.path.exists(path):
111 return
112
113 os.chmod(path, stat.S_IWRITE)
114 func(path)
115
116
117 def safe_rmtree(path):
118 if Path(path).is_symlink():
119 return os.unlink(str(path))
120
121 shutil.rmtree(path, onerror=_on_rm_error)
122
123
124 def merge_dicts(d1, d2):
125 for k, v in d2.items():
126 if k in d1 and isinstance(d1[k], dict) and isinstance(d2[k], Mapping):
127 merge_dicts(d1[k], d2[k])
128 else:
129 d1[k] = d2[k]
130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/poetry/utils/helpers.py b/poetry/utils/helpers.py
--- a/poetry/utils/helpers.py
+++ b/poetry/utils/helpers.py
@@ -34,19 +34,18 @@
return str(Version(version))
+def _del_ro(action, name, exc):
+ os.chmod(name, stat.S_IWRITE)
+ os.remove(name)
+
+
@contextmanager
def temporary_directory(*args, **kwargs):
- try:
- from tempfile import TemporaryDirectory
-
- with TemporaryDirectory(*args, **kwargs) as name:
- yield name
- except ImportError:
- name = tempfile.mkdtemp(*args, **kwargs)
+ name = tempfile.mkdtemp(*args, **kwargs)
- yield name
+ yield name
- shutil.rmtree(name)
+ shutil.rmtree(name, onerror=_del_ro)
def parse_requires(requires): # type: (str) -> List[str]
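A self-contained sketch of the failure mode and the fix (not taken from the repository; the file names are invented): it creates a read-only file, roughly the way p4python's install leaves its files, and removes it with the same `onerror` hook the patch adds, which clears the read-only bit and retries.

```python
# Sketch only: reproduces a read-only file and deletes it via the onerror hook.
import os
import shutil
import stat
import tempfile


def _del_ro(action, name, exc):
    # Called for each path rmtree failed to remove: make it writable, retry.
    os.chmod(name, stat.S_IWRITE)
    os.remove(name)


tmp = tempfile.mkdtemp()
ro_file = os.path.join(tmp, "readonly.txt")
with open(ro_file, "w") as f:
    f.write("installed read-only, like p4python's files\n")
os.chmod(ro_file, stat.S_IREAD)  # simulate the read-only payload

# A plain shutil.rmtree(tmp) raises PermissionError on Windows here;
# the onerror hook makes it succeed on all platforms.
shutil.rmtree(tmp, onerror=_del_ro)
assert not os.path.exists(tmp)
```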
|
{"golden_diff": "diff --git a/poetry/utils/helpers.py b/poetry/utils/helpers.py\n--- a/poetry/utils/helpers.py\n+++ b/poetry/utils/helpers.py\n@@ -34,19 +34,18 @@\n return str(Version(version))\n \n \n+def _del_ro(action, name, exc):\n+ os.chmod(name, stat.S_IWRITE)\n+ os.remove(name)\n+\n+\n @contextmanager\n def temporary_directory(*args, **kwargs):\n- try:\n- from tempfile import TemporaryDirectory\n-\n- with TemporaryDirectory(*args, **kwargs) as name:\n- yield name\n- except ImportError:\n- name = tempfile.mkdtemp(*args, **kwargs)\n+ name = tempfile.mkdtemp(*args, **kwargs)\n \n- yield name\n+ yield name\n \n- shutil.rmtree(name)\n+ shutil.rmtree(name, onerror=_del_ro)\n \n \n def parse_requires(requires): # type: (str) -> List[str]\n", "issue": "Poetry fails to install p4python due to read-only files\n<!--\r\n Hi there! Thank you for discovering and submitting an issue.\r\n\r\n Before you submit this; let's make sure of a few things.\r\n Please make sure the following boxes are ticked if they are correct.\r\n If not, please try and fulfill these first.\r\n-->\r\n\r\n<!-- Checked checkbox should look like this: [x] -->\r\n- [x] I am on the [latest](https://github.com/sdispater/poetry/releases/latest) Poetry version.\r\n- [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate.\r\n- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).\r\n\r\n<!--\r\n Once those are done, if you're able to fill in the following list with your information,\r\n it'd be very helpful to whoever handles the issue.\r\n-->\r\n\r\n- **OS version and name**: Windows 10\r\n- **Poetry version**: poetry 0.12.2\r\n- **Link of a [pyproject.toml Gist](https://gist.github.com/epage/5f28e3b1e5eeb9a30697363e369a5fde)\r\n- **Link of a [backtrace Gist](https://gist.github.com/epage/2584ad981ff5d9f175d55212b0192987)\r\n\r\n## Issue\r\n\r\nIn digging into the problem, it seems that p4python's files are all marked read-only, causing windows to error when trying to delete them via `shutil.rmtree` which is invoked by poetry's custom temp directory handling.\n", "before_files": [{"content": "import os\nimport re\nimport shutil\nimport stat\nimport tempfile\n\nfrom contextlib import contextmanager\nfrom typing import List\nfrom typing import Optional\n\nfrom poetry.config.config import Config\nfrom poetry.utils._compat import Path\nfrom poetry.version import Version\n\n\ntry:\n from collections.abc import Mapping\nexcept ImportError:\n from collections import Mapping\n\n\n_canonicalize_regex = re.compile(\"[-_]+\")\n\n\ndef canonicalize_name(name): # type: (str) -> str\n return _canonicalize_regex.sub(\"-\", name).lower()\n\n\ndef module_name(name): # type: (str) -> str\n return canonicalize_name(name).replace(\".\", \"_\").replace(\"-\", \"_\")\n\n\ndef normalize_version(version): # type: (str) -> str\n return str(Version(version))\n\n\n@contextmanager\ndef temporary_directory(*args, **kwargs):\n try:\n from tempfile import TemporaryDirectory\n\n with TemporaryDirectory(*args, **kwargs) as name:\n yield name\n except ImportError:\n name = tempfile.mkdtemp(*args, **kwargs)\n\n yield name\n\n shutil.rmtree(name)\n\n\ndef parse_requires(requires): # type: (str) -> List[str]\n lines = requires.split(\"\\n\")\n\n requires_dist = []\n in_section = False\n current_marker = None\n for line in lines:\n line = line.strip()\n if not line:\n if in_section:\n in_section = False\n\n continue\n\n if 
line.startswith(\"[\"):\n # extras or conditional dependencies\n marker = line.lstrip(\"[\").rstrip(\"]\")\n if \":\" not in marker:\n extra, marker = marker, None\n else:\n extra, marker = marker.split(\":\")\n\n if extra:\n if marker:\n marker = '{} and extra == \"{}\"'.format(marker, extra)\n else:\n marker = 'extra == \"{}\"'.format(extra)\n\n if marker:\n current_marker = marker\n\n continue\n\n if current_marker:\n line = \"{} ; {}\".format(line, current_marker)\n\n requires_dist.append(line)\n\n return requires_dist\n\n\ndef get_cert(config, repository_name): # type: (Config, str) -> Optional[Path]\n cert = config.get(\"certificates.{}.cert\".format(repository_name))\n if cert:\n return Path(cert)\n else:\n return None\n\n\ndef get_client_cert(config, repository_name): # type: (Config, str) -> Optional[Path]\n client_cert = config.get(\"certificates.{}.client-cert\".format(repository_name))\n if client_cert:\n return Path(client_cert)\n else:\n return None\n\n\ndef _on_rm_error(func, path, exc_info):\n if not os.path.exists(path):\n return\n\n os.chmod(path, stat.S_IWRITE)\n func(path)\n\n\ndef safe_rmtree(path):\n if Path(path).is_symlink():\n return os.unlink(str(path))\n\n shutil.rmtree(path, onerror=_on_rm_error)\n\n\ndef merge_dicts(d1, d2):\n for k, v in d2.items():\n if k in d1 and isinstance(d1[k], dict) and isinstance(d2[k], Mapping):\n merge_dicts(d1[k], d2[k])\n else:\n d1[k] = d2[k]\n", "path": "poetry/utils/helpers.py"}], "after_files": [{"content": "import os\nimport re\nimport shutil\nimport stat\nimport tempfile\n\nfrom contextlib import contextmanager\nfrom typing import List\nfrom typing import Optional\n\nfrom poetry.config.config import Config\nfrom poetry.utils._compat import Path\nfrom poetry.version import Version\n\n\ntry:\n from collections.abc import Mapping\nexcept ImportError:\n from collections import Mapping\n\n\n_canonicalize_regex = re.compile(\"[-_]+\")\n\n\ndef canonicalize_name(name): # type: (str) -> str\n return _canonicalize_regex.sub(\"-\", name).lower()\n\n\ndef module_name(name): # type: (str) -> str\n return canonicalize_name(name).replace(\".\", \"_\").replace(\"-\", \"_\")\n\n\ndef normalize_version(version): # type: (str) -> str\n return str(Version(version))\n\n\ndef _del_ro(action, name, exc):\n os.chmod(name, stat.S_IWRITE)\n os.remove(name)\n\n\n@contextmanager\ndef temporary_directory(*args, **kwargs):\n name = tempfile.mkdtemp(*args, **kwargs)\n\n yield name\n\n shutil.rmtree(name, onerror=_del_ro)\n\n\ndef parse_requires(requires): # type: (str) -> List[str]\n lines = requires.split(\"\\n\")\n\n requires_dist = []\n in_section = False\n current_marker = None\n for line in lines:\n line = line.strip()\n if not line:\n if in_section:\n in_section = False\n\n continue\n\n if line.startswith(\"[\"):\n # extras or conditional dependencies\n marker = line.lstrip(\"[\").rstrip(\"]\")\n if \":\" not in marker:\n extra, marker = marker, None\n else:\n extra, marker = marker.split(\":\")\n\n if extra:\n if marker:\n marker = '{} and extra == \"{}\"'.format(marker, extra)\n else:\n marker = 'extra == \"{}\"'.format(extra)\n\n if marker:\n current_marker = marker\n\n continue\n\n if current_marker:\n line = \"{} ; {}\".format(line, current_marker)\n\n requires_dist.append(line)\n\n return requires_dist\n\n\ndef get_cert(config, repository_name): # type: (Config, str) -> Optional[Path]\n cert = config.get(\"certificates.{}.cert\".format(repository_name))\n if cert:\n return Path(cert)\n else:\n return None\n\n\ndef get_client_cert(config, 
repository_name): # type: (Config, str) -> Optional[Path]\n client_cert = config.get(\"certificates.{}.client-cert\".format(repository_name))\n if client_cert:\n return Path(client_cert)\n else:\n return None\n\n\ndef _on_rm_error(func, path, exc_info):\n if not os.path.exists(path):\n return\n\n os.chmod(path, stat.S_IWRITE)\n func(path)\n\n\ndef safe_rmtree(path):\n if Path(path).is_symlink():\n return os.unlink(str(path))\n\n shutil.rmtree(path, onerror=_on_rm_error)\n\n\ndef merge_dicts(d1, d2):\n for k, v in d2.items():\n if k in d1 and isinstance(d1[k], dict) and isinstance(d2[k], Mapping):\n merge_dicts(d1[k], d2[k])\n else:\n d1[k] = d2[k]\n", "path": "poetry/utils/helpers.py"}]}
| 1,656 | 214 |
gh_patches_debug_6859
|
rasdani/github-patches
|
git_diff
|
bokeh__bokeh-7069
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
session could auto-no-op any callbacks invoked after the session is destroyed
Right now, if you do something like this:
```
def update_all_sessions(server_context):
for session_context in server_context.sessions:
yield session_context.with_locked_document(update_document)
```
One of the sessions could expire and be destroyed before your code gets to it.
So you need to write a check for that:
```
def update_all_sessions(server_context):
for session_context in server_context.sessions:
if not session_context.destroyed:
yield session_context.with_locked_document(update_document)
```
I think it would be better if `with_locked_document` did this automatically (just became a no-op on destroyed sessions). This could be done in `session.py`, perhaps in `_needs_document_lock_wrapper`.
--- END ISSUE ---
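A minimal, synchronous sketch of the guard the issue is asking for (this is not the actual Tornado-coroutine wrapper in Bokeh's `session.py`; the `destroyed` attribute and the logger name are assumptions made for illustration):
```python
# Illustrative sketch only: the real wrapper is a coroutine that also acquires
# the document lock; this shows just the early no-op guard on destroyed sessions.
import functools
import logging

log = logging.getLogger(__name__)


def needs_document_lock(func):
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        if self.destroyed:  # assumed boolean flag on the session object
            log.debug("Ignoring locked callback on already-destroyed session")
            return None     # silently no-op instead of raising
        return func(self, *args, **kwargs)
    return wrapper
```
With a guard like this in place, `with_locked_document` on an expired session would simply do nothing rather than force every caller to check `session_context.destroyed` first.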
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bokeh/server/session.py`
Content:
```
1 ''' Provides the ``ServerSession`` class.
2
3 '''
4 from __future__ import absolute_import
5
6 import logging
7 log = logging.getLogger(__name__)
8
9 import time
10
11 from tornado import gen, locks
12
13 from ..util.tornado import yield_for_all_futures
14
15 from .callbacks import _DocumentCallbackGroup
16
17 def current_time():
18 '''Return the time in milliseconds since the epoch as a floating
19 point number.
20 '''
21 try:
22 # python >=3.3 only
23 return time.monotonic() * 1000
24 except:
25 # if your python is old, don't set your clock backward!
26 return time.time() * 1000
27
28 def _needs_document_lock(func):
29 '''Decorator that adds the necessary locking and post-processing
30 to manipulate the session's document. Expects to decorate a
31 method on ServerSession and transforms it into a coroutine
32 if it wasn't already.
33 '''
34 @gen.coroutine
35 def _needs_document_lock_wrapper(self, *args, **kwargs):
36 # while we wait for and hold the lock, prevent the session
37 # from being discarded. This avoids potential weirdness
38 # with the session vanishing in the middle of some async
39 # task.
40 self.block_expiration()
41 try:
42 with (yield self._lock.acquire()):
43 if self._pending_writes is not None:
44 raise RuntimeError("internal class invariant violated: _pending_writes " + \
45 "should be None if lock is not held")
46 self._pending_writes = []
47 try:
48 result = yield yield_for_all_futures(func(self, *args, **kwargs))
49 finally:
50 # we want to be very sure we reset this or we'll
51 # keep hitting the RuntimeError above as soon as
52 # any callback goes wrong
53 pending_writes = self._pending_writes
54 self._pending_writes = None
55 for p in pending_writes:
56 yield p
57 raise gen.Return(result)
58 finally:
59 self.unblock_expiration()
60 return _needs_document_lock_wrapper
61
62 class ServerSession(object):
63 ''' Hosts an application "instance" (an instantiated Document) for one or more connections.
64
65 '''
66
67 def __init__(self, session_id, document, io_loop=None):
68 if session_id is None:
69 raise ValueError("Sessions must have an id")
70 if document is None:
71 raise ValueError("Sessions must have a document")
72 self._id = session_id
73 self._document = document
74 self._loop = io_loop
75 self._subscribed_connections = set()
76 self._last_unsubscribe_time = current_time()
77 self._lock = locks.Lock()
78 self._current_patch_connection = None
79 self._document.on_change_dispatch_to(self)
80 self._callbacks = _DocumentCallbackGroup(io_loop)
81 self._pending_writes = None
82 self._destroyed = False
83 self._expiration_requested = False
84 self._expiration_blocked_count = 0
85
86 wrapped_callbacks = self._wrap_session_callbacks(self._document.session_callbacks)
87 self._callbacks.add_session_callbacks(wrapped_callbacks)
88
89 @property
90 def document(self):
91 return self._document
92
93 @property
94 def id(self):
95 return self._id
96
97 @property
98 def destroyed(self):
99 return self._destroyed
100
101 @property
102 def expiration_requested(self):
103 return self._expiration_requested
104
105 @property
106 def expiration_blocked(self):
107 return self._expiration_blocked_count > 0
108
109 @property
110 def expiration_blocked_count(self):
111 return self._expiration_blocked_count
112
113 def destroy(self):
114 self._destroyed = True
115 self._document.delete_modules()
116 self._document.remove_on_change(self)
117 self._callbacks.remove_all_callbacks()
118
119 def request_expiration(self):
120 """ Used in test suite for now. Forces immediate expiration if no connections."""
121 self._expiration_requested = True
122
123 def block_expiration(self):
124 self._expiration_blocked_count += 1
125
126 def unblock_expiration(self):
127 if self._expiration_blocked_count <= 0:
128 raise RuntimeError("mismatched block_expiration / unblock_expiration")
129 self._expiration_blocked_count -= 1
130
131 def subscribe(self, connection):
132 """This should only be called by ServerConnection.subscribe_session or our book-keeping will be broken"""
133 self._subscribed_connections.add(connection)
134
135 def unsubscribe(self, connection):
136 """This should only be called by ServerConnection.unsubscribe_session or our book-keeping will be broken"""
137 self._subscribed_connections.discard(connection)
138 self._last_unsubscribe_time = current_time()
139
140 @property
141 def connection_count(self):
142 return len(self._subscribed_connections)
143
144 @property
145 def milliseconds_since_last_unsubscribe(self):
146 return current_time() - self._last_unsubscribe_time
147
148 @_needs_document_lock
149 def with_document_locked(self, func, *args, **kwargs):
150 ''' Asynchronously locks the document and runs the function with it locked.'''
151 return func(*args, **kwargs)
152
153 def _wrap_document_callback(self, callback):
154 if getattr(callback, "nolock", False):
155 return callback
156 def wrapped_callback(*args, **kwargs):
157 return self.with_document_locked(callback, *args, **kwargs)
158 return wrapped_callback
159
160 def _wrap_session_callback(self, callback):
161 wrapped = self._wrap_document_callback(callback.callback)
162 return callback._copy_with_changed_callback(wrapped)
163
164 def _wrap_session_callbacks(self, callbacks):
165 wrapped = []
166 for cb in callbacks:
167 wrapped.append(self._wrap_session_callback(cb))
168 return wrapped
169
170 def _document_patched(self, event):
171 may_suppress = event.setter is self
172
173 if self._pending_writes is None:
174 raise RuntimeError("_pending_writes should be non-None when we have a document lock, and we should have the lock when the document changes")
175
176 # TODO (havocp): our "change sync" protocol is flawed because if both
177 # sides change the same attribute at the same time, they will each end
178 # up with the state of the other and their final states will differ.
179 for connection in self._subscribed_connections:
180 if may_suppress and connection is self._current_patch_connection:
181 log.trace("Not sending notification back to client %r for a change it requested", connection)
182 else:
183 self._pending_writes.append(connection.send_patch_document(event))
184
185 @_needs_document_lock
186 def _handle_pull(self, message, connection):
187 log.debug("Sending pull-doc-reply from session %r", self.id)
188 return connection.protocol.create('PULL-DOC-REPLY', message.header['msgid'], self.document)
189
190 def _session_callback_added(self, event):
191 wrapped = self._wrap_session_callback(event.callback)
192 self._callbacks.add_session_callback(wrapped)
193
194 def _session_callback_removed(self, event):
195 self._callbacks.remove_session_callback(event.callback)
196
197 @classmethod
198 def pull(cls, message, connection):
199 ''' Handle a PULL-DOC, return a Future with work to be scheduled. '''
200 return connection.session._handle_pull(message, connection)
201
202 @_needs_document_lock
203 def _handle_push(self, message, connection):
204 log.debug("pushing doc to session %r", self.id)
205 message.push_to_document(self.document)
206 return connection.ok(message)
207
208 @classmethod
209 def push(cls, message, connection):
210 ''' Handle a PUSH-DOC, return a Future with work to be scheduled. '''
211 return connection.session._handle_push(message, connection)
212
213 @_needs_document_lock
214 def _handle_patch(self, message, connection):
215 self._current_patch_connection = connection
216 try:
217 message.apply_to_document(self.document, self)
218 finally:
219 self._current_patch_connection = None
220
221 return connection.ok(message)
222
223 @_needs_document_lock
224 def _handle_event(self, message, connection):
225 message.notify_event(self.document)
226 return connection.ok(message)
227
228 @classmethod
229 def event(cls, message, connection):
230 return connection.session._handle_event(message, connection)
231
232
233 @classmethod
234 def patch(cls, message, connection):
235 ''' Handle a PATCH-DOC, return a Future with work to be scheduled. '''
236 return connection.session._handle_patch(message, connection)
237
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bokeh/server/session.py b/bokeh/server/session.py
--- a/bokeh/server/session.py
+++ b/bokeh/server/session.py
@@ -37,6 +37,9 @@
# from being discarded. This avoids potential weirdness
# with the session vanishing in the middle of some async
# task.
+ if self.destroyed:
+ log.debug("Ignoring locked callback on already-destroyed session.")
+ raise gen.Return(None)
self.block_expiration()
try:
with (yield self._lock.acquire()):
|
{"golden_diff": "diff --git a/bokeh/server/session.py b/bokeh/server/session.py\n--- a/bokeh/server/session.py\n+++ b/bokeh/server/session.py\n@@ -37,6 +37,9 @@\n # from being discarded. This avoids potential weirdness\n # with the session vanishing in the middle of some async\n # task.\n+ if self.destroyed:\n+ log.debug(\"Ignoring locked callback on already-destroyed session.\")\n+ raise gen.Return(None)\n self.block_expiration()\n try:\n with (yield self._lock.acquire()):\n", "issue": "session could auto-no-op any callbacks invoked after the session is destroyed\nRight now, if you do something like this:\n\n```\ndef update_all_sessions(server_context):\n for session_context in server_context.sessions:\n yield session_context.with_locked_document(update_document)\n```\n\nOne of the sessions could expire and be destroyed before your code gets to it.\n\nSo you need to write a check for that:\n\n```\ndef update_all_sessions(server_context):\n for session_context in server_context.sessions:\n if not session_context.destroyed:\n yield session_context.with_locked_document(update_document)\n```\n\nI think it would be better if `with_locked_document` did this automatically (just became a no-op on destroyed sessions). This could be done in session.py, `_needs_document_lock_wrapper` perhaps.\n\n", "before_files": [{"content": "''' Provides the ``ServerSession`` class.\n\n'''\nfrom __future__ import absolute_import\n\nimport logging\nlog = logging.getLogger(__name__)\n\nimport time\n\nfrom tornado import gen, locks\n\nfrom ..util.tornado import yield_for_all_futures\n\nfrom .callbacks import _DocumentCallbackGroup\n\ndef current_time():\n '''Return the time in milliseconds since the epoch as a floating\n point number.\n '''\n try:\n # python >=3.3 only\n return time.monotonic() * 1000\n except:\n # if your python is old, don't set your clock backward!\n return time.time() * 1000\n\ndef _needs_document_lock(func):\n '''Decorator that adds the necessary locking and post-processing\n to manipulate the session's document. Expects to decorate a\n method on ServerSession and transforms it into a coroutine\n if it wasn't already.\n '''\n @gen.coroutine\n def _needs_document_lock_wrapper(self, *args, **kwargs):\n # while we wait for and hold the lock, prevent the session\n # from being discarded. 
This avoids potential weirdness\n # with the session vanishing in the middle of some async\n # task.\n self.block_expiration()\n try:\n with (yield self._lock.acquire()):\n if self._pending_writes is not None:\n raise RuntimeError(\"internal class invariant violated: _pending_writes \" + \\\n \"should be None if lock is not held\")\n self._pending_writes = []\n try:\n result = yield yield_for_all_futures(func(self, *args, **kwargs))\n finally:\n # we want to be very sure we reset this or we'll\n # keep hitting the RuntimeError above as soon as\n # any callback goes wrong\n pending_writes = self._pending_writes\n self._pending_writes = None\n for p in pending_writes:\n yield p\n raise gen.Return(result)\n finally:\n self.unblock_expiration()\n return _needs_document_lock_wrapper\n\nclass ServerSession(object):\n ''' Hosts an application \"instance\" (an instantiated Document) for one or more connections.\n\n '''\n\n def __init__(self, session_id, document, io_loop=None):\n if session_id is None:\n raise ValueError(\"Sessions must have an id\")\n if document is None:\n raise ValueError(\"Sessions must have a document\")\n self._id = session_id\n self._document = document\n self._loop = io_loop\n self._subscribed_connections = set()\n self._last_unsubscribe_time = current_time()\n self._lock = locks.Lock()\n self._current_patch_connection = None\n self._document.on_change_dispatch_to(self)\n self._callbacks = _DocumentCallbackGroup(io_loop)\n self._pending_writes = None\n self._destroyed = False\n self._expiration_requested = False\n self._expiration_blocked_count = 0\n\n wrapped_callbacks = self._wrap_session_callbacks(self._document.session_callbacks)\n self._callbacks.add_session_callbacks(wrapped_callbacks)\n\n @property\n def document(self):\n return self._document\n\n @property\n def id(self):\n return self._id\n\n @property\n def destroyed(self):\n return self._destroyed\n\n @property\n def expiration_requested(self):\n return self._expiration_requested\n\n @property\n def expiration_blocked(self):\n return self._expiration_blocked_count > 0\n\n @property\n def expiration_blocked_count(self):\n return self._expiration_blocked_count\n\n def destroy(self):\n self._destroyed = True\n self._document.delete_modules()\n self._document.remove_on_change(self)\n self._callbacks.remove_all_callbacks()\n\n def request_expiration(self):\n \"\"\" Used in test suite for now. 
Forces immediate expiration if no connections.\"\"\"\n self._expiration_requested = True\n\n def block_expiration(self):\n self._expiration_blocked_count += 1\n\n def unblock_expiration(self):\n if self._expiration_blocked_count <= 0:\n raise RuntimeError(\"mismatched block_expiration / unblock_expiration\")\n self._expiration_blocked_count -= 1\n\n def subscribe(self, connection):\n \"\"\"This should only be called by ServerConnection.subscribe_session or our book-keeping will be broken\"\"\"\n self._subscribed_connections.add(connection)\n\n def unsubscribe(self, connection):\n \"\"\"This should only be called by ServerConnection.unsubscribe_session or our book-keeping will be broken\"\"\"\n self._subscribed_connections.discard(connection)\n self._last_unsubscribe_time = current_time()\n\n @property\n def connection_count(self):\n return len(self._subscribed_connections)\n\n @property\n def milliseconds_since_last_unsubscribe(self):\n return current_time() - self._last_unsubscribe_time\n\n @_needs_document_lock\n def with_document_locked(self, func, *args, **kwargs):\n ''' Asynchronously locks the document and runs the function with it locked.'''\n return func(*args, **kwargs)\n\n def _wrap_document_callback(self, callback):\n if getattr(callback, \"nolock\", False):\n return callback\n def wrapped_callback(*args, **kwargs):\n return self.with_document_locked(callback, *args, **kwargs)\n return wrapped_callback\n\n def _wrap_session_callback(self, callback):\n wrapped = self._wrap_document_callback(callback.callback)\n return callback._copy_with_changed_callback(wrapped)\n\n def _wrap_session_callbacks(self, callbacks):\n wrapped = []\n for cb in callbacks:\n wrapped.append(self._wrap_session_callback(cb))\n return wrapped\n\n def _document_patched(self, event):\n may_suppress = event.setter is self\n\n if self._pending_writes is None:\n raise RuntimeError(\"_pending_writes should be non-None when we have a document lock, and we should have the lock when the document changes\")\n\n # TODO (havocp): our \"change sync\" protocol is flawed because if both\n # sides change the same attribute at the same time, they will each end\n # up with the state of the other and their final states will differ.\n for connection in self._subscribed_connections:\n if may_suppress and connection is self._current_patch_connection:\n log.trace(\"Not sending notification back to client %r for a change it requested\", connection)\n else:\n self._pending_writes.append(connection.send_patch_document(event))\n\n @_needs_document_lock\n def _handle_pull(self, message, connection):\n log.debug(\"Sending pull-doc-reply from session %r\", self.id)\n return connection.protocol.create('PULL-DOC-REPLY', message.header['msgid'], self.document)\n\n def _session_callback_added(self, event):\n wrapped = self._wrap_session_callback(event.callback)\n self._callbacks.add_session_callback(wrapped)\n\n def _session_callback_removed(self, event):\n self._callbacks.remove_session_callback(event.callback)\n\n @classmethod\n def pull(cls, message, connection):\n ''' Handle a PULL-DOC, return a Future with work to be scheduled. '''\n return connection.session._handle_pull(message, connection)\n\n @_needs_document_lock\n def _handle_push(self, message, connection):\n log.debug(\"pushing doc to session %r\", self.id)\n message.push_to_document(self.document)\n return connection.ok(message)\n\n @classmethod\n def push(cls, message, connection):\n ''' Handle a PUSH-DOC, return a Future with work to be scheduled. 
'''\n return connection.session._handle_push(message, connection)\n\n @_needs_document_lock\n def _handle_patch(self, message, connection):\n self._current_patch_connection = connection\n try:\n message.apply_to_document(self.document, self)\n finally:\n self._current_patch_connection = None\n\n return connection.ok(message)\n\n @_needs_document_lock\n def _handle_event(self, message, connection):\n message.notify_event(self.document)\n return connection.ok(message)\n\n @classmethod\n def event(cls, message, connection):\n return connection.session._handle_event(message, connection)\n\n\n @classmethod\n def patch(cls, message, connection):\n ''' Handle a PATCH-DOC, return a Future with work to be scheduled. '''\n return connection.session._handle_patch(message, connection)\n", "path": "bokeh/server/session.py"}], "after_files": [{"content": "''' Provides the ``ServerSession`` class.\n\n'''\nfrom __future__ import absolute_import\n\nimport logging\nlog = logging.getLogger(__name__)\n\nimport time\n\nfrom tornado import gen, locks\n\nfrom ..util.tornado import yield_for_all_futures\n\nfrom .callbacks import _DocumentCallbackGroup\n\ndef current_time():\n '''Return the time in milliseconds since the epoch as a floating\n point number.\n '''\n try:\n # python >=3.3 only\n return time.monotonic() * 1000\n except:\n # if your python is old, don't set your clock backward!\n return time.time() * 1000\n\ndef _needs_document_lock(func):\n '''Decorator that adds the necessary locking and post-processing\n to manipulate the session's document. Expects to decorate a\n method on ServerSession and transforms it into a coroutine\n if it wasn't already.\n '''\n @gen.coroutine\n def _needs_document_lock_wrapper(self, *args, **kwargs):\n # while we wait for and hold the lock, prevent the session\n # from being discarded. 
This avoids potential weirdness\n # with the session vanishing in the middle of some async\n # task.\n if self.destroyed:\n log.debug(\"Ignoring locked callback on already-destroyed session.\")\n raise gen.Return(None)\n self.block_expiration()\n try:\n with (yield self._lock.acquire()):\n if self._pending_writes is not None:\n raise RuntimeError(\"internal class invariant violated: _pending_writes \" + \\\n \"should be None if lock is not held\")\n self._pending_writes = []\n try:\n result = yield yield_for_all_futures(func(self, *args, **kwargs))\n finally:\n # we want to be very sure we reset this or we'll\n # keep hitting the RuntimeError above as soon as\n # any callback goes wrong\n pending_writes = self._pending_writes\n self._pending_writes = None\n for p in pending_writes:\n yield p\n raise gen.Return(result)\n finally:\n self.unblock_expiration()\n return _needs_document_lock_wrapper\n\nclass ServerSession(object):\n ''' Hosts an application \"instance\" (an instantiated Document) for one or more connections.\n\n '''\n\n def __init__(self, session_id, document, io_loop=None):\n if session_id is None:\n raise ValueError(\"Sessions must have an id\")\n if document is None:\n raise ValueError(\"Sessions must have a document\")\n self._id = session_id\n self._document = document\n self._loop = io_loop\n self._subscribed_connections = set()\n self._last_unsubscribe_time = current_time()\n self._lock = locks.Lock()\n self._current_patch_connection = None\n self._document.on_change_dispatch_to(self)\n self._callbacks = _DocumentCallbackGroup(io_loop)\n self._pending_writes = None\n self._destroyed = False\n self._expiration_requested = False\n self._expiration_blocked_count = 0\n\n wrapped_callbacks = self._wrap_session_callbacks(self._document.session_callbacks)\n self._callbacks.add_session_callbacks(wrapped_callbacks)\n\n @property\n def document(self):\n return self._document\n\n @property\n def id(self):\n return self._id\n\n @property\n def destroyed(self):\n return self._destroyed\n\n @property\n def expiration_requested(self):\n return self._expiration_requested\n\n @property\n def expiration_blocked(self):\n return self._expiration_blocked_count > 0\n\n @property\n def expiration_blocked_count(self):\n return self._expiration_blocked_count\n\n def destroy(self):\n self._destroyed = True\n self._document.delete_modules()\n self._document.remove_on_change(self)\n self._callbacks.remove_all_callbacks()\n\n def request_expiration(self):\n \"\"\" Used in test suite for now. 
Forces immediate expiration if no connections.\"\"\"\n self._expiration_requested = True\n\n def block_expiration(self):\n self._expiration_blocked_count += 1\n\n def unblock_expiration(self):\n if self._expiration_blocked_count <= 0:\n raise RuntimeError(\"mismatched block_expiration / unblock_expiration\")\n self._expiration_blocked_count -= 1\n\n def subscribe(self, connection):\n \"\"\"This should only be called by ServerConnection.subscribe_session or our book-keeping will be broken\"\"\"\n self._subscribed_connections.add(connection)\n\n def unsubscribe(self, connection):\n \"\"\"This should only be called by ServerConnection.unsubscribe_session or our book-keeping will be broken\"\"\"\n self._subscribed_connections.discard(connection)\n self._last_unsubscribe_time = current_time()\n\n @property\n def connection_count(self):\n return len(self._subscribed_connections)\n\n @property\n def milliseconds_since_last_unsubscribe(self):\n return current_time() - self._last_unsubscribe_time\n\n @_needs_document_lock\n def with_document_locked(self, func, *args, **kwargs):\n ''' Asynchronously locks the document and runs the function with it locked.'''\n return func(*args, **kwargs)\n\n def _wrap_document_callback(self, callback):\n if getattr(callback, \"nolock\", False):\n return callback\n def wrapped_callback(*args, **kwargs):\n return self.with_document_locked(callback, *args, **kwargs)\n return wrapped_callback\n\n def _wrap_session_callback(self, callback):\n wrapped = self._wrap_document_callback(callback.callback)\n return callback._copy_with_changed_callback(wrapped)\n\n def _wrap_session_callbacks(self, callbacks):\n wrapped = []\n for cb in callbacks:\n wrapped.append(self._wrap_session_callback(cb))\n return wrapped\n\n def _document_patched(self, event):\n may_suppress = event.setter is self\n\n if self._pending_writes is None:\n raise RuntimeError(\"_pending_writes should be non-None when we have a document lock, and we should have the lock when the document changes\")\n\n # TODO (havocp): our \"change sync\" protocol is flawed because if both\n # sides change the same attribute at the same time, they will each end\n # up with the state of the other and their final states will differ.\n for connection in self._subscribed_connections:\n if may_suppress and connection is self._current_patch_connection:\n log.trace(\"Not sending notification back to client %r for a change it requested\", connection)\n else:\n self._pending_writes.append(connection.send_patch_document(event))\n\n @_needs_document_lock\n def _handle_pull(self, message, connection):\n log.debug(\"Sending pull-doc-reply from session %r\", self.id)\n return connection.protocol.create('PULL-DOC-REPLY', message.header['msgid'], self.document)\n\n def _session_callback_added(self, event):\n wrapped = self._wrap_session_callback(event.callback)\n self._callbacks.add_session_callback(wrapped)\n\n def _session_callback_removed(self, event):\n self._callbacks.remove_session_callback(event.callback)\n\n @classmethod\n def pull(cls, message, connection):\n ''' Handle a PULL-DOC, return a Future with work to be scheduled. '''\n return connection.session._handle_pull(message, connection)\n\n @_needs_document_lock\n def _handle_push(self, message, connection):\n log.debug(\"pushing doc to session %r\", self.id)\n message.push_to_document(self.document)\n return connection.ok(message)\n\n @classmethod\n def push(cls, message, connection):\n ''' Handle a PUSH-DOC, return a Future with work to be scheduled. 
'''\n return connection.session._handle_push(message, connection)\n\n @_needs_document_lock\n def _handle_patch(self, message, connection):\n self._current_patch_connection = connection\n try:\n message.apply_to_document(self.document, self)\n finally:\n self._current_patch_connection = None\n\n return connection.ok(message)\n\n @_needs_document_lock\n def _handle_event(self, message, connection):\n message.notify_event(self.document)\n return connection.ok(message)\n\n @classmethod\n def event(cls, message, connection):\n return connection.session._handle_event(message, connection)\n\n\n @classmethod\n def patch(cls, message, connection):\n ''' Handle a PATCH-DOC, return a Future with work to be scheduled. '''\n return connection.session._handle_patch(message, connection)\n", "path": "bokeh/server/session.py"}]}
| 2,815 | 125 |
gh_patches_debug_23207
|
rasdani/github-patches
|
git_diff
|
getsentry__snuba-1794
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Snuba cleanup for sentry onpremise
### Environment
Sentry self-hosted 21.3.0 (based on docker-compose from here https://github.com/getsentry/onpremise/blob/21.3.0/docker-compose.yml)
### Steps to Reproduce
1) Set up all containers and bring up the snuba-cleanup container
2) Check the logs for snuba-cleanup: every 5 minutes the log shows `Dropped 0 partitions on None`
It looks like the CLICKHOUSE_HOST variable is ignored here:
https://github.com/getsentry/snuba/blob/41d7fe76aaf8a594e8f6e84015607dcde3f67ad4/snuba/cli/cleanup.py#L13
After manually running `snuba cleanup --clickhouse-host CLICKHOUSE_HOST_HERE --dry-run True` in the container,
I got `Dropped 0 partitions on CLICKHOUSE_HOST_HERE`
### Expected Result
Pass the variable https://github.com/getsentry/onpremise/blob/bdd2686021cfea07507bc07d2756ac34a775c680/docker-compose.yml#L44 into the cleanup command
### Actual Result
The variable is `None` instead of the ClickHouse host
I'm not sure whether this is a bug or not.
--- END ISSUE ---
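The symptom points at the log line reporting the raw `--clickhouse-host` option, which stays `None` when the connection is built from the single-node cluster settings instead of the CLI flags. A hedged sketch of the reporting idea; `cluster_description` is a made-up stand-in for whatever identifies the connection actually used, not a real snuba API:
```python
# Sketch only: prefer the explicit CLI host, otherwise describe the cluster
# connection that was actually used, so the log never prints "on None".
from typing import Optional


def cleanup_target(clickhouse_host: Optional[str], cluster_description: str) -> str:
    return clickhouse_host or cluster_description


print(cleanup_target(None, "cluster(clickhouse:9000)"))             # cluster(clickhouse:9000)
print(cleanup_target("my-clickhouse", "cluster(clickhouse:9000)"))  # my-clickhouse
```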
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `snuba/cli/cleanup.py`
Content:
```
1 from typing import Optional
2
3 import click
4
5 from snuba.clusters.cluster import ClickhouseClientSettings
6 from snuba.datasets.storages import StorageKey
7 from snuba.datasets.storages.factory import get_writable_storage
8 from snuba.environment import setup_logging
9
10
11 @click.command()
12 @click.option(
13 "--clickhouse-host", help="Clickhouse server to write to.",
14 )
15 @click.option(
16 "--clickhouse-port", type=int, help="Clickhouse native port to write to.",
17 )
18 @click.option(
19 "--dry-run",
20 type=bool,
21 default=True,
22 help="If true, only print which partitions would be dropped.",
23 )
24 @click.option(
25 "--storage",
26 "storage_name",
27 default="events",
28 type=click.Choice(["events", "errors", "transactions"]),
29 help="The storage to target",
30 )
31 @click.option("--log-level", help="Logging level to use.")
32 def cleanup(
33 *,
34 clickhouse_host: Optional[str],
35 clickhouse_port: Optional[int],
36 dry_run: bool,
37 storage_name: str,
38 log_level: Optional[str] = None,
39 ) -> None:
40 """
41 Deletes stale partitions for ClickHouse tables
42 """
43
44 setup_logging(log_level)
45
46 from snuba.cleanup import run_cleanup, logger
47 from snuba.clickhouse.native import ClickhousePool
48
49 storage = get_writable_storage(StorageKey(storage_name))
50
51 (clickhouse_user, clickhouse_password,) = storage.get_cluster().get_credentials()
52
53 database = storage.get_cluster().get_database()
54
55 if clickhouse_host and clickhouse_port:
56 connection = ClickhousePool(
57 clickhouse_host,
58 clickhouse_port,
59 clickhouse_user,
60 clickhouse_password,
61 database,
62 )
63 elif not storage.get_cluster().is_single_node():
64 raise click.ClickException("Provide ClickHouse host and port for cleanup")
65 else:
66 connection = storage.get_cluster().get_query_connection(
67 ClickhouseClientSettings.CLEANUP
68 )
69
70 num_dropped = run_cleanup(connection, storage, database, dry_run=dry_run)
71 logger.info("Dropped %s partitions on %s" % (num_dropped, clickhouse_host))
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/snuba/cli/cleanup.py b/snuba/cli/cleanup.py
--- a/snuba/cli/cleanup.py
+++ b/snuba/cli/cleanup.py
@@ -50,7 +50,8 @@
(clickhouse_user, clickhouse_password,) = storage.get_cluster().get_credentials()
- database = storage.get_cluster().get_database()
+ cluster = storage.get_cluster()
+ database = cluster.get_database()
if clickhouse_host and clickhouse_port:
connection = ClickhousePool(
@@ -60,12 +61,12 @@
clickhouse_password,
database,
)
- elif not storage.get_cluster().is_single_node():
+ elif not cluster.is_single_node():
raise click.ClickException("Provide ClickHouse host and port for cleanup")
else:
- connection = storage.get_cluster().get_query_connection(
+ connection = cluster.get_query_connection(
ClickhouseClientSettings.CLEANUP
)
num_dropped = run_cleanup(connection, storage, database, dry_run=dry_run)
- logger.info("Dropped %s partitions on %s" % (num_dropped, clickhouse_host))
+ logger.info("Dropped %s partitions on %s" % (num_dropped, cluster))
|
{"golden_diff": "diff --git a/snuba/cli/cleanup.py b/snuba/cli/cleanup.py\n--- a/snuba/cli/cleanup.py\n+++ b/snuba/cli/cleanup.py\n@@ -50,7 +50,8 @@\n \n (clickhouse_user, clickhouse_password,) = storage.get_cluster().get_credentials()\n \n- database = storage.get_cluster().get_database()\n+ cluster = storage.get_cluster()\n+ database = cluster.get_database()\n \n if clickhouse_host and clickhouse_port:\n connection = ClickhousePool(\n@@ -60,12 +61,12 @@\n clickhouse_password,\n database,\n )\n- elif not storage.get_cluster().is_single_node():\n+ elif not cluster.is_single_node():\n raise click.ClickException(\"Provide ClickHouse host and port for cleanup\")\n else:\n- connection = storage.get_cluster().get_query_connection(\n+ connection = cluster.get_query_connection(\n ClickhouseClientSettings.CLEANUP\n )\n \n num_dropped = run_cleanup(connection, storage, database, dry_run=dry_run)\n- logger.info(\"Dropped %s partitions on %s\" % (num_dropped, clickhouse_host))\n+ logger.info(\"Dropped %s partitions on %s\" % (num_dropped, cluster))\n", "issue": "Snuba cleanup for sentry onpremise\n### Environment\r\n\r\nSentry self-hosted 21.3.0 (based on docker-compose from here https://github.com/getsentry/onpremise/blob/21.3.0/docker-compose.yml)\r\n\r\n### Steps to Reproduce\r\n\r\n1) Setup all containers and up snuba-cleanup container\r\n2) Check logs for snuba-cleanup: Every 5 minutes in log - `Dropped 0 partitions on None`\r\nIt looks like variable CLICKHOUSE_HOST is ignored here\r\nhttps://github.com/getsentry/snuba/blob/41d7fe76aaf8a594e8f6e84015607dcde3f67ad4/snuba/cli/cleanup.py#L13\r\nAfter manual run command in container - `snuba cleanup --clickhouse-host CLICKHOUSE_HOST_HERE --dry-run True`\r\ni got `Dropped 0 partitions on CLICKHOUSE_HOST_HERE`\r\n\r\n### Expected Result\r\n\r\nPass variable https://github.com/getsentry/onpremise/blob/bdd2686021cfea07507bc07d2756ac34a775c680/docker-compose.yml#L44 into cleanup command\r\n\r\n### Actual Result\r\n\r\nvariable is `None` instead of clickhouse host\r\n\r\nI'am not sure, bug this or not.\n", "before_files": [{"content": "from typing import Optional\n\nimport click\n\nfrom snuba.clusters.cluster import ClickhouseClientSettings\nfrom snuba.datasets.storages import StorageKey\nfrom snuba.datasets.storages.factory import get_writable_storage\nfrom snuba.environment import setup_logging\n\n\[email protected]()\[email protected](\n \"--clickhouse-host\", help=\"Clickhouse server to write to.\",\n)\[email protected](\n \"--clickhouse-port\", type=int, help=\"Clickhouse native port to write to.\",\n)\[email protected](\n \"--dry-run\",\n type=bool,\n default=True,\n help=\"If true, only print which partitions would be dropped.\",\n)\[email protected](\n \"--storage\",\n \"storage_name\",\n default=\"events\",\n type=click.Choice([\"events\", \"errors\", \"transactions\"]),\n help=\"The storage to target\",\n)\[email protected](\"--log-level\", help=\"Logging level to use.\")\ndef cleanup(\n *,\n clickhouse_host: Optional[str],\n clickhouse_port: Optional[int],\n dry_run: bool,\n storage_name: str,\n log_level: Optional[str] = None,\n) -> None:\n \"\"\"\n Deletes stale partitions for ClickHouse tables\n \"\"\"\n\n setup_logging(log_level)\n\n from snuba.cleanup import run_cleanup, logger\n from snuba.clickhouse.native import ClickhousePool\n\n storage = get_writable_storage(StorageKey(storage_name))\n\n (clickhouse_user, clickhouse_password,) = storage.get_cluster().get_credentials()\n\n database = storage.get_cluster().get_database()\n\n if 
clickhouse_host and clickhouse_port:\n connection = ClickhousePool(\n clickhouse_host,\n clickhouse_port,\n clickhouse_user,\n clickhouse_password,\n database,\n )\n elif not storage.get_cluster().is_single_node():\n raise click.ClickException(\"Provide ClickHouse host and port for cleanup\")\n else:\n connection = storage.get_cluster().get_query_connection(\n ClickhouseClientSettings.CLEANUP\n )\n\n num_dropped = run_cleanup(connection, storage, database, dry_run=dry_run)\n logger.info(\"Dropped %s partitions on %s\" % (num_dropped, clickhouse_host))\n", "path": "snuba/cli/cleanup.py"}], "after_files": [{"content": "from typing import Optional\n\nimport click\n\nfrom snuba.clusters.cluster import ClickhouseClientSettings\nfrom snuba.datasets.storages import StorageKey\nfrom snuba.datasets.storages.factory import get_writable_storage\nfrom snuba.environment import setup_logging\n\n\[email protected]()\[email protected](\n \"--clickhouse-host\", help=\"Clickhouse server to write to.\",\n)\[email protected](\n \"--clickhouse-port\", type=int, help=\"Clickhouse native port to write to.\",\n)\[email protected](\n \"--dry-run\",\n type=bool,\n default=True,\n help=\"If true, only print which partitions would be dropped.\",\n)\[email protected](\n \"--storage\",\n \"storage_name\",\n default=\"events\",\n type=click.Choice([\"events\", \"errors\", \"transactions\"]),\n help=\"The storage to target\",\n)\[email protected](\"--log-level\", help=\"Logging level to use.\")\ndef cleanup(\n *,\n clickhouse_host: Optional[str],\n clickhouse_port: Optional[int],\n dry_run: bool,\n storage_name: str,\n log_level: Optional[str] = None,\n) -> None:\n \"\"\"\n Deletes stale partitions for ClickHouse tables\n \"\"\"\n\n setup_logging(log_level)\n\n from snuba.cleanup import run_cleanup, logger\n from snuba.clickhouse.native import ClickhousePool\n\n storage = get_writable_storage(StorageKey(storage_name))\n\n (clickhouse_user, clickhouse_password,) = storage.get_cluster().get_credentials()\n\n cluster = storage.get_cluster()\n database = cluster.get_database()\n\n if clickhouse_host and clickhouse_port:\n connection = ClickhousePool(\n clickhouse_host,\n clickhouse_port,\n clickhouse_user,\n clickhouse_password,\n database,\n )\n elif not cluster.is_single_node():\n raise click.ClickException(\"Provide ClickHouse host and port for cleanup\")\n else:\n connection = cluster.get_query_connection(\n ClickhouseClientSettings.CLEANUP\n )\n\n num_dropped = run_cleanup(connection, storage, database, dry_run=dry_run)\n logger.info(\"Dropped %s partitions on %s\" % (num_dropped, cluster))\n", "path": "snuba/cli/cleanup.py"}]}
| 1,154 | 274 |
gh_patches_debug_36708
|
rasdani/github-patches
|
git_diff
|
ros__ros_comm-1695
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Topic Statistics reports 0 for topics under 1Hz
When the `/enable_statistics` parameter is `true`, subscribers publish to the `/statistics` topic. For a topic with an expected publishing frequency of 0.2 Hz, this feature always reports `msg.mean_period` as 0, no matter how much time is given.
--- END ISSUE ---
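For a 0.2 Hz topic, a one-second statistics window will rarely contain the two consecutive messages needed to compute a period, so the window has to grow for slow topics rather than shrink. A rough sketch of that resizing rule in plain seconds (not the actual `rospy.Duration`-based code; the defaults mirror the `/statistics_window_*` parameters):
```python
# Sketch of the intended window adjustment, using plain floats for seconds.
def adjust_window(window_sec, observed_msgs,
                  min_elements=10, max_elements=100,
                  min_window=4.0, max_window=64.0):
    if observed_msgs > max_elements and window_sec / 2 >= min_window:
        window_sec /= 2   # busy topic: shrink the window
    if observed_msgs < min_elements and window_sec * 2 <= max_window:
        window_sec *= 2   # slow topic (e.g. 0.2 Hz): grow the window
    return window_sec


print(adjust_window(4.0, 1))   # 8.0, and it keeps growing until >= 2 messages fit
```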
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `clients/rospy/src/rospy/impl/statistics.py`
Content:
```
1 # Software License Agreement (BSD License)
2 #
3 # Copyright (c) 2013-2014 Dariush Forouher
4 # All rights reserved.
5 #
6 # Based on code adapted from diagnostics_updater by Blaise Gassend
7 #
8 # Redistribution and use in source and binary forms, with or without
9 # modification, are permitted provided that the following conditions
10 # are met:
11 #
12 # * Redistributions of source code must retain the above copyright
13 # notice, this list of conditions and the following disclaimer.
14 # * Redistributions in binary form must reproduce the above
15 # copyright notice, this list of conditions and the following
16 # disclaimer in the documentation and/or other materials provided
17 # with the distribution.
18 # * Neither the name of Willow Garage, Inc. nor the names of its
19 # contributors may be used to endorse or promote products derived
20 # from this software without specific prior written permission.
21 #
22 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25 # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26 # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
30 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
32 # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33 # POSSIBILITY OF SUCH DAMAGE.
34
35 from math import sqrt
36 import logging
37 import sys
38
39 from rosgraph_msgs.msg import TopicStatistics
40 import rospy
41
42 _logger = logging.getLogger('rospy.impl.statistics')
43
44
45 class SubscriberStatisticsLogger():
46 """
47 Class that monitors each subscriber.
48
49 this class basically just keeps a collection of ConnectionStatisticsLogger.
50 """
51
52 @classmethod
53 def is_enabled(cls):
54 # disable statistics if node can't talk to parameter server
55 # which is the case in unit tests
56 try:
57 return rospy.get_param("/enable_statistics", False)
58 except Exception:
59 return False
60
61 def __init__(self, subscriber):
62 self.subscriber_name = subscriber.name
63 self.connections = dict()
64 self.read_parameters()
65
66 def read_parameters(self):
67 """
68 Fetch window parameters from parameter server
69 """
70
71 # Range of window length, in seconds
72 self.min_elements = rospy.get_param("/statistics_window_min_elements", 10)
73 self.max_elements = rospy.get_param("/statistics_window_max_elements", 100)
74
75 # Range of acceptable messages in window.
76 # Window size will be adjusted if number of observed is
77 # outside this range.
78 self.max_window = rospy.get_param("/statistics_window_max_size", 64)
79 self.min_window = rospy.get_param("/statistics_window_min_size", 4)
80
81 def callback(self, msg, publisher, stat_bytes):
82 """
83 This method is called for every message that has been received.
84
85 @param msg: The message received.
86 @param publisher: The name of the publisher node that sent the msg
87 @param stat_bytes: A counter, how many bytes have been moved across
88 this connection since it exists.
89
90 This method just looks up the ConnectionStatisticsLogger for the specific connection
91 between publisher and subscriber and delegates to statistics logging to that
92 instance.
93 """
94
95 # /clock is special, as it is subscribed very early
96 # also exclude /statistics to reduce noise.
97 if self.subscriber_name == "/clock" or self.subscriber_name == "/statistics":
98 return
99
100 try:
101 # create ConnectionStatisticsLogger for new connections
102 logger = self.connections.get(publisher)
103 if logger is None:
104 logger = ConnectionStatisticsLogger(self.subscriber_name, rospy.get_name(), publisher)
105 self.connections[publisher] = logger
106
107 # delegate stuff to that instance
108 logger.callback(self, msg, stat_bytes)
109 except Exception as e:
110 rospy.logerr("Unexpected error during statistics measurement: %s", str(e))
111
112 def shutdown(self):
113 for logger in self.connections.values():
114 logger.shutdown()
115 self.connections.clear()
116
117
118 class ConnectionStatisticsLogger():
119 """
120 Class that monitors lots of stuff for each connection.
121
122 is created whenever a subscriber is created.
123 is destroyed whenever its parent subscriber is destroyed.
124 its lifecycle is therefore bound to its parent subscriber.
125 """
126
127 def __init__(self, topic, subscriber, publisher):
128 """
129 Constructor.
130
131 @param topic: Name of the topic
132 @param subscriber: Name of the subscriber
133 @param publisher: Name of the publisher
134
135 These three should uniquely identify the connection.
136 """
137
138 self.topic = topic
139 self.subscriber = subscriber
140 self.publisher = publisher
141
142 self.pub = rospy.Publisher("/statistics", TopicStatistics, queue_size=10)
143
144 # reset window
145 self.last_pub_time = rospy.Time(0)
146 self.pub_frequency = rospy.Duration(1.0)
147
148 # timestamp age
149 self.age_list_ = []
150
151 # period calculations
152 self.arrival_time_list_ = []
153
154 self.last_seq_ = 0
155 self.dropped_msgs_ = 0
156 self.window_start = rospy.Time.now()
157
158 # temporary variables
159 self.stat_bytes_last_ = 0
160 self.stat_bytes_window_ = 0
161
162 def sendStatistics(self, subscriber_statistics_logger):
163 """
164 Send out statistics. Aggregate collected stats information.
165
166 Currently done blocking. Might be moved to own thread later. But at the moment
167 any computation done here should be rather quick.
168 """
169 curtime = rospy.Time.now()
170
171 msg = TopicStatistics()
172 msg.topic = self.topic
173 msg.node_sub = self.subscriber
174 msg.node_pub = self.publisher
175
176 msg.window_start = self.window_start
177 msg.window_stop = curtime
178
179 # Calculate bytes since last message
180 msg.traffic = self.stat_bytes_window_ - self.stat_bytes_last_
181
182 msg.delivered_msgs = len(self.arrival_time_list_)
183 msg.dropped_msgs = self.dropped_msgs_
184
185 # we can only calculate message age if the messages did contain Header fields.
186 if len(self.age_list_) > 0:
187 msg.stamp_age_mean = rospy.Duration(sum(self.age_list_, rospy.Duration(0)).to_sec() / len(self.age_list_))
188 variance = sum((rospy.Duration((msg.stamp_age_mean - value).to_sec() ** 2) for value in self.age_list_), rospy.Duration(0)) / len(self.age_list_)
189 msg.stamp_age_stddev = rospy.Duration(sqrt(variance.to_sec()))
190 msg.stamp_age_max = max(self.age_list_)
191 else:
192 msg.stamp_age_mean = rospy.Duration(0)
193 msg.stamp_age_stddev = rospy.Duration(0)
194 msg.stamp_age_max = rospy.Duration(0)
195
196 # computer period/frequency. we need at least two messages within the window to do this.
197 if len(self.arrival_time_list_) > 1:
198 periods = [j - i for i, j in zip(self.arrival_time_list_[:-1], self.arrival_time_list_[1:])]
199 msg.period_mean = rospy.Duration(sum(periods, rospy.Duration(0)).to_sec() / len(periods))
200 variance = sum((rospy.Duration((msg.period_mean - value).to_sec() ** 2) for value in periods), rospy.Duration(0)) / len(periods)
201 msg.period_stddev = rospy.Duration(sqrt(variance.to_sec()))
202 msg.period_max = max(periods)
203 else:
204 msg.period_mean = rospy.Duration(0)
205 msg.period_stddev = rospy.Duration(0)
206 msg.period_max = rospy.Duration(0)
207
208 self.pub.publish(msg)
209
210 # adjust window, if message count is not appropriate.
211 if len(self.arrival_time_list_) > subscriber_statistics_logger.max_elements and self.pub_frequency.to_sec() * 2 <= subscriber_statistics_logger.max_window:
212 self.pub_frequency *= 2
213 if len(self.arrival_time_list_) < subscriber_statistics_logger.min_elements and self.pub_frequency.to_sec() / 2 >= subscriber_statistics_logger.min_window:
214 self.pub_frequency /= 2
215
216 # clear collected stats, start new window.
217 self.age_list_ = []
218 self.arrival_time_list_ = []
219 self.dropped_msgs_ = 0
220
221 self.window_start = curtime
222
223 self.stat_bytes_last_ = self.stat_bytes_window_
224
225 def callback(self, subscriber_statistics_logger, msg, stat_bytes):
226 """
227 This method is called for every message, that is received on this
228 subscriber.
229
230 this callback will keep some statistics and publish the results
231 periodically on a topic. the publishing should probably be done
232 asynchronically in another thread.
233
234 @param msg: The message, that has been received. The message has usually
235 been already deserialized. However this is not always the case. (AnyMsg)
236 @param stat_bytes: A counter, how many bytes have been moved across
237 this connection since it exists.
238
239 Any computing-heavy stuff should be done somewhere else, as this
240 callback has to return before the message is delivered to the user.
241 """
242
243 arrival_time = rospy.Time.now()
244
245 self.arrival_time_list_.append(arrival_time)
246
247 self.stat_bytes_window_ = stat_bytes
248
249 # rospy has the feature to subscribe a topic with AnyMsg which aren't deserialized.
250 # Those subscribers won't have a header. But as these subscribers are rather rare
251 # ("rostopic hz" is the only one I know of), I'm gonna ignore them.
252 if msg._has_header:
253 self.age_list_.append(arrival_time - msg.header.stamp)
254
255 if self.last_seq_ + 1 != msg.header.seq:
256 self.dropped_msgs_ = self.dropped_msgs_ + 1
257 self.last_seq_ = msg.header.seq
258
259 # send out statistics with a certain frequency
260 if self.last_pub_time + self.pub_frequency < arrival_time:
261 self.last_pub_time = arrival_time
262 self.sendStatistics(subscriber_statistics_logger)
263
264 def shutdown(self):
265 self.pub.unregister()
266
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/clients/rospy/src/rospy/impl/statistics.py b/clients/rospy/src/rospy/impl/statistics.py
--- a/clients/rospy/src/rospy/impl/statistics.py
+++ b/clients/rospy/src/rospy/impl/statistics.py
@@ -68,15 +68,15 @@
Fetch window parameters from parameter server
"""
- # Range of window length, in seconds
- self.min_elements = rospy.get_param("/statistics_window_min_elements", 10)
- self.max_elements = rospy.get_param("/statistics_window_max_elements", 100)
-
# Range of acceptable messages in window.
# Window size will be adjusted if number of observed is
# outside this range.
- self.max_window = rospy.get_param("/statistics_window_max_size", 64)
+ self.min_elements = rospy.get_param("/statistics_window_min_elements", 10)
+ self.max_elements = rospy.get_param("/statistics_window_max_elements", 100)
+
+ # Range of window length, in seconds
self.min_window = rospy.get_param("/statistics_window_min_size", 4)
+ self.max_window = rospy.get_param("/statistics_window_max_size", 64)
def callback(self, msg, publisher, stat_bytes):
"""
@@ -208,9 +208,10 @@
self.pub.publish(msg)
# adjust window, if message count is not appropriate.
- if len(self.arrival_time_list_) > subscriber_statistics_logger.max_elements and self.pub_frequency.to_sec() * 2 <= subscriber_statistics_logger.max_window:
+ pub_period = 1.0 / self.pub_frequency.to_sec()
+ if len(self.arrival_time_list_) > subscriber_statistics_logger.max_elements and pub_period / 2 >= subscriber_statistics_logger.min_window:
self.pub_frequency *= 2
- if len(self.arrival_time_list_) < subscriber_statistics_logger.min_elements and self.pub_frequency.to_sec() / 2 >= subscriber_statistics_logger.min_window:
+ if len(self.arrival_time_list_) < subscriber_statistics_logger.min_elements and pub_period * 2 <= subscriber_statistics_logger.max_window:
self.pub_frequency /= 2
# clear collected stats, start new window.
@@ -257,7 +258,7 @@
self.last_seq_ = msg.header.seq
# send out statistics with a certain frequency
- if self.last_pub_time + self.pub_frequency < arrival_time:
+ if self.last_pub_time + rospy.Duration(1.0 / self.pub_frequency.to_sec()) < arrival_time:
self.last_pub_time = arrival_time
self.sendStatistics(subscriber_statistics_logger)
|
{"golden_diff": "diff --git a/clients/rospy/src/rospy/impl/statistics.py b/clients/rospy/src/rospy/impl/statistics.py\n--- a/clients/rospy/src/rospy/impl/statistics.py\n+++ b/clients/rospy/src/rospy/impl/statistics.py\n@@ -68,15 +68,15 @@\n Fetch window parameters from parameter server\n \"\"\"\n \n- # Range of window length, in seconds\n- self.min_elements = rospy.get_param(\"/statistics_window_min_elements\", 10)\n- self.max_elements = rospy.get_param(\"/statistics_window_max_elements\", 100)\n-\n # Range of acceptable messages in window.\n # Window size will be adjusted if number of observed is\n # outside this range.\n- self.max_window = rospy.get_param(\"/statistics_window_max_size\", 64)\n+ self.min_elements = rospy.get_param(\"/statistics_window_min_elements\", 10)\n+ self.max_elements = rospy.get_param(\"/statistics_window_max_elements\", 100)\n+\n+ # Range of window length, in seconds\n self.min_window = rospy.get_param(\"/statistics_window_min_size\", 4)\n+ self.max_window = rospy.get_param(\"/statistics_window_max_size\", 64)\n \n def callback(self, msg, publisher, stat_bytes):\n \"\"\"\n@@ -208,9 +208,10 @@\n self.pub.publish(msg)\n \n # adjust window, if message count is not appropriate.\n- if len(self.arrival_time_list_) > subscriber_statistics_logger.max_elements and self.pub_frequency.to_sec() * 2 <= subscriber_statistics_logger.max_window:\n+ pub_period = 1.0 / self.pub_frequency.to_sec()\n+ if len(self.arrival_time_list_) > subscriber_statistics_logger.max_elements and pub_period / 2 >= subscriber_statistics_logger.min_window:\n self.pub_frequency *= 2\n- if len(self.arrival_time_list_) < subscriber_statistics_logger.min_elements and self.pub_frequency.to_sec() / 2 >= subscriber_statistics_logger.min_window:\n+ if len(self.arrival_time_list_) < subscriber_statistics_logger.min_elements and pub_period * 2 <= subscriber_statistics_logger.max_window:\n self.pub_frequency /= 2\n \n # clear collected stats, start new window.\n@@ -257,7 +258,7 @@\n self.last_seq_ = msg.header.seq\n \n # send out statistics with a certain frequency\n- if self.last_pub_time + self.pub_frequency < arrival_time:\n+ if self.last_pub_time + rospy.Duration(1.0 / self.pub_frequency.to_sec()) < arrival_time:\n self.last_pub_time = arrival_time\n self.sendStatistics(subscriber_statistics_logger)\n", "issue": "Topic Statistics reports 0 for topics under 1Hz\nWhen `/enable_statistics` parameter is `true`, Subscribers publish to the `/statistics` topic. For a topic that has an expected 0.2Hz publishing frequency, this feature always reports `msg.mean_period` as 0, no matter how much time is given.\n", "before_files": [{"content": "# Software License Agreement (BSD License)\n#\n# Copyright (c) 2013-2014 Dariush Forouher\n# All rights reserved.\n#\n# Based on code adapted from diagnostics_updater by Blaise Gassend\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n# * Neither the name of Willow Garage, Inc. 
nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nfrom math import sqrt\nimport logging\nimport sys\n\nfrom rosgraph_msgs.msg import TopicStatistics\nimport rospy\n\n_logger = logging.getLogger('rospy.impl.statistics')\n\n\nclass SubscriberStatisticsLogger():\n \"\"\"\n Class that monitors each subscriber.\n\n this class basically just keeps a collection of ConnectionStatisticsLogger.\n \"\"\"\n\n @classmethod\n def is_enabled(cls):\n # disable statistics if node can't talk to parameter server\n # which is the case in unit tests\n try:\n return rospy.get_param(\"/enable_statistics\", False)\n except Exception:\n return False\n\n def __init__(self, subscriber):\n self.subscriber_name = subscriber.name\n self.connections = dict()\n self.read_parameters()\n\n def read_parameters(self):\n \"\"\"\n Fetch window parameters from parameter server\n \"\"\"\n\n # Range of window length, in seconds\n self.min_elements = rospy.get_param(\"/statistics_window_min_elements\", 10)\n self.max_elements = rospy.get_param(\"/statistics_window_max_elements\", 100)\n\n # Range of acceptable messages in window.\n # Window size will be adjusted if number of observed is\n # outside this range.\n self.max_window = rospy.get_param(\"/statistics_window_max_size\", 64)\n self.min_window = rospy.get_param(\"/statistics_window_min_size\", 4)\n\n def callback(self, msg, publisher, stat_bytes):\n \"\"\"\n This method is called for every message that has been received.\n\n @param msg: The message received.\n @param publisher: The name of the publisher node that sent the msg\n @param stat_bytes: A counter, how many bytes have been moved across\n this connection since it exists.\n\n This method just looks up the ConnectionStatisticsLogger for the specific connection\n between publisher and subscriber and delegates to statistics logging to that\n instance.\n \"\"\"\n\n # /clock is special, as it is subscribed very early\n # also exclude /statistics to reduce noise.\n if self.subscriber_name == \"/clock\" or self.subscriber_name == \"/statistics\":\n return\n\n try:\n # create ConnectionStatisticsLogger for new connections\n logger = self.connections.get(publisher)\n if logger is None:\n logger = ConnectionStatisticsLogger(self.subscriber_name, rospy.get_name(), publisher)\n self.connections[publisher] = logger\n\n # delegate stuff to that instance\n logger.callback(self, msg, stat_bytes)\n except Exception as e:\n rospy.logerr(\"Unexpected error during statistics measurement: %s\", str(e))\n\n def shutdown(self):\n for logger in self.connections.values():\n logger.shutdown()\n self.connections.clear()\n\n\nclass 
ConnectionStatisticsLogger():\n \"\"\"\n Class that monitors lots of stuff for each connection.\n\n is created whenever a subscriber is created.\n is destroyed whenever its parent subscriber is destroyed.\n its lifecycle is therefore bound to its parent subscriber.\n \"\"\"\n\n def __init__(self, topic, subscriber, publisher):\n \"\"\"\n Constructor.\n\n @param topic: Name of the topic\n @param subscriber: Name of the subscriber\n @param publisher: Name of the publisher\n\n These three should uniquely identify the connection.\n \"\"\"\n\n self.topic = topic\n self.subscriber = subscriber\n self.publisher = publisher\n\n self.pub = rospy.Publisher(\"/statistics\", TopicStatistics, queue_size=10)\n\n # reset window\n self.last_pub_time = rospy.Time(0)\n self.pub_frequency = rospy.Duration(1.0)\n\n # timestamp age\n self.age_list_ = []\n\n # period calculations\n self.arrival_time_list_ = []\n\n self.last_seq_ = 0\n self.dropped_msgs_ = 0\n self.window_start = rospy.Time.now()\n\n # temporary variables\n self.stat_bytes_last_ = 0\n self.stat_bytes_window_ = 0\n\n def sendStatistics(self, subscriber_statistics_logger):\n \"\"\"\n Send out statistics. Aggregate collected stats information.\n\n Currently done blocking. Might be moved to own thread later. But at the moment\n any computation done here should be rather quick.\n \"\"\"\n curtime = rospy.Time.now()\n\n msg = TopicStatistics()\n msg.topic = self.topic\n msg.node_sub = self.subscriber\n msg.node_pub = self.publisher\n\n msg.window_start = self.window_start\n msg.window_stop = curtime\n\n # Calculate bytes since last message\n msg.traffic = self.stat_bytes_window_ - self.stat_bytes_last_\n\n msg.delivered_msgs = len(self.arrival_time_list_)\n msg.dropped_msgs = self.dropped_msgs_\n\n # we can only calculate message age if the messages did contain Header fields.\n if len(self.age_list_) > 0:\n msg.stamp_age_mean = rospy.Duration(sum(self.age_list_, rospy.Duration(0)).to_sec() / len(self.age_list_))\n variance = sum((rospy.Duration((msg.stamp_age_mean - value).to_sec() ** 2) for value in self.age_list_), rospy.Duration(0)) / len(self.age_list_)\n msg.stamp_age_stddev = rospy.Duration(sqrt(variance.to_sec()))\n msg.stamp_age_max = max(self.age_list_)\n else:\n msg.stamp_age_mean = rospy.Duration(0)\n msg.stamp_age_stddev = rospy.Duration(0)\n msg.stamp_age_max = rospy.Duration(0)\n\n # computer period/frequency. 
we need at least two messages within the window to do this.\n if len(self.arrival_time_list_) > 1:\n periods = [j - i for i, j in zip(self.arrival_time_list_[:-1], self.arrival_time_list_[1:])]\n msg.period_mean = rospy.Duration(sum(periods, rospy.Duration(0)).to_sec() / len(periods))\n variance = sum((rospy.Duration((msg.period_mean - value).to_sec() ** 2) for value in periods), rospy.Duration(0)) / len(periods)\n msg.period_stddev = rospy.Duration(sqrt(variance.to_sec()))\n msg.period_max = max(periods)\n else:\n msg.period_mean = rospy.Duration(0)\n msg.period_stddev = rospy.Duration(0)\n msg.period_max = rospy.Duration(0)\n\n self.pub.publish(msg)\n\n # adjust window, if message count is not appropriate.\n if len(self.arrival_time_list_) > subscriber_statistics_logger.max_elements and self.pub_frequency.to_sec() * 2 <= subscriber_statistics_logger.max_window:\n self.pub_frequency *= 2\n if len(self.arrival_time_list_) < subscriber_statistics_logger.min_elements and self.pub_frequency.to_sec() / 2 >= subscriber_statistics_logger.min_window:\n self.pub_frequency /= 2\n\n # clear collected stats, start new window.\n self.age_list_ = []\n self.arrival_time_list_ = []\n self.dropped_msgs_ = 0\n\n self.window_start = curtime\n\n self.stat_bytes_last_ = self.stat_bytes_window_\n\n def callback(self, subscriber_statistics_logger, msg, stat_bytes):\n \"\"\"\n This method is called for every message, that is received on this\n subscriber.\n\n this callback will keep some statistics and publish the results\n periodically on a topic. the publishing should probably be done\n asynchronically in another thread.\n\n @param msg: The message, that has been received. The message has usually\n been already deserialized. However this is not always the case. (AnyMsg)\n @param stat_bytes: A counter, how many bytes have been moved across\n this connection since it exists.\n\n Any computing-heavy stuff should be done somewhere else, as this\n callback has to return before the message is delivered to the user.\n \"\"\"\n\n arrival_time = rospy.Time.now()\n\n self.arrival_time_list_.append(arrival_time)\n\n self.stat_bytes_window_ = stat_bytes\n\n # rospy has the feature to subscribe a topic with AnyMsg which aren't deserialized.\n # Those subscribers won't have a header. 
But as these subscribers are rather rare\n # (\"rostopic hz\" is the only one I know of), I'm gonna ignore them.\n if msg._has_header:\n self.age_list_.append(arrival_time - msg.header.stamp)\n\n if self.last_seq_ + 1 != msg.header.seq:\n self.dropped_msgs_ = self.dropped_msgs_ + 1\n self.last_seq_ = msg.header.seq\n\n # send out statistics with a certain frequency\n if self.last_pub_time + self.pub_frequency < arrival_time:\n self.last_pub_time = arrival_time\n self.sendStatistics(subscriber_statistics_logger)\n\n def shutdown(self):\n self.pub.unregister()\n", "path": "clients/rospy/src/rospy/impl/statistics.py"}], "after_files": [{"content": "# Software License Agreement (BSD License)\n#\n# Copyright (c) 2013-2014 Dariush Forouher\n# All rights reserved.\n#\n# Based on code adapted from diagnostics_updater by Blaise Gassend\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n# * Neither the name of Willow Garage, Inc. nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE\n# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nfrom math import sqrt\nimport logging\nimport sys\n\nfrom rosgraph_msgs.msg import TopicStatistics\nimport rospy\n\n_logger = logging.getLogger('rospy.impl.statistics')\n\n\nclass SubscriberStatisticsLogger():\n \"\"\"\n Class that monitors each subscriber.\n\n this class basically just keeps a collection of ConnectionStatisticsLogger.\n \"\"\"\n\n @classmethod\n def is_enabled(cls):\n # disable statistics if node can't talk to parameter server\n # which is the case in unit tests\n try:\n return rospy.get_param(\"/enable_statistics\", False)\n except Exception:\n return False\n\n def __init__(self, subscriber):\n self.subscriber_name = subscriber.name\n self.connections = dict()\n self.read_parameters()\n\n def read_parameters(self):\n \"\"\"\n Fetch window parameters from parameter server\n \"\"\"\n\n # Range of acceptable messages in window.\n # Window size will be adjusted if number of observed is\n # outside this range.\n self.min_elements = rospy.get_param(\"/statistics_window_min_elements\", 10)\n self.max_elements = rospy.get_param(\"/statistics_window_max_elements\", 100)\n\n # Range of window length, in seconds\n self.min_window = rospy.get_param(\"/statistics_window_min_size\", 4)\n self.max_window = rospy.get_param(\"/statistics_window_max_size\", 64)\n\n def callback(self, msg, publisher, stat_bytes):\n \"\"\"\n This method is called for every message that has been received.\n\n @param msg: The message received.\n @param publisher: The name of the publisher node that sent the msg\n @param stat_bytes: A counter, how many bytes have been moved across\n this connection since it exists.\n\n This method just looks up the ConnectionStatisticsLogger for the specific connection\n between publisher and subscriber and delegates to statistics logging to that\n instance.\n \"\"\"\n\n # /clock is special, as it is subscribed very early\n # also exclude /statistics to reduce noise.\n if self.subscriber_name == \"/clock\" or self.subscriber_name == \"/statistics\":\n return\n\n try:\n # create ConnectionStatisticsLogger for new connections\n logger = self.connections.get(publisher)\n if logger is None:\n logger = ConnectionStatisticsLogger(self.subscriber_name, rospy.get_name(), publisher)\n self.connections[publisher] = logger\n\n # delegate stuff to that instance\n logger.callback(self, msg, stat_bytes)\n except Exception as e:\n rospy.logerr(\"Unexpected error during statistics measurement: %s\", str(e))\n\n def shutdown(self):\n for logger in self.connections.values():\n logger.shutdown()\n self.connections.clear()\n\n\nclass ConnectionStatisticsLogger():\n \"\"\"\n Class that monitors lots of stuff for each connection.\n\n is created whenever a subscriber is created.\n is destroyed whenever its parent subscriber is destroyed.\n its lifecycle is therefore bound to its parent subscriber.\n \"\"\"\n\n def __init__(self, topic, subscriber, publisher):\n \"\"\"\n Constructor.\n\n @param topic: Name of the topic\n @param subscriber: Name of the subscriber\n 
@param publisher: Name of the publisher\n\n These three should uniquely identify the connection.\n \"\"\"\n\n self.topic = topic\n self.subscriber = subscriber\n self.publisher = publisher\n\n self.pub = rospy.Publisher(\"/statistics\", TopicStatistics, queue_size=10)\n\n # reset window\n self.last_pub_time = rospy.Time(0)\n self.pub_frequency = rospy.Duration(1.0)\n\n # timestamp age\n self.age_list_ = []\n\n # period calculations\n self.arrival_time_list_ = []\n\n self.last_seq_ = 0\n self.dropped_msgs_ = 0\n self.window_start = rospy.Time.now()\n\n # temporary variables\n self.stat_bytes_last_ = 0\n self.stat_bytes_window_ = 0\n\n def sendStatistics(self, subscriber_statistics_logger):\n \"\"\"\n Send out statistics. Aggregate collected stats information.\n\n Currently done blocking. Might be moved to own thread later. But at the moment\n any computation done here should be rather quick.\n \"\"\"\n curtime = rospy.Time.now()\n\n msg = TopicStatistics()\n msg.topic = self.topic\n msg.node_sub = self.subscriber\n msg.node_pub = self.publisher\n\n msg.window_start = self.window_start\n msg.window_stop = curtime\n\n # Calculate bytes since last message\n msg.traffic = self.stat_bytes_window_ - self.stat_bytes_last_\n\n msg.delivered_msgs = len(self.arrival_time_list_)\n msg.dropped_msgs = self.dropped_msgs_\n\n # we can only calculate message age if the messages did contain Header fields.\n if len(self.age_list_) > 0:\n msg.stamp_age_mean = rospy.Duration(sum(self.age_list_, rospy.Duration(0)).to_sec() / len(self.age_list_))\n variance = sum((rospy.Duration((msg.stamp_age_mean - value).to_sec() ** 2) for value in self.age_list_), rospy.Duration(0)) / len(self.age_list_)\n msg.stamp_age_stddev = rospy.Duration(sqrt(variance.to_sec()))\n msg.stamp_age_max = max(self.age_list_)\n else:\n msg.stamp_age_mean = rospy.Duration(0)\n msg.stamp_age_stddev = rospy.Duration(0)\n msg.stamp_age_max = rospy.Duration(0)\n\n # computer period/frequency. we need at least two messages within the window to do this.\n if len(self.arrival_time_list_) > 1:\n periods = [j - i for i, j in zip(self.arrival_time_list_[:-1], self.arrival_time_list_[1:])]\n msg.period_mean = rospy.Duration(sum(periods, rospy.Duration(0)).to_sec() / len(periods))\n variance = sum((rospy.Duration((msg.period_mean - value).to_sec() ** 2) for value in periods), rospy.Duration(0)) / len(periods)\n msg.period_stddev = rospy.Duration(sqrt(variance.to_sec()))\n msg.period_max = max(periods)\n else:\n msg.period_mean = rospy.Duration(0)\n msg.period_stddev = rospy.Duration(0)\n msg.period_max = rospy.Duration(0)\n\n self.pub.publish(msg)\n\n # adjust window, if message count is not appropriate.\n pub_period = 1.0 / self.pub_frequency.to_sec()\n if len(self.arrival_time_list_) > subscriber_statistics_logger.max_elements and pub_period / 2 >= subscriber_statistics_logger.min_window:\n self.pub_frequency *= 2\n if len(self.arrival_time_list_) < subscriber_statistics_logger.min_elements and pub_period * 2 <= subscriber_statistics_logger.max_window:\n self.pub_frequency /= 2\n\n # clear collected stats, start new window.\n self.age_list_ = []\n self.arrival_time_list_ = []\n self.dropped_msgs_ = 0\n\n self.window_start = curtime\n\n self.stat_bytes_last_ = self.stat_bytes_window_\n\n def callback(self, subscriber_statistics_logger, msg, stat_bytes):\n \"\"\"\n This method is called for every message, that is received on this\n subscriber.\n\n this callback will keep some statistics and publish the results\n periodically on a topic. 
the publishing should probably be done\n asynchronically in another thread.\n\n @param msg: The message, that has been received. The message has usually\n been already deserialized. However this is not always the case. (AnyMsg)\n @param stat_bytes: A counter, how many bytes have been moved across\n this connection since it exists.\n\n Any computing-heavy stuff should be done somewhere else, as this\n callback has to return before the message is delivered to the user.\n \"\"\"\n\n arrival_time = rospy.Time.now()\n\n self.arrival_time_list_.append(arrival_time)\n\n self.stat_bytes_window_ = stat_bytes\n\n # rospy has the feature to subscribe a topic with AnyMsg which aren't deserialized.\n # Those subscribers won't have a header. But as these subscribers are rather rare\n # (\"rostopic hz\" is the only one I know of), I'm gonna ignore them.\n if msg._has_header:\n self.age_list_.append(arrival_time - msg.header.stamp)\n\n if self.last_seq_ + 1 != msg.header.seq:\n self.dropped_msgs_ = self.dropped_msgs_ + 1\n self.last_seq_ = msg.header.seq\n\n # send out statistics with a certain frequency\n if self.last_pub_time + rospy.Duration(1.0 / self.pub_frequency.to_sec()) < arrival_time:\n self.last_pub_time = arrival_time\n self.sendStatistics(subscriber_statistics_logger)\n\n def shutdown(self):\n self.pub.unregister()\n", "path": "clients/rospy/src/rospy/impl/statistics.py"}]}
| 3,249 | 585 |
gh_patches_debug_11354
|
rasdani/github-patches
|
git_diff
|
getnikola__nikola-2523
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Nikola generates invalid html5 when markdown footnote extension is used
The [default output type](http://pythonhosted.org/Markdown/reference.html#markdown) of the python markdown library is xhtml1. The 4 templates that ship with Nikola have <!DOCTYPE html> which defines them as html5, so I'm assuming that we're intending to generate html5.
When the footnote markdown extension is used, it generates invalid html5 according to the w3c validator.
`<a class="footnote-ref" href="..." rev="footnote">...</a>`
(rev="footnote" is valid html4, but not html5)
The markdown library indicates that this is invalid html5 (https://github.com/waylan/Python-Markdown/blob/master/markdown/extensions/footnotes.py#L149) so we can trigger the correct behaviour by setting the output_format.
Given the markdown library does not make much use of the output_format variable, I don't think this is likely to materially change the output for many people at all - https://github.com/waylan/Python-Markdown/search?utf8=%E2%9C%93&q=output_format)
--- END ISSUE ---
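The serializer difference described above can be reproduced directly; a minimal sketch, assuming the `markdown` package is installed and accepts the short extension name `"footnotes"`:

```python
import markdown

text = "Some text with a footnote.[^1]\n\n[^1]: The footnote body."

# Default serializer (xhtml1): footnote reference links carry rev="footnote".
print(markdown.markdown(text, extensions=["footnotes"]))

# html5 serializer: the rev attribute is dropped, which is what the proposed fix relies on.
print(markdown.markdown(text, extensions=["footnotes"], output_format="html5"))
```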
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nikola/plugins/compile/markdown/__init__.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2012-2016 Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 """Implementation of compile_html based on markdown."""
28
29 from __future__ import unicode_literals
30
31 import io
32 import os
33
34 try:
35 from markdown import markdown
36 except ImportError:
37 markdown = None # NOQA
38 nikola_extension = None
39 gist_extension = None
40 podcast_extension = None
41
42 from nikola.plugin_categories import PageCompiler
43 from nikola.utils import makedirs, req_missing, write_metadata
44
45
46 class CompileMarkdown(PageCompiler):
47 """Compile Markdown into HTML."""
48
49 name = "markdown"
50 friendly_name = "Markdown"
51 demote_headers = True
52 extensions = []
53 site = None
54
55 def set_site(self, site):
56 """Set Nikola site."""
57 super(CompileMarkdown, self).set_site(site)
58 self.config_dependencies = []
59 for plugin_info in self.get_compiler_extensions():
60 self.config_dependencies.append(plugin_info.name)
61 self.extensions.append(plugin_info.plugin_object)
62 plugin_info.plugin_object.short_help = plugin_info.description
63
64 self.config_dependencies.append(str(sorted(site.config.get("MARKDOWN_EXTENSIONS"))))
65
66 def compile_html(self, source, dest, is_two_file=True):
67 """Compile source file into HTML and save as dest."""
68 if markdown is None:
69 req_missing(['markdown'], 'build this site (compile Markdown)')
70 makedirs(os.path.dirname(dest))
71 self.extensions += self.site.config.get("MARKDOWN_EXTENSIONS")
72 try:
73 post = self.site.post_per_input_file[source]
74 except KeyError:
75 post = None
76 with io.open(dest, "w+", encoding="utf8") as out_file:
77 with io.open(source, "r", encoding="utf8") as in_file:
78 data = in_file.read()
79 if not is_two_file:
80 _, data = self.split_metadata(data)
81 output = markdown(data, self.extensions)
82 output, shortcode_deps = self.site.apply_shortcodes(output, filename=source, with_dependencies=True, extra_context=dict(post=post))
83 out_file.write(output)
84 if post is None:
85 if shortcode_deps:
86 self.logger.error(
87 "Cannot save dependencies for post {0} due to unregistered source file name",
88 source)
89 else:
90 post._depfile[dest] += shortcode_deps
91
92 def create_post(self, path, **kw):
93 """Create a new post."""
94 content = kw.pop('content', None)
95 onefile = kw.pop('onefile', False)
96 # is_page is not used by create_post as of now.
97 kw.pop('is_page', False)
98
99 metadata = {}
100 metadata.update(self.default_metadata)
101 metadata.update(kw)
102 makedirs(os.path.dirname(path))
103 if not content.endswith('\n'):
104 content += '\n'
105 with io.open(path, "w+", encoding="utf8") as fd:
106 if onefile:
107 fd.write('<!-- \n')
108 fd.write(write_metadata(metadata))
109 fd.write('-->\n\n')
110 fd.write(content)
111
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nikola/plugins/compile/markdown/__init__.py b/nikola/plugins/compile/markdown/__init__.py
--- a/nikola/plugins/compile/markdown/__init__.py
+++ b/nikola/plugins/compile/markdown/__init__.py
@@ -78,7 +78,7 @@
data = in_file.read()
if not is_two_file:
_, data = self.split_metadata(data)
- output = markdown(data, self.extensions)
+ output = markdown(data, self.extensions, output_format="html5")
output, shortcode_deps = self.site.apply_shortcodes(output, filename=source, with_dependencies=True, extra_context=dict(post=post))
out_file.write(output)
if post is None:
|
{"golden_diff": "diff --git a/nikola/plugins/compile/markdown/__init__.py b/nikola/plugins/compile/markdown/__init__.py\n--- a/nikola/plugins/compile/markdown/__init__.py\n+++ b/nikola/plugins/compile/markdown/__init__.py\n@@ -78,7 +78,7 @@\n data = in_file.read()\n if not is_two_file:\n _, data = self.split_metadata(data)\n- output = markdown(data, self.extensions)\n+ output = markdown(data, self.extensions, output_format=\"html5\")\n output, shortcode_deps = self.site.apply_shortcodes(output, filename=source, with_dependencies=True, extra_context=dict(post=post))\n out_file.write(output)\n if post is None:\n", "issue": "Nikola generates invalid html5 when markdown footnote extension is used\nThe [default output type](http://pythonhosted.org/Markdown/reference.html#markdown) of the python markdown library is xhtml1. The 4 templates that ship with Nikola have <!DOCTYPE html> which defines them as html5, so I'm assuming that we're intending to generate html5.\n\nWhen the footnote markdown extension is used, it generates invalid html5 according to the w3c validator.\n\n`<a class=\"footnote-ref\" href=\"...\" rev=\"footnote\">...</a>`\n\n(rev=\"footnote\" is valid html4, but not html5)\n\nThe markdown library indicates that this is invalid html5 (https://github.com/waylan/Python-Markdown/blob/master/markdown/extensions/footnotes.py#L149) so we can trigger the correct behaviour by setting the output_format.\n\nGiven the markdown library does not make much use of the output_format variable, I don't think this is likely to materially change the output for many people at all - https://github.com/waylan/Python-Markdown/search?utf8=%E2%9C%93&q=output_format)\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2016 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Implementation of compile_html based on markdown.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport io\nimport os\n\ntry:\n from markdown import markdown\nexcept ImportError:\n markdown = None # NOQA\n nikola_extension = None\n gist_extension = None\n podcast_extension = None\n\nfrom nikola.plugin_categories import PageCompiler\nfrom nikola.utils import makedirs, req_missing, write_metadata\n\n\nclass CompileMarkdown(PageCompiler):\n \"\"\"Compile Markdown into HTML.\"\"\"\n\n name = \"markdown\"\n friendly_name = \"Markdown\"\n demote_headers = True\n extensions = []\n site = None\n\n def set_site(self, site):\n \"\"\"Set Nikola site.\"\"\"\n super(CompileMarkdown, self).set_site(site)\n self.config_dependencies = []\n for plugin_info in self.get_compiler_extensions():\n self.config_dependencies.append(plugin_info.name)\n self.extensions.append(plugin_info.plugin_object)\n plugin_info.plugin_object.short_help = plugin_info.description\n\n self.config_dependencies.append(str(sorted(site.config.get(\"MARKDOWN_EXTENSIONS\"))))\n\n def compile_html(self, source, dest, is_two_file=True):\n \"\"\"Compile source file into HTML and save as dest.\"\"\"\n if markdown is None:\n req_missing(['markdown'], 'build this site (compile Markdown)')\n makedirs(os.path.dirname(dest))\n self.extensions += self.site.config.get(\"MARKDOWN_EXTENSIONS\")\n try:\n post = self.site.post_per_input_file[source]\n except KeyError:\n post = None\n with io.open(dest, \"w+\", encoding=\"utf8\") as out_file:\n with io.open(source, \"r\", encoding=\"utf8\") as in_file:\n data = in_file.read()\n if not is_two_file:\n _, data = self.split_metadata(data)\n output = markdown(data, self.extensions)\n output, shortcode_deps = self.site.apply_shortcodes(output, filename=source, with_dependencies=True, extra_context=dict(post=post))\n out_file.write(output)\n if post is None:\n if shortcode_deps:\n self.logger.error(\n \"Cannot save dependencies for post {0} due to unregistered source file name\",\n source)\n else:\n post._depfile[dest] += shortcode_deps\n\n def create_post(self, path, **kw):\n \"\"\"Create a new post.\"\"\"\n content = kw.pop('content', None)\n onefile = kw.pop('onefile', False)\n # is_page is not used by create_post as of now.\n kw.pop('is_page', False)\n\n metadata = {}\n metadata.update(self.default_metadata)\n metadata.update(kw)\n makedirs(os.path.dirname(path))\n if not content.endswith('\\n'):\n content += '\\n'\n with io.open(path, \"w+\", encoding=\"utf8\") as fd:\n if onefile:\n fd.write('<!-- \\n')\n fd.write(write_metadata(metadata))\n fd.write('-->\\n\\n')\n fd.write(content)\n", "path": "nikola/plugins/compile/markdown/__init__.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2016 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above 
copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Implementation of compile_html based on markdown.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport io\nimport os\n\ntry:\n from markdown import markdown\nexcept ImportError:\n markdown = None # NOQA\n nikola_extension = None\n gist_extension = None\n podcast_extension = None\n\nfrom nikola.plugin_categories import PageCompiler\nfrom nikola.utils import makedirs, req_missing, write_metadata\n\n\nclass CompileMarkdown(PageCompiler):\n \"\"\"Compile Markdown into HTML.\"\"\"\n\n name = \"markdown\"\n friendly_name = \"Markdown\"\n demote_headers = True\n extensions = []\n site = None\n\n def set_site(self, site):\n \"\"\"Set Nikola site.\"\"\"\n super(CompileMarkdown, self).set_site(site)\n self.config_dependencies = []\n for plugin_info in self.get_compiler_extensions():\n self.config_dependencies.append(plugin_info.name)\n self.extensions.append(plugin_info.plugin_object)\n plugin_info.plugin_object.short_help = plugin_info.description\n\n self.config_dependencies.append(str(sorted(site.config.get(\"MARKDOWN_EXTENSIONS\"))))\n\n def compile_html(self, source, dest, is_two_file=True):\n \"\"\"Compile source file into HTML and save as dest.\"\"\"\n if markdown is None:\n req_missing(['markdown'], 'build this site (compile Markdown)')\n makedirs(os.path.dirname(dest))\n self.extensions += self.site.config.get(\"MARKDOWN_EXTENSIONS\")\n try:\n post = self.site.post_per_input_file[source]\n except KeyError:\n post = None\n with io.open(dest, \"w+\", encoding=\"utf8\") as out_file:\n with io.open(source, \"r\", encoding=\"utf8\") as in_file:\n data = in_file.read()\n if not is_two_file:\n _, data = self.split_metadata(data)\n output = markdown(data, self.extensions, output_format=\"html5\")\n output, shortcode_deps = self.site.apply_shortcodes(output, filename=source, with_dependencies=True, extra_context=dict(post=post))\n out_file.write(output)\n if post is None:\n if shortcode_deps:\n self.logger.error(\n \"Cannot save dependencies for post {0} due to unregistered source file name\",\n source)\n else:\n post._depfile[dest] += shortcode_deps\n\n def create_post(self, path, **kw):\n \"\"\"Create a new post.\"\"\"\n content = kw.pop('content', None)\n onefile = kw.pop('onefile', False)\n # is_page is not used by create_post as of now.\n kw.pop('is_page', False)\n\n metadata = {}\n metadata.update(self.default_metadata)\n metadata.update(kw)\n makedirs(os.path.dirname(path))\n if not content.endswith('\\n'):\n content += '\\n'\n with io.open(path, \"w+\", encoding=\"utf8\") as fd:\n if onefile:\n fd.write('<!-- \\n')\n fd.write(write_metadata(metadata))\n fd.write('-->\\n\\n')\n fd.write(content)\n", "path": "nikola/plugins/compile/markdown/__init__.py"}]}
| 1,619 | 160 |
gh_patches_debug_12528
|
rasdani/github-patches
|
git_diff
|
cal-itp__benefits-281
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Better HTTP response code handling in Eligibility API
In the Eligibility API `Client`, we currently attempt to parse the response as if it were a JWT, regardless of response code:
From https://github.com/cal-itp/benefits/blob/dev/benefits/eligibility/api.py#L145
```python
try:
r = requests.get(self.verifier.api_url, headers=self._auth_headers(token))
except requests.ConnectionError:
raise ApiError("Connection to verification server failed")
except requests.Timeout:
raise ApiError("Connection to verification server timed out")
except requests.TooManyRedirects:
raise ApiError("Too many redirects to verification server")
except requests.HTTPError as e:
raise ApiError(e)
return self._tokenize_response(r)
```
Since input errors on the form are returned as JWTs, the same as success payloads, this code worked fine for 200 and 400 responses. But if the API outright rejects the call with a 403, the above code attempts to parse _that_ response as a JWT, throwing an unhandled exception.
Let's guard the `return self._tokenize_response(r)` to ensure we are only trying to tokenize the expected 200 and 400 responses; other codes should raise an `ApiError`.
--- END ISSUE ---
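A minimal sketch of that guard, kept self-contained (the real `ApiError` and client wiring live in the file below; `tokenize` stands in for `self._tokenize_response`):

```python
class ApiError(Exception):
    """Stand-in for benefits.eligibility.api.ApiError."""

def tokenize_if_expected(response, tokenize):
    # Only 200 (success payload) and 400 (form-error payload) bodies are JWTs worth parsing;
    # anything else (e.g. a 403 rejection) should surface as an ApiError instead.
    if response.status_code not in (200, 400):
        raise ApiError(f"Unexpected verification response: HTTP {response.status_code}")
    return tokenize(response)
```

In `Client._request` this amounts to checking `r.status_code` before the final `return self._tokenize_response(r)`.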
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `benefits/eligibility/api.py`
Content:
```
1 """
2 The eligibility application: Eligibility Verification API implementation.
3 """
4 import datetime
5 import json
6 import logging
7 import uuid
8
9 from jwcrypto import common as jwcrypto, jwe, jws, jwt
10 import requests
11
12 from benefits.settings import ALLOWED_HOSTS
13
14
15 logger = logging.getLogger(__name__)
16
17
18 class ApiError(Exception):
19 """Error calling the Eligibility Verification API."""
20
21 pass
22
23
24 class TokenError(Exception):
25 """Error with API request/response token."""
26
27 pass
28
29
30 class RequestToken:
31 """Eligibility Verification API request token."""
32
33 def __init__(self, agency, verifier, sub, name):
34 logger.info("Initialize new request token")
35
36 # send the eligibility type names
37 types = list(map(lambda t: t.name, agency.types_to_verify()))
38
39 # craft the main token payload
40 payload = dict(
41 jti=str(uuid.uuid4()),
42 iss=ALLOWED_HOSTS[0],
43 iat=int(datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).timestamp()),
44 agency=agency.agency_id,
45 eligibility=types,
46 sub=sub,
47 name=name,
48 )
49
50 logger.debug("Sign token payload with agency's private key")
51 header = {"typ": "JWS", "alg": agency.jws_signing_alg}
52 signed_token = jwt.JWT(header=header, claims=payload)
53 signed_token.make_signed_token(agency.private_jwk)
54 signed_payload = signed_token.serialize()
55
56 logger.debug("Encrypt signed token payload with verifier's public key")
57 header = {"typ": "JWE", "alg": verifier.jwe_encryption_alg, "enc": verifier.jwe_cek_enc}
58 encrypted_token = jwt.JWT(header=header, claims=signed_payload)
59 encrypted_token.make_encrypted_token(verifier.public_jwk)
60
61 logger.info("Signed and encrypted request token initialized")
62 self._jwe = encrypted_token
63
64 def __repr__(self):
65 return str(self)
66
67 def __str__(self):
68 return self._jwe.serialize()
69
70
71 class ResponseToken:
72 """Eligibility Verification API response token."""
73
74 def __init__(self, response, agency, verifier):
75 logger.info("Read encrypted token from response")
76
77 try:
78 encrypted_signed_token = response.text
79 if not encrypted_signed_token:
80 raise ValueError()
81 # strip extra spaces and wrapping quote chars
82 encrypted_signed_token = encrypted_signed_token.strip("'\n\"")
83 except ValueError:
84 raise TokenError("Invalid response format")
85
86 logger.debug("Decrypt response token using agency's private key")
87 allowed_algs = [verifier.jwe_encryption_alg, verifier.jwe_cek_enc]
88 decrypted_token = jwe.JWE(algs=allowed_algs)
89 try:
90 decrypted_token.deserialize(encrypted_signed_token, key=agency.private_jwk)
91 except jwe.InvalidJWEData:
92 raise TokenError("Invalid JWE token")
93 except jwe.InvalidJWEOperation:
94 raise TokenError("JWE token decryption failed")
95
96 decrypted_payload = str(decrypted_token.payload, "utf-8")
97
98 logger.debug("Verify decrypted response token's signature using verifier's public key")
99 signed_token = jws.JWS()
100 try:
101 signed_token.deserialize(decrypted_payload, key=verifier.public_jwk, alg=agency.jws_signing_alg)
102 except jws.InvalidJWSObject:
103 raise TokenError("Invalid JWS token")
104 except jws.InvalidJWSSignature:
105 raise TokenError("JWS token signature verification failed")
106
107 logger.info("Response token decrypted and signature verified")
108
109 payload = json.loads(str(signed_token.payload, "utf-8"))
110 self.eligibility = list(payload.get("eligibility", []))
111 self.error = payload.get("error", None)
112
113
114 class Client:
115 """Eligibility Verification API HTTP client."""
116
117 def __init__(self, agency):
118 logger.debug(f"Initialize client for agency: {agency.short_name}")
119 self.agency = agency
120 self.verifier = agency.eligibility_verifier
121
122 def _tokenize_request(self, sub, name):
123 """Create a request token."""
124 return RequestToken(self.agency, self.verifier, sub, name)
125
126 def _tokenize_response(self, response):
127 """Parse a response token."""
128 return ResponseToken(response, self.agency, self.verifier)
129
130 def _auth_headers(self, token):
131 """Create headers for the request with the token and verifier API keys"""
132 headers = dict(Authorization=f"Bearer {token}")
133 headers[self.verifier.api_auth_header] = self.verifier.api_auth_key
134 return headers
135
136 def _request(self, sub, name):
137 """Make an API request for eligibility verification."""
138 logger.debug("Start new eligibility verification request")
139
140 try:
141 token = self._tokenize_request(sub, name)
142 except jwcrypto.JWException:
143 raise TokenError("Failed to tokenize form values")
144
145 try:
146 logger.debug(f"GET request to {self.verifier.api_url}")
147 r = requests.get(self.verifier.api_url, headers=self._auth_headers(token))
148 except requests.ConnectionError:
149 raise ApiError("Connection to verification server failed")
150 except requests.Timeout:
151 raise ApiError("Connection to verification server timed out")
152 except requests.TooManyRedirects:
153 raise ApiError("Too many redirects to verification server")
154 except requests.HTTPError as e:
155 raise ApiError(e)
156
157 logger.debug("Process eligiblity verification response")
158 return self._tokenize_response(r)
159
160 def verify(self, sub, name):
161 """Check eligibility for the subject and name."""
162 return self._request(sub, name)
163
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/benefits/eligibility/api.py b/benefits/eligibility/api.py
--- a/benefits/eligibility/api.py
+++ b/benefits/eligibility/api.py
@@ -154,8 +154,13 @@
except requests.HTTPError as e:
raise ApiError(e)
- logger.debug("Process eligiblity verification response")
- return self._tokenize_response(r)
+ expected_status_codes = {200, 400}
+ if r.status_code in expected_status_codes:
+ logger.debug("Process eligiblity verification response")
+ return self._tokenize_response(r)
+ else:
+ logger.warning(f"Unexpected eligibility verification response status code: {r.status_code}")
+ raise ApiError("Unexpected eligibility verification response")
def verify(self, sub, name):
"""Check eligibility for the subject and name."""
|
{"golden_diff": "diff --git a/benefits/eligibility/api.py b/benefits/eligibility/api.py\n--- a/benefits/eligibility/api.py\n+++ b/benefits/eligibility/api.py\n@@ -154,8 +154,13 @@\n except requests.HTTPError as e:\n raise ApiError(e)\n \n- logger.debug(\"Process eligiblity verification response\")\n- return self._tokenize_response(r)\n+ expected_status_codes = {200, 400}\n+ if r.status_code in expected_status_codes:\n+ logger.debug(\"Process eligiblity verification response\")\n+ return self._tokenize_response(r)\n+ else:\n+ logger.warning(f\"Unexpected eligibility verification response status code: {r.status_code}\")\n+ raise ApiError(\"Unexpected eligibility verification response\")\n \n def verify(self, sub, name):\n \"\"\"Check eligibility for the subject and name.\"\"\"\n", "issue": "Better HTTP response code handling in Eligibility API\nIn the Eligibility API `Client`, we currently attempt to parse the response as if it were a JWT, regardless of response code:\r\n\r\nFrom https://github.com/cal-itp/benefits/blob/dev/benefits/eligibility/api.py#L145\r\n\r\n```python\r\ntry:\r\n r = requests.get(self.verifier.api_url, headers=self._auth_headers(token))\r\nexcept requests.ConnectionError:\r\n raise ApiError(\"Connection to verification server failed\")\r\nexcept requests.Timeout:\r\n raise ApiError(\"Connection to verification server timed out\")\r\nexcept requests.TooManyRedirects:\r\n raise ApiError(\"Too many redirects to verification server\")\r\nexcept requests.HTTPError as e:\r\n raise ApiError(e)\r\n\r\nreturn self._tokenize_response(r)\r\n```\r\n\r\nSince input errors on the form are returned as JWTs, the same as success payloads, this code worked fine for 200 and 400 responses. But if the API outright rejects the call with a 403, the above code attempts to parse _that_ response as a JWT, throwing an unhandled exception.\r\n\r\nLet's guard the `return self._tokenize_response(r)` to ensure we are only trying to tokenize the expected 200 and 400 responses; other codes should raise an `ApiError`.\n", "before_files": [{"content": "\"\"\"\nThe eligibility application: Eligibility Verification API implementation.\n\"\"\"\nimport datetime\nimport json\nimport logging\nimport uuid\n\nfrom jwcrypto import common as jwcrypto, jwe, jws, jwt\nimport requests\n\nfrom benefits.settings import ALLOWED_HOSTS\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass ApiError(Exception):\n \"\"\"Error calling the Eligibility Verification API.\"\"\"\n\n pass\n\n\nclass TokenError(Exception):\n \"\"\"Error with API request/response token.\"\"\"\n\n pass\n\n\nclass RequestToken:\n \"\"\"Eligibility Verification API request token.\"\"\"\n\n def __init__(self, agency, verifier, sub, name):\n logger.info(\"Initialize new request token\")\n\n # send the eligibility type names\n types = list(map(lambda t: t.name, agency.types_to_verify()))\n\n # craft the main token payload\n payload = dict(\n jti=str(uuid.uuid4()),\n iss=ALLOWED_HOSTS[0],\n iat=int(datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).timestamp()),\n agency=agency.agency_id,\n eligibility=types,\n sub=sub,\n name=name,\n )\n\n logger.debug(\"Sign token payload with agency's private key\")\n header = {\"typ\": \"JWS\", \"alg\": agency.jws_signing_alg}\n signed_token = jwt.JWT(header=header, claims=payload)\n signed_token.make_signed_token(agency.private_jwk)\n signed_payload = signed_token.serialize()\n\n logger.debug(\"Encrypt signed token payload with verifier's public key\")\n header = {\"typ\": \"JWE\", \"alg\": 
verifier.jwe_encryption_alg, \"enc\": verifier.jwe_cek_enc}\n encrypted_token = jwt.JWT(header=header, claims=signed_payload)\n encrypted_token.make_encrypted_token(verifier.public_jwk)\n\n logger.info(\"Signed and encrypted request token initialized\")\n self._jwe = encrypted_token\n\n def __repr__(self):\n return str(self)\n\n def __str__(self):\n return self._jwe.serialize()\n\n\nclass ResponseToken:\n \"\"\"Eligibility Verification API response token.\"\"\"\n\n def __init__(self, response, agency, verifier):\n logger.info(\"Read encrypted token from response\")\n\n try:\n encrypted_signed_token = response.text\n if not encrypted_signed_token:\n raise ValueError()\n # strip extra spaces and wrapping quote chars\n encrypted_signed_token = encrypted_signed_token.strip(\"'\\n\\\"\")\n except ValueError:\n raise TokenError(\"Invalid response format\")\n\n logger.debug(\"Decrypt response token using agency's private key\")\n allowed_algs = [verifier.jwe_encryption_alg, verifier.jwe_cek_enc]\n decrypted_token = jwe.JWE(algs=allowed_algs)\n try:\n decrypted_token.deserialize(encrypted_signed_token, key=agency.private_jwk)\n except jwe.InvalidJWEData:\n raise TokenError(\"Invalid JWE token\")\n except jwe.InvalidJWEOperation:\n raise TokenError(\"JWE token decryption failed\")\n\n decrypted_payload = str(decrypted_token.payload, \"utf-8\")\n\n logger.debug(\"Verify decrypted response token's signature using verifier's public key\")\n signed_token = jws.JWS()\n try:\n signed_token.deserialize(decrypted_payload, key=verifier.public_jwk, alg=agency.jws_signing_alg)\n except jws.InvalidJWSObject:\n raise TokenError(\"Invalid JWS token\")\n except jws.InvalidJWSSignature:\n raise TokenError(\"JWS token signature verification failed\")\n\n logger.info(\"Response token decrypted and signature verified\")\n\n payload = json.loads(str(signed_token.payload, \"utf-8\"))\n self.eligibility = list(payload.get(\"eligibility\", []))\n self.error = payload.get(\"error\", None)\n\n\nclass Client:\n \"\"\"Eligibility Verification API HTTP client.\"\"\"\n\n def __init__(self, agency):\n logger.debug(f\"Initialize client for agency: {agency.short_name}\")\n self.agency = agency\n self.verifier = agency.eligibility_verifier\n\n def _tokenize_request(self, sub, name):\n \"\"\"Create a request token.\"\"\"\n return RequestToken(self.agency, self.verifier, sub, name)\n\n def _tokenize_response(self, response):\n \"\"\"Parse a response token.\"\"\"\n return ResponseToken(response, self.agency, self.verifier)\n\n def _auth_headers(self, token):\n \"\"\"Create headers for the request with the token and verifier API keys\"\"\"\n headers = dict(Authorization=f\"Bearer {token}\")\n headers[self.verifier.api_auth_header] = self.verifier.api_auth_key\n return headers\n\n def _request(self, sub, name):\n \"\"\"Make an API request for eligibility verification.\"\"\"\n logger.debug(\"Start new eligibility verification request\")\n\n try:\n token = self._tokenize_request(sub, name)\n except jwcrypto.JWException:\n raise TokenError(\"Failed to tokenize form values\")\n\n try:\n logger.debug(f\"GET request to {self.verifier.api_url}\")\n r = requests.get(self.verifier.api_url, headers=self._auth_headers(token))\n except requests.ConnectionError:\n raise ApiError(\"Connection to verification server failed\")\n except requests.Timeout:\n raise ApiError(\"Connection to verification server timed out\")\n except requests.TooManyRedirects:\n raise ApiError(\"Too many redirects to verification server\")\n except requests.HTTPError as e:\n 
raise ApiError(e)\n\n logger.debug(\"Process eligiblity verification response\")\n return self._tokenize_response(r)\n\n def verify(self, sub, name):\n \"\"\"Check eligibility for the subject and name.\"\"\"\n return self._request(sub, name)\n", "path": "benefits/eligibility/api.py"}], "after_files": [{"content": "\"\"\"\nThe eligibility application: Eligibility Verification API implementation.\n\"\"\"\nimport datetime\nimport json\nimport logging\nimport uuid\n\nfrom jwcrypto import common as jwcrypto, jwe, jws, jwt\nimport requests\n\nfrom benefits.settings import ALLOWED_HOSTS\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass ApiError(Exception):\n \"\"\"Error calling the Eligibility Verification API.\"\"\"\n\n pass\n\n\nclass TokenError(Exception):\n \"\"\"Error with API request/response token.\"\"\"\n\n pass\n\n\nclass RequestToken:\n \"\"\"Eligibility Verification API request token.\"\"\"\n\n def __init__(self, agency, verifier, sub, name):\n logger.info(\"Initialize new request token\")\n\n # send the eligibility type names\n types = list(map(lambda t: t.name, agency.types_to_verify()))\n\n # craft the main token payload\n payload = dict(\n jti=str(uuid.uuid4()),\n iss=ALLOWED_HOSTS[0],\n iat=int(datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).timestamp()),\n agency=agency.agency_id,\n eligibility=types,\n sub=sub,\n name=name,\n )\n\n logger.debug(\"Sign token payload with agency's private key\")\n header = {\"typ\": \"JWS\", \"alg\": agency.jws_signing_alg}\n signed_token = jwt.JWT(header=header, claims=payload)\n signed_token.make_signed_token(agency.private_jwk)\n signed_payload = signed_token.serialize()\n\n logger.debug(\"Encrypt signed token payload with verifier's public key\")\n header = {\"typ\": \"JWE\", \"alg\": verifier.jwe_encryption_alg, \"enc\": verifier.jwe_cek_enc}\n encrypted_token = jwt.JWT(header=header, claims=signed_payload)\n encrypted_token.make_encrypted_token(verifier.public_jwk)\n\n logger.info(\"Signed and encrypted request token initialized\")\n self._jwe = encrypted_token\n\n def __repr__(self):\n return str(self)\n\n def __str__(self):\n return self._jwe.serialize()\n\n\nclass ResponseToken:\n \"\"\"Eligibility Verification API response token.\"\"\"\n\n def __init__(self, response, agency, verifier):\n logger.info(\"Read encrypted token from response\")\n\n try:\n encrypted_signed_token = response.text\n if not encrypted_signed_token:\n raise ValueError()\n # strip extra spaces and wrapping quote chars\n encrypted_signed_token = encrypted_signed_token.strip(\"'\\n\\\"\")\n except ValueError:\n raise TokenError(\"Invalid response format\")\n\n logger.debug(\"Decrypt response token using agency's private key\")\n allowed_algs = [verifier.jwe_encryption_alg, verifier.jwe_cek_enc]\n decrypted_token = jwe.JWE(algs=allowed_algs)\n try:\n decrypted_token.deserialize(encrypted_signed_token, key=agency.private_jwk)\n except jwe.InvalidJWEData:\n raise TokenError(\"Invalid JWE token\")\n except jwe.InvalidJWEOperation:\n raise TokenError(\"JWE token decryption failed\")\n\n decrypted_payload = str(decrypted_token.payload, \"utf-8\")\n\n logger.debug(\"Verify decrypted response token's signature using verifier's public key\")\n signed_token = jws.JWS()\n try:\n signed_token.deserialize(decrypted_payload, key=verifier.public_jwk, alg=agency.jws_signing_alg)\n except jws.InvalidJWSObject:\n raise TokenError(\"Invalid JWS token\")\n except jws.InvalidJWSSignature:\n raise TokenError(\"JWS token signature verification failed\")\n\n 
logger.info(\"Response token decrypted and signature verified\")\n\n payload = json.loads(str(signed_token.payload, \"utf-8\"))\n self.eligibility = list(payload.get(\"eligibility\", []))\n self.error = payload.get(\"error\", None)\n\n\nclass Client:\n \"\"\"Eligibility Verification API HTTP client.\"\"\"\n\n def __init__(self, agency):\n logger.debug(f\"Initialize client for agency: {agency.short_name}\")\n self.agency = agency\n self.verifier = agency.eligibility_verifier\n\n def _tokenize_request(self, sub, name):\n \"\"\"Create a request token.\"\"\"\n return RequestToken(self.agency, self.verifier, sub, name)\n\n def _tokenize_response(self, response):\n \"\"\"Parse a response token.\"\"\"\n return ResponseToken(response, self.agency, self.verifier)\n\n def _auth_headers(self, token):\n \"\"\"Create headers for the request with the token and verifier API keys\"\"\"\n headers = dict(Authorization=f\"Bearer {token}\")\n headers[self.verifier.api_auth_header] = self.verifier.api_auth_key\n return headers\n\n def _request(self, sub, name):\n \"\"\"Make an API request for eligibility verification.\"\"\"\n logger.debug(\"Start new eligibility verification request\")\n\n try:\n token = self._tokenize_request(sub, name)\n except jwcrypto.JWException:\n raise TokenError(\"Failed to tokenize form values\")\n\n try:\n logger.debug(f\"GET request to {self.verifier.api_url}\")\n r = requests.get(self.verifier.api_url, headers=self._auth_headers(token))\n except requests.ConnectionError:\n raise ApiError(\"Connection to verification server failed\")\n except requests.Timeout:\n raise ApiError(\"Connection to verification server timed out\")\n except requests.TooManyRedirects:\n raise ApiError(\"Too many redirects to verification server\")\n except requests.HTTPError as e:\n raise ApiError(e)\n\n expected_status_codes = {200, 400}\n if r.status_code in expected_status_codes:\n logger.debug(\"Process eligiblity verification response\")\n return self._tokenize_response(r)\n else:\n logger.warning(f\"Unexpected eligibility verification response status code: {r.status_code}\")\n raise ApiError(\"Unexpected eligibility verification response\")\n\n def verify(self, sub, name):\n \"\"\"Check eligibility for the subject and name.\"\"\"\n return self._request(sub, name)\n", "path": "benefits/eligibility/api.py"}]}
| 2,138 | 200 |
gh_patches_debug_10981
|
rasdani/github-patches
|
git_diff
|
sbi-dev__sbi-442
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Training SNRE_A on GPU fails
Hi! Using SNRE_A with `device="gpu"` currently fails :-(
The error is as follows:
```
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!
```
The origin of the issue is in `SNRE_A._loss` which instantiates `labels` without moving it to the device. Adding
```
labels = labels.to(self._device)
```
below line 126 of `sbi/inference/snre/snre_a.py` fixes the issue.
--- END ISSUE ---
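A self-contained illustration of the failure mode and the suggested one-line fix; this mirrors the shape of the problem rather than the verbatim `SNRE_A._loss` body, and it only exercises the failing branch when CUDA is available:

```python
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"

logits = torch.randn(8, device=device)  # stand-in for classifier output living on `device`
labels = torch.ones(8)                   # created on the CPU, analogous to `labels` in `_loss`

# Without the next line, a GPU run fails with:
# RuntimeError: Expected all tensors to be on the same device, ...
labels = labels.to(device)               # the fix proposed in the issue

loss = torch.nn.functional.binary_cross_entropy_with_logits(logits, labels)
print(loss.item())
```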
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sbi/inference/snre/snre_a.py`
Content:
```
1 from typing import Any, Callable, Dict, Optional, Union
2
3 import torch
4 from torch import Tensor, nn, ones
5
6 from sbi.inference.posteriors.base_posterior import NeuralPosterior
7 from sbi.inference.snre.snre_base import RatioEstimator
8 from sbi.types import TensorboardSummaryWriter
9 from sbi.utils import del_entries
10
11
12 class SNRE_A(RatioEstimator):
13 def __init__(
14 self,
15 prior,
16 classifier: Union[str, Callable] = "resnet",
17 device: str = "cpu",
18 logging_level: Union[int, str] = "warning",
19 summary_writer: Optional[TensorboardSummaryWriter] = None,
20 show_progress_bars: bool = True,
21 **unused_args
22 ):
23 r"""AALR[1], here known as SNRE_A.
24
25 [1] _Likelihood-free MCMC with Amortized Approximate Likelihood Ratios_, Hermans
26 et al., ICML 2020, https://arxiv.org/abs/1903.04057
27
28 Args:
29 prior: A probability distribution that expresses prior knowledge about the
30 parameters, e.g. which ranges are meaningful for them. Any
31 object with `.log_prob()`and `.sample()` (for example, a PyTorch
32 distribution) can be used.
33 classifier: Classifier trained to approximate likelihood ratios. If it is
34 a string, use a pre-configured network of the provided type (one of
35 linear, mlp, resnet). Alternatively, a function that builds a custom
36 neural network can be provided. The function will be called with the
37 first batch of simulations (theta, x), which can thus be used for shape
38 inference and potentially for z-scoring. It needs to return a PyTorch
39 `nn.Module` implementing the classifier.
40 device: torch device on which to compute, e.g. gpu, cpu.
41 logging_level: Minimum severity of messages to log. One of the strings
42 INFO, WARNING, DEBUG, ERROR and CRITICAL.
43 summary_writer: A tensorboard `SummaryWriter` to control, among others, log
44 file location (default is `<current working directory>/logs`.)
45 show_progress_bars: Whether to show a progressbar during simulation and
46 sampling.
47 unused_args: Absorbs additional arguments. No entries will be used. If it
48 is not empty, we warn. In future versions, when the new interface of
49 0.14.0 is more mature, we will remove this argument.
50 """
51
52 kwargs = del_entries(locals(), entries=("self", "__class__", "unused_args"))
53 super().__init__(**kwargs, **unused_args)
54
55 def train(
56 self,
57 training_batch_size: int = 50,
58 learning_rate: float = 5e-4,
59 validation_fraction: float = 0.1,
60 stop_after_epochs: int = 20,
61 max_num_epochs: Optional[int] = None,
62 clip_max_norm: Optional[float] = 5.0,
63 exclude_invalid_x: bool = True,
64 resume_training: bool = False,
65 discard_prior_samples: bool = False,
66 retrain_from_scratch_each_round: bool = False,
67 show_train_summary: bool = False,
68 ) -> NeuralPosterior:
69 r"""
70 Return classifier that approximates the ratio $p(\theta,x)/p(\theta)p(x)$.
71
72 Args:
73 training_batch_size: Training batch size.
74 learning_rate: Learning rate for Adam optimizer.
75 validation_fraction: The fraction of data to use for validation.
76 stop_after_epochs: The number of epochs to wait for improvement on the
77 validation set before terminating training.
78 max_num_epochs: Maximum number of epochs to run. If reached, we stop
79 training even when the validation loss is still decreasing. If None, we
80 train until validation loss increases (see also `stop_after_epochs`).
81 clip_max_norm: Value at which to clip the total gradient norm in order to
82 prevent exploding gradients. Use None for no clipping.
83 exclude_invalid_x: Whether to exclude simulation outputs `x=NaN` or `x=±∞`
84 during training. Expect errors, silent or explicit, when `False`.
85 resume_training: Can be used in case training time is limited, e.g. on a
86 cluster. If `True`, the split between train and validation set, the
87 optimizer, the number of epochs, and the best validation log-prob will
88 be restored from the last time `.train()` was called.
89 discard_prior_samples: Whether to discard samples simulated in round 1, i.e.
90 from the prior. Training may be sped up by ignoring such less targeted
91 samples.
92 retrain_from_scratch_each_round: Whether to retrain the conditional density
93 estimator for the posterior from scratch each round.
94 show_train_summary: Whether to print the number of epochs and validation
95 loss and leakage after the training.
96
97 Returns:
98 Classifier that approximates the ratio $p(\theta,x)/p(\theta)p(x)$.
99 """
100
101 # AALR is defined for `num_atoms=2`.
102 # Proxy to `super().__call__` to ensure right parameter.
103 kwargs = del_entries(locals(), entries=("self", "__class__"))
104 return super().train(**kwargs, num_atoms=2)
105
106 def _loss(self, theta: Tensor, x: Tensor, num_atoms: int) -> Tensor:
107 """
108 Returns the binary cross-entropy loss for the trained classifier.
109
110 The classifier takes as input a $(\theta,x)$ pair. It is trained to predict 1
111 if the pair was sampled from the joint $p(\theta,x)$, and to predict 0 if the
112 pair was sampled from the marginals $p(\theta)p(x)$.
113 """
114
115 assert theta.shape[0] == x.shape[0], "Batch sizes for theta and x must match."
116 batch_size = theta.shape[0]
117
118 logits = self._classifier_logits(theta, x, num_atoms)
119 likelihood = torch.sigmoid(logits).squeeze()
120
121 # Alternating pairs where there is one sampled from the joint and one
122 # sampled from the marginals. The first element is sampled from the
123 # joint p(theta, x) and is labelled 1. The second element is sampled
124 # from the marginals p(theta)p(x) and is labelled 0. And so on.
125 labels = ones(2 * batch_size) # two atoms
126 labels[1::2] = 0.0
127
128 # Binary cross entropy to learn the likelihood (AALR-specific)
129 return nn.BCELoss()(likelihood, labels)
130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sbi/inference/snre/snre_a.py b/sbi/inference/snre/snre_a.py
--- a/sbi/inference/snre/snre_a.py
+++ b/sbi/inference/snre/snre_a.py
@@ -122,7 +122,7 @@
# sampled from the marginals. The first element is sampled from the
# joint p(theta, x) and is labelled 1. The second element is sampled
# from the marginals p(theta)p(x) and is labelled 0. And so on.
- labels = ones(2 * batch_size) # two atoms
+ labels = ones(2 * batch_size, device=self._device) # two atoms
labels[1::2] = 0.0
# Binary cross entropy to learn the likelihood (AALR-specific)
|
{"golden_diff": "diff --git a/sbi/inference/snre/snre_a.py b/sbi/inference/snre/snre_a.py\n--- a/sbi/inference/snre/snre_a.py\n+++ b/sbi/inference/snre/snre_a.py\n@@ -122,7 +122,7 @@\n # sampled from the marginals. The first element is sampled from the\n # joint p(theta, x) and is labelled 1. The second element is sampled\n # from the marginals p(theta)p(x) and is labelled 0. And so on.\n- labels = ones(2 * batch_size) # two atoms\n+ labels = ones(2 * batch_size, device=self._device) # two atoms\n labels[1::2] = 0.0\n \n # Binary cross entropy to learn the likelihood (AALR-specific)\n", "issue": "Training SNRE_A on GPU fails\nHi! Using SNRE_A with `device=\"gpu\"` currently fails :-(\r\n\r\nThe error is as follows:\r\n```\r\nRuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!\r\n```\r\nThe origin of the issue is in `SNRE_A._loss` which instantiates `labels` without moving it to the device. Adding \r\n```\r\nlabels = labels.to(self._device)\r\n```\r\nbelow line 126 of `sbi/inference/snre/snre_a.py` fixes the issue.\n", "before_files": [{"content": "from typing import Any, Callable, Dict, Optional, Union\n\nimport torch\nfrom torch import Tensor, nn, ones\n\nfrom sbi.inference.posteriors.base_posterior import NeuralPosterior\nfrom sbi.inference.snre.snre_base import RatioEstimator\nfrom sbi.types import TensorboardSummaryWriter\nfrom sbi.utils import del_entries\n\n\nclass SNRE_A(RatioEstimator):\n def __init__(\n self,\n prior,\n classifier: Union[str, Callable] = \"resnet\",\n device: str = \"cpu\",\n logging_level: Union[int, str] = \"warning\",\n summary_writer: Optional[TensorboardSummaryWriter] = None,\n show_progress_bars: bool = True,\n **unused_args\n ):\n r\"\"\"AALR[1], here known as SNRE_A.\n\n [1] _Likelihood-free MCMC with Amortized Approximate Likelihood Ratios_, Hermans\n et al., ICML 2020, https://arxiv.org/abs/1903.04057\n\n Args:\n prior: A probability distribution that expresses prior knowledge about the\n parameters, e.g. which ranges are meaningful for them. Any\n object with `.log_prob()`and `.sample()` (for example, a PyTorch\n distribution) can be used.\n classifier: Classifier trained to approximate likelihood ratios. If it is\n a string, use a pre-configured network of the provided type (one of\n linear, mlp, resnet). Alternatively, a function that builds a custom\n neural network can be provided. The function will be called with the\n first batch of simulations (theta, x), which can thus be used for shape\n inference and potentially for z-scoring. It needs to return a PyTorch\n `nn.Module` implementing the classifier.\n device: torch device on which to compute, e.g. gpu, cpu.\n logging_level: Minimum severity of messages to log. One of the strings\n INFO, WARNING, DEBUG, ERROR and CRITICAL.\n summary_writer: A tensorboard `SummaryWriter` to control, among others, log\n file location (default is `<current working directory>/logs`.)\n show_progress_bars: Whether to show a progressbar during simulation and\n sampling.\n unused_args: Absorbs additional arguments. No entries will be used. If it\n is not empty, we warn. 
In future versions, when the new interface of\n 0.14.0 is more mature, we will remove this argument.\n \"\"\"\n\n kwargs = del_entries(locals(), entries=(\"self\", \"__class__\", \"unused_args\"))\n super().__init__(**kwargs, **unused_args)\n\n def train(\n self,\n training_batch_size: int = 50,\n learning_rate: float = 5e-4,\n validation_fraction: float = 0.1,\n stop_after_epochs: int = 20,\n max_num_epochs: Optional[int] = None,\n clip_max_norm: Optional[float] = 5.0,\n exclude_invalid_x: bool = True,\n resume_training: bool = False,\n discard_prior_samples: bool = False,\n retrain_from_scratch_each_round: bool = False,\n show_train_summary: bool = False,\n ) -> NeuralPosterior:\n r\"\"\"\n Return classifier that approximates the ratio $p(\\theta,x)/p(\\theta)p(x)$.\n\n Args:\n training_batch_size: Training batch size.\n learning_rate: Learning rate for Adam optimizer.\n validation_fraction: The fraction of data to use for validation.\n stop_after_epochs: The number of epochs to wait for improvement on the\n validation set before terminating training.\n max_num_epochs: Maximum number of epochs to run. If reached, we stop\n training even when the validation loss is still decreasing. If None, we\n train until validation loss increases (see also `stop_after_epochs`).\n clip_max_norm: Value at which to clip the total gradient norm in order to\n prevent exploding gradients. Use None for no clipping.\n exclude_invalid_x: Whether to exclude simulation outputs `x=NaN` or `x=\u00b1\u221e`\n during training. Expect errors, silent or explicit, when `False`.\n resume_training: Can be used in case training time is limited, e.g. on a\n cluster. If `True`, the split between train and validation set, the\n optimizer, the number of epochs, and the best validation log-prob will\n be restored from the last time `.train()` was called.\n discard_prior_samples: Whether to discard samples simulated in round 1, i.e.\n from the prior. Training may be sped up by ignoring such less targeted\n samples.\n retrain_from_scratch_each_round: Whether to retrain the conditional density\n estimator for the posterior from scratch each round.\n show_train_summary: Whether to print the number of epochs and validation\n loss and leakage after the training.\n\n Returns:\n Classifier that approximates the ratio $p(\\theta,x)/p(\\theta)p(x)$.\n \"\"\"\n\n # AALR is defined for `num_atoms=2`.\n # Proxy to `super().__call__` to ensure right parameter.\n kwargs = del_entries(locals(), entries=(\"self\", \"__class__\"))\n return super().train(**kwargs, num_atoms=2)\n\n def _loss(self, theta: Tensor, x: Tensor, num_atoms: int) -> Tensor:\n \"\"\"\n Returns the binary cross-entropy loss for the trained classifier.\n\n The classifier takes as input a $(\\theta,x)$ pair. It is trained to predict 1\n if the pair was sampled from the joint $p(\\theta,x)$, and to predict 0 if the\n pair was sampled from the marginals $p(\\theta)p(x)$.\n \"\"\"\n\n assert theta.shape[0] == x.shape[0], \"Batch sizes for theta and x must match.\"\n batch_size = theta.shape[0]\n\n logits = self._classifier_logits(theta, x, num_atoms)\n likelihood = torch.sigmoid(logits).squeeze()\n\n # Alternating pairs where there is one sampled from the joint and one\n # sampled from the marginals. The first element is sampled from the\n # joint p(theta, x) and is labelled 1. The second element is sampled\n # from the marginals p(theta)p(x) and is labelled 0. 
And so on.\n labels = ones(2 * batch_size) # two atoms\n labels[1::2] = 0.0\n\n # Binary cross entropy to learn the likelihood (AALR-specific)\n return nn.BCELoss()(likelihood, labels)\n", "path": "sbi/inference/snre/snre_a.py"}], "after_files": [{"content": "from typing import Any, Callable, Dict, Optional, Union\n\nimport torch\nfrom torch import Tensor, nn, ones\n\nfrom sbi.inference.posteriors.base_posterior import NeuralPosterior\nfrom sbi.inference.snre.snre_base import RatioEstimator\nfrom sbi.types import TensorboardSummaryWriter\nfrom sbi.utils import del_entries\n\n\nclass SNRE_A(RatioEstimator):\n def __init__(\n self,\n prior,\n classifier: Union[str, Callable] = \"resnet\",\n device: str = \"cpu\",\n logging_level: Union[int, str] = \"warning\",\n summary_writer: Optional[TensorboardSummaryWriter] = None,\n show_progress_bars: bool = True,\n **unused_args\n ):\n r\"\"\"AALR[1], here known as SNRE_A.\n\n [1] _Likelihood-free MCMC with Amortized Approximate Likelihood Ratios_, Hermans\n et al., ICML 2020, https://arxiv.org/abs/1903.04057\n\n Args:\n prior: A probability distribution that expresses prior knowledge about the\n parameters, e.g. which ranges are meaningful for them. Any\n object with `.log_prob()`and `.sample()` (for example, a PyTorch\n distribution) can be used.\n classifier: Classifier trained to approximate likelihood ratios. If it is\n a string, use a pre-configured network of the provided type (one of\n linear, mlp, resnet). Alternatively, a function that builds a custom\n neural network can be provided. The function will be called with the\n first batch of simulations (theta, x), which can thus be used for shape\n inference and potentially for z-scoring. It needs to return a PyTorch\n `nn.Module` implementing the classifier.\n device: torch device on which to compute, e.g. gpu, cpu.\n logging_level: Minimum severity of messages to log. One of the strings\n INFO, WARNING, DEBUG, ERROR and CRITICAL.\n summary_writer: A tensorboard `SummaryWriter` to control, among others, log\n file location (default is `<current working directory>/logs`.)\n show_progress_bars: Whether to show a progressbar during simulation and\n sampling.\n unused_args: Absorbs additional arguments. No entries will be used. If it\n is not empty, we warn. In future versions, when the new interface of\n 0.14.0 is more mature, we will remove this argument.\n \"\"\"\n\n kwargs = del_entries(locals(), entries=(\"self\", \"__class__\", \"unused_args\"))\n super().__init__(**kwargs, **unused_args)\n\n def train(\n self,\n training_batch_size: int = 50,\n learning_rate: float = 5e-4,\n validation_fraction: float = 0.1,\n stop_after_epochs: int = 20,\n max_num_epochs: Optional[int] = None,\n clip_max_norm: Optional[float] = 5.0,\n exclude_invalid_x: bool = True,\n resume_training: bool = False,\n discard_prior_samples: bool = False,\n retrain_from_scratch_each_round: bool = False,\n show_train_summary: bool = False,\n ) -> NeuralPosterior:\n r\"\"\"\n Return classifier that approximates the ratio $p(\\theta,x)/p(\\theta)p(x)$.\n\n Args:\n training_batch_size: Training batch size.\n learning_rate: Learning rate for Adam optimizer.\n validation_fraction: The fraction of data to use for validation.\n stop_after_epochs: The number of epochs to wait for improvement on the\n validation set before terminating training.\n max_num_epochs: Maximum number of epochs to run. If reached, we stop\n training even when the validation loss is still decreasing. 
If None, we\n train until validation loss increases (see also `stop_after_epochs`).\n clip_max_norm: Value at which to clip the total gradient norm in order to\n prevent exploding gradients. Use None for no clipping.\n exclude_invalid_x: Whether to exclude simulation outputs `x=NaN` or `x=\u00b1\u221e`\n during training. Expect errors, silent or explicit, when `False`.\n resume_training: Can be used in case training time is limited, e.g. on a\n cluster. If `True`, the split between train and validation set, the\n optimizer, the number of epochs, and the best validation log-prob will\n be restored from the last time `.train()` was called.\n discard_prior_samples: Whether to discard samples simulated in round 1, i.e.\n from the prior. Training may be sped up by ignoring such less targeted\n samples.\n retrain_from_scratch_each_round: Whether to retrain the conditional density\n estimator for the posterior from scratch each round.\n show_train_summary: Whether to print the number of epochs and validation\n loss and leakage after the training.\n\n Returns:\n Classifier that approximates the ratio $p(\\theta,x)/p(\\theta)p(x)$.\n \"\"\"\n\n # AALR is defined for `num_atoms=2`.\n # Proxy to `super().__call__` to ensure right parameter.\n kwargs = del_entries(locals(), entries=(\"self\", \"__class__\"))\n return super().train(**kwargs, num_atoms=2)\n\n def _loss(self, theta: Tensor, x: Tensor, num_atoms: int) -> Tensor:\n \"\"\"\n Returns the binary cross-entropy loss for the trained classifier.\n\n The classifier takes as input a $(\\theta,x)$ pair. It is trained to predict 1\n if the pair was sampled from the joint $p(\\theta,x)$, and to predict 0 if the\n pair was sampled from the marginals $p(\\theta)p(x)$.\n \"\"\"\n\n assert theta.shape[0] == x.shape[0], \"Batch sizes for theta and x must match.\"\n batch_size = theta.shape[0]\n\n logits = self._classifier_logits(theta, x, num_atoms)\n likelihood = torch.sigmoid(logits).squeeze()\n\n # Alternating pairs where there is one sampled from the joint and one\n # sampled from the marginals. The first element is sampled from the\n # joint p(theta, x) and is labelled 1. The second element is sampled\n # from the marginals p(theta)p(x) and is labelled 0. And so on.\n labels = ones(2 * batch_size, device=self._device) # two atoms\n labels[1::2] = 0.0\n\n # Binary cross entropy to learn the likelihood (AALR-specific)\n return nn.BCELoss()(likelihood, labels)\n", "path": "sbi/inference/snre/snre_a.py"}]}
| 2,121 | 188 |
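The one-line fix in this record creates the label tensor on the estimator's device instead of the CPU. A minimal sketch of the same device-placement pattern outside of sbi, with an illustrative batch size and device selection:

```python
import torch
from torch import nn, ones

device = "cuda" if torch.cuda.is_available() else "cpu"

batch_size = 8
likelihood = torch.sigmoid(torch.randn(2 * batch_size, device=device))

# Build the labels directly on the same device as the predictions,
# instead of creating them on the CPU and mixing devices in the loss.
labels = ones(2 * batch_size, device=device)
labels[1::2] = 0.0

loss = nn.BCELoss()(likelihood, labels)
```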
gh_patches_debug_10501
|
rasdani/github-patches
|
git_diff
|
pypa__virtualenv-1964
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AttributeError: 'NoneType' object has no attribute 'group' with virtualenv 20.0.32 on CygWin
**Issue**
We are also testing on CygWin (using appveyor), and since this morning, tox fails creating a virtualenv with an AttributeError. Unfortunately, tox does not display the entire traceback, but just the exception.
Since virtualenv 20.0.32 was released just 4h ago, I suspect that to be the culprit.
From https://ci.appveyor.com/project/andy-maier/pywbem/builds/35526352/job/l3k6a2vb39bweqsw#L936:
```
if "%UNIX_PATH%"=="C:\cygwin64\bin" ( bash -c "which tox && tox -vv -e %TOX_ENV% && echo appveyor.yml: tox rc=$?" )
/usr/bin/tox
using tox.ini: /cygdrive/c/projects/pywbem/tox.ini (pid 1822)
using tox-3.20.0 from /usr/lib/python3.8/site-packages/tox/__init__.py (pid 1822)
skipping sdist step
cygwin64_py38 uses /usr/bin/python3.8.exe
cygwin64_py38 start: getenv /cygdrive/c/projects/pywbem/.tox/cygwin64_py38
cygwin64_py38 cannot reuse: no previous config /cygdrive/c/projects/pywbem/.tox/cygwin64_py38/.tox-config1
cygwin64_py38 create: /cygdrive/c/projects/pywbem/.tox/cygwin64_py38
setting PATH=/cygdrive/c/projects/pywbem/.tox/cygwin64_py38/bin:/usr/bin:/cygdrive/c/Windows/system32:/cygdrive/c/Windows:/cygdrive/c/ProgramData/chocolatey/bin
[1825] /cygdrive/c/projects/pywbem/.tox$ /usr/bin/python3.8.exe -m virtualenv --no-download --python /usr/bin/python3.8.exe cygwin64_py38
AttributeError: 'NoneType' object has no attribute 'group'
ERROR: invocation failed (exit code 1)
ERROR: InvocationError for command /usr/bin/python3.8.exe -m virtualenv --no-download --python /usr/bin/python3.8.exe cygwin64_py38 (exited with code 1)
cygwin64_py38 finish: getenv /cygdrive/c/projects/pywbem/.tox/cygwin64_py38 after 4.23 seconds
```
I am setting up a direct invocation of virtualenv in that environment, in order to get the full traceback, and will post that here.
**Environment**
Provide at least:
- OS: CygWin64
- ``pip list`` of the host python where ``virtualenv`` is installed:
```console
See next comment, below
```
**Output of the virtual environment creation**
Make sure to run the creation with `-vvv --with-traceback`:
```console
See next comment, below
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/virtualenv/activation/via_template.py`
Content:
```
1 from __future__ import absolute_import, unicode_literals
2
3 import os
4 import re
5 import sys
6 import sysconfig
7 from abc import ABCMeta, abstractmethod
8
9 from six import add_metaclass
10
11 from virtualenv.util.six import ensure_text
12
13 from .activator import Activator
14
15 if sys.version_info >= (3, 7):
16 from importlib.resources import read_binary
17 else:
18 from importlib_resources import read_binary
19
20
21 @add_metaclass(ABCMeta)
22 class ViaTemplateActivator(Activator):
23 @abstractmethod
24 def templates(self):
25 raise NotImplementedError
26
27 def generate(self, creator):
28 dest_folder = creator.bin_dir
29 replacements = self.replacements(creator, dest_folder)
30 generated = self._generate(replacements, self.templates(), dest_folder, creator)
31 if self.flag_prompt is not None:
32 creator.pyenv_cfg["prompt"] = self.flag_prompt
33 return generated
34
35 def replacements(self, creator, dest_folder):
36 current_platform = sysconfig.get_platform()
37 platforms = ["mingw", "cygwin", "msys"]
38 if any(platform in current_platform for platform in platforms):
39 pattern = re.compile("^([A-Za-z]):(.*)")
40 match = pattern.match(str(creator.dest))
41 virtual_env = "/" + match.group(1).lower() + match.group(2)
42 else:
43 virtual_env = str(creator.dest)
44 return {
45 "__VIRTUAL_PROMPT__": "" if self.flag_prompt is None else self.flag_prompt,
46 "__VIRTUAL_ENV__": ensure_text(virtual_env),
47 "__VIRTUAL_NAME__": creator.env_name,
48 "__BIN_NAME__": ensure_text(str(creator.bin_dir.relative_to(creator.dest))),
49 "__PATH_SEP__": ensure_text(os.pathsep),
50 }
51
52 def _generate(self, replacements, templates, to_folder, creator):
53 generated = []
54 for template in templates:
55 text = self.instantiate_template(replacements, template, creator)
56 dest = to_folder / self.as_name(template)
57 # use write_bytes to avoid platform specific line normalization (\n -> \r\n)
58 dest.write_bytes(text.encode("utf-8"))
59 generated.append(dest)
60 return generated
61
62 def as_name(self, template):
63 return template.name
64
65 def instantiate_template(self, replacements, template, creator):
66 # read content as binary to avoid platform specific line normalization (\n -> \r\n)
67 binary = read_binary(self.__module__, str(template))
68 text = binary.decode("utf-8", errors="strict")
69 for key, value in replacements.items():
70 value = self._repr_unicode(creator, value)
71 text = text.replace(key, value)
72 return text
73
74 @staticmethod
75 def _repr_unicode(creator, value):
76 # by default we just let it be unicode
77 return value
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/virtualenv/activation/via_template.py b/src/virtualenv/activation/via_template.py
--- a/src/virtualenv/activation/via_template.py
+++ b/src/virtualenv/activation/via_template.py
@@ -38,7 +38,10 @@
if any(platform in current_platform for platform in platforms):
pattern = re.compile("^([A-Za-z]):(.*)")
match = pattern.match(str(creator.dest))
- virtual_env = "/" + match.group(1).lower() + match.group(2)
+ if match:
+ virtual_env = "/" + match.group(1).lower() + match.group(2)
+ else:
+ virtual_env = str(creator.dest)
else:
virtual_env = str(creator.dest)
return {
|
{"golden_diff": "diff --git a/src/virtualenv/activation/via_template.py b/src/virtualenv/activation/via_template.py\n--- a/src/virtualenv/activation/via_template.py\n+++ b/src/virtualenv/activation/via_template.py\n@@ -38,7 +38,10 @@\n if any(platform in current_platform for platform in platforms):\n pattern = re.compile(\"^([A-Za-z]):(.*)\")\n match = pattern.match(str(creator.dest))\n- virtual_env = \"/\" + match.group(1).lower() + match.group(2)\n+ if match:\n+ virtual_env = \"/\" + match.group(1).lower() + match.group(2)\n+ else:\n+ virtual_env = str(creator.dest)\n else:\n virtual_env = str(creator.dest)\n return {\n", "issue": "AttributeError: 'NoneType' object has no attribute 'group' with virtualenv 20.0.32 on CygWin\n**Issue**\r\nWe are also testing on CygWin (using appveyor), and since this morning, tox fails creating a virtualenv with an AttributeError. Unfortunately, tox does not display the entire traceback, but just the exception.\r\nSince virtualenv 20.0.32 was released just 4h ago, I suspect that to be the culprit.\r\n\r\nFrom https://ci.appveyor.com/project/andy-maier/pywbem/builds/35526352/job/l3k6a2vb39bweqsw#L936:\r\n```\r\nif \"%UNIX_PATH%\"==\"C:\\cygwin64\\bin\" ( bash -c \"which tox && tox -vv -e %TOX_ENV% && echo appveyor.yml: tox rc=$?\" )\r\n/usr/bin/tox\r\nusing tox.ini: /cygdrive/c/projects/pywbem/tox.ini (pid 1822)\r\nusing tox-3.20.0 from /usr/lib/python3.8/site-packages/tox/__init__.py (pid 1822)\r\nskipping sdist step\r\ncygwin64_py38 uses /usr/bin/python3.8.exe\r\ncygwin64_py38 start: getenv /cygdrive/c/projects/pywbem/.tox/cygwin64_py38\r\ncygwin64_py38 cannot reuse: no previous config /cygdrive/c/projects/pywbem/.tox/cygwin64_py38/.tox-config1\r\ncygwin64_py38 create: /cygdrive/c/projects/pywbem/.tox/cygwin64_py38\r\nsetting PATH=/cygdrive/c/projects/pywbem/.tox/cygwin64_py38/bin:/usr/bin:/cygdrive/c/Windows/system32:/cygdrive/c/Windows:/cygdrive/c/ProgramData/chocolatey/bin\r\n[1825] /cygdrive/c/projects/pywbem/.tox$ /usr/bin/python3.8.exe -m virtualenv --no-download --python /usr/bin/python3.8.exe cygwin64_py38\r\nAttributeError: 'NoneType' object has no attribute 'group'\r\nERROR: invocation failed (exit code 1)\r\nERROR: InvocationError for command /usr/bin/python3.8.exe -m virtualenv --no-download --python /usr/bin/python3.8.exe cygwin64_py38 (exited with code 1)\r\ncygwin64_py38 finish: getenv /cygdrive/c/projects/pywbem/.tox/cygwin64_py38 after 4.23 seconds\r\n```\r\n\r\nI am setting up a direct invocation of virtualenv in that environment, in order to get the full traceback, and will post that here.\r\n\r\n**Environment**\r\n\r\nProvide at least:\r\n- OS: CygWin64\r\n- ``pip list`` of the host python where ``virtualenv`` is installed:\r\n ```console\r\n See next comment, below\r\n ```\r\n\r\n**Output of the virtual environment creation**\r\n\r\nMake sure to run the creation with `-vvv --with-traceback`:\r\n\r\n```console\r\nSee next comment, below\r\n```\n", "before_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nimport os\nimport re\nimport sys\nimport sysconfig\nfrom abc import ABCMeta, abstractmethod\n\nfrom six import add_metaclass\n\nfrom virtualenv.util.six import ensure_text\n\nfrom .activator import Activator\n\nif sys.version_info >= (3, 7):\n from importlib.resources import read_binary\nelse:\n from importlib_resources import read_binary\n\n\n@add_metaclass(ABCMeta)\nclass ViaTemplateActivator(Activator):\n @abstractmethod\n def templates(self):\n raise NotImplementedError\n\n def generate(self, 
creator):\n dest_folder = creator.bin_dir\n replacements = self.replacements(creator, dest_folder)\n generated = self._generate(replacements, self.templates(), dest_folder, creator)\n if self.flag_prompt is not None:\n creator.pyenv_cfg[\"prompt\"] = self.flag_prompt\n return generated\n\n def replacements(self, creator, dest_folder):\n current_platform = sysconfig.get_platform()\n platforms = [\"mingw\", \"cygwin\", \"msys\"]\n if any(platform in current_platform for platform in platforms):\n pattern = re.compile(\"^([A-Za-z]):(.*)\")\n match = pattern.match(str(creator.dest))\n virtual_env = \"/\" + match.group(1).lower() + match.group(2)\n else:\n virtual_env = str(creator.dest)\n return {\n \"__VIRTUAL_PROMPT__\": \"\" if self.flag_prompt is None else self.flag_prompt,\n \"__VIRTUAL_ENV__\": ensure_text(virtual_env),\n \"__VIRTUAL_NAME__\": creator.env_name,\n \"__BIN_NAME__\": ensure_text(str(creator.bin_dir.relative_to(creator.dest))),\n \"__PATH_SEP__\": ensure_text(os.pathsep),\n }\n\n def _generate(self, replacements, templates, to_folder, creator):\n generated = []\n for template in templates:\n text = self.instantiate_template(replacements, template, creator)\n dest = to_folder / self.as_name(template)\n # use write_bytes to avoid platform specific line normalization (\\n -> \\r\\n)\n dest.write_bytes(text.encode(\"utf-8\"))\n generated.append(dest)\n return generated\n\n def as_name(self, template):\n return template.name\n\n def instantiate_template(self, replacements, template, creator):\n # read content as binary to avoid platform specific line normalization (\\n -> \\r\\n)\n binary = read_binary(self.__module__, str(template))\n text = binary.decode(\"utf-8\", errors=\"strict\")\n for key, value in replacements.items():\n value = self._repr_unicode(creator, value)\n text = text.replace(key, value)\n return text\n\n @staticmethod\n def _repr_unicode(creator, value):\n # by default we just let it be unicode\n return value\n", "path": "src/virtualenv/activation/via_template.py"}], "after_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nimport os\nimport re\nimport sys\nimport sysconfig\nfrom abc import ABCMeta, abstractmethod\n\nfrom six import add_metaclass\n\nfrom virtualenv.util.six import ensure_text\n\nfrom .activator import Activator\n\nif sys.version_info >= (3, 7):\n from importlib.resources import read_binary\nelse:\n from importlib_resources import read_binary\n\n\n@add_metaclass(ABCMeta)\nclass ViaTemplateActivator(Activator):\n @abstractmethod\n def templates(self):\n raise NotImplementedError\n\n def generate(self, creator):\n dest_folder = creator.bin_dir\n replacements = self.replacements(creator, dest_folder)\n generated = self._generate(replacements, self.templates(), dest_folder, creator)\n if self.flag_prompt is not None:\n creator.pyenv_cfg[\"prompt\"] = self.flag_prompt\n return generated\n\n def replacements(self, creator, dest_folder):\n current_platform = sysconfig.get_platform()\n platforms = [\"mingw\", \"cygwin\", \"msys\"]\n if any(platform in current_platform for platform in platforms):\n pattern = re.compile(\"^([A-Za-z]):(.*)\")\n match = pattern.match(str(creator.dest))\n if match:\n virtual_env = \"/\" + match.group(1).lower() + match.group(2)\n else:\n virtual_env = str(creator.dest)\n else:\n virtual_env = str(creator.dest)\n return {\n \"__VIRTUAL_PROMPT__\": \"\" if self.flag_prompt is None else self.flag_prompt,\n \"__VIRTUAL_ENV__\": ensure_text(virtual_env),\n \"__VIRTUAL_NAME__\": creator.env_name,\n 
\"__BIN_NAME__\": ensure_text(str(creator.bin_dir.relative_to(creator.dest))),\n \"__PATH_SEP__\": ensure_text(os.pathsep),\n }\n\n def _generate(self, replacements, templates, to_folder, creator):\n generated = []\n for template in templates:\n text = self.instantiate_template(replacements, template, creator)\n dest = to_folder / self.as_name(template)\n # use write_bytes to avoid platform specific line normalization (\\n -> \\r\\n)\n dest.write_bytes(text.encode(\"utf-8\"))\n generated.append(dest)\n return generated\n\n def as_name(self, template):\n return template.name\n\n def instantiate_template(self, replacements, template, creator):\n # read content as binary to avoid platform specific line normalization (\\n -> \\r\\n)\n binary = read_binary(self.__module__, str(template))\n text = binary.decode(\"utf-8\", errors=\"strict\")\n for key, value in replacements.items():\n value = self._repr_unicode(creator, value)\n text = text.replace(key, value)\n return text\n\n @staticmethod\n def _repr_unicode(creator, value):\n # by default we just let it be unicode\n return value\n", "path": "src/virtualenv/activation/via_template.py"}]}
| 1,734 | 175 |
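The patch in this record guards the result of `re.match` before calling `.group`, since a POSIX-style destination on Cygwin does not match the drive-letter pattern. A small standalone sketch of that guard, with an illustrative helper name and example paths:

```python
import re

DRIVE_PATTERN = re.compile(r"^([A-Za-z]):(.*)")


def virtual_env_path(dest):
    """Apply the drive-letter rewrite only when the path actually matches."""
    match = DRIVE_PATTERN.match(str(dest))
    if match:
        return "/" + match.group(1).lower() + match.group(2)
    return str(dest)


print(virtual_env_path("C:\\projects\\env"))   # drive-letter path is rewritten
print(virtual_env_path("/usr/bin/python3.8"))  # POSIX path passes through unchanged
```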
gh_patches_debug_2631
|
rasdani/github-patches
|
git_diff
|
fidals__shopelectro-693
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
tests_selenium.py:976: Resurrect test `test_cart_page_open`
The puzzle `473-5159ab9c` from #473 has to be resolved:
https://github.com/fidals/shopelectro/blob/f7dc2793dc5c7eddb2e68a68368337d77ba3139e/shopelectro/tests/tests_selenium.py#L976-L976
The puzzle was created by duker33 on 08-Aug-18.
Estimate: 15 minutes,
If you have any technical questions, don't ask me, submit new tickets instead. The task will be "done" when the problem is fixed and the text of the puzzle is _removed_ from the source code. Here is more about [PDD](http://www.yegor256.com/2009/03/04/pdd.html) and [about me](http://www.yegor256.com/2017/04/05/pdd-in-action.html).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `shopelectro/models.py`
Content:
```
1 import random
2 import string
3 import typing
4 from uuid import uuid4
5
6 from django.conf import settings
7 from django.db import models
8 from django.urls import reverse
9 from django.utils.translation import ugettext_lazy as _
10 import mptt
11
12 from catalog import models as catalog_models
13 from ecommerce import models as ecommerce_models
14 from pages import models as pages_models
15
16
17 def randomize_slug(slug: str) -> str:
18 slug_hash = ''.join(
19 random.choices(string.ascii_lowercase, k=settings.SLUG_HASH_SIZE)
20 )
21 return f'{slug}_{slug_hash}'
22
23
24 class SECategoryQuerySet(catalog_models.CategoryQuerySet):
25 def get_categories_tree_with_pictures(self) -> 'SECategoryQuerySet':
26 categories_with_pictures = (
27 self
28 .filter(products__page__images__isnull=False)
29 .distinct()
30 )
31
32 return categories_with_pictures.get_ancestors(include_self=True)
33
34
35 class SECategoryManager(
36 catalog_models.CategoryManager.from_queryset(SECategoryQuerySet)
37 ):
38 pass
39
40
41 class Category(catalog_models.AbstractCategory, pages_models.SyncPageMixin):
42
43 objects = SECategoryManager()
44 uuid = models.UUIDField(default=uuid4, editable=False)
45
46 @classmethod
47 def get_default_parent(cls):
48 return pages_models.CustomPage.objects.filter(slug='catalog').first()
49
50 @property
51 def image(self):
52 products = self.products.all()
53 return products[0].image if products else None
54
55 def get_absolute_url(self):
56 return reverse('category', args=(self.page.slug,))
57
58
59 class Product(catalog_models.AbstractProduct, pages_models.SyncPageMixin):
60
61 # That's why we are needed to explicitly add objects manager here
62 # because of Django special managers behaviour.
63 # Se se#480 for details.
64 objects = catalog_models.ProductManager()
65
66 category = models.ForeignKey(
67 Category,
68 on_delete=models.CASCADE,
69 null=True,
70 related_name='products',
71 verbose_name=_('category'),
72 )
73
74 tags = models.ManyToManyField(
75 'Tag',
76 related_name='products',
77 blank=True,
78 verbose_name=_('tags'),
79 )
80
81 vendor_code = models.SmallIntegerField(verbose_name=_('vendor_code'))
82 uuid = models.UUIDField(default=uuid4, editable=False)
83 purchase_price = models.FloatField(
84 default=0, verbose_name=_('purchase_price'))
85 wholesale_small = models.FloatField(
86 default=0, verbose_name=_('wholesale_small'))
87 wholesale_medium = models.FloatField(
88 default=0, verbose_name=_('wholesale_medium'))
89 wholesale_large = models.FloatField(
90 default=0, verbose_name=_('wholesale_large'))
91
92 def get_absolute_url(self):
93 return reverse('product', args=(self.vendor_code,))
94
95 @property
96 def average_rate(self):
97 """Return rounded to first decimal averaged rating."""
98 rating = self.product_feedbacks.aggregate(
99 avg=models.Avg('rating')).get('avg', 0)
100 return round(rating, 1)
101
102 @property
103 def feedback_count(self):
104 return self.product_feedbacks.count()
105
106 @property
107 def feedback(self):
108 return self.product_feedbacks.all().order_by('-date')
109
110 def get_params(self):
111 return Tag.objects.filter_by_products([self]).get_group_tags_pairs()
112
113 def get_brand_name(self) -> str:
114 brand: typing.Optional['Tag'] = Tag.objects.get_brands([self]).get(self)
115 return brand.name if brand else ''
116
117
118 class ProductFeedback(models.Model):
119 product = models.ForeignKey(
120 Product, on_delete=models.CASCADE, null=True,
121 related_name='product_feedbacks'
122 )
123
124 date = models.DateTimeField(
125 auto_now=True, db_index=True, verbose_name=_('date'))
126 name = models.CharField(
127 max_length=255, db_index=True, verbose_name=_('name'))
128 rating = models.PositiveSmallIntegerField(
129 default=1, db_index=True, verbose_name=_('rating'))
130 dignities = models.TextField(
131 default='', blank=True, verbose_name=_('dignities'))
132 limitations = models.TextField(
133 default='', blank=True, verbose_name=_('limitations'))
134 general = models.TextField(
135 default='', blank=True, verbose_name=_('limitations'))
136
137
138 def _default_payment():
139 """Default payment option is first element of first tuple in options."""
140 assert settings.PAYMENT_OPTIONS[0][0], 'No payment options!'
141 return settings.PAYMENT_OPTIONS[0][0]
142
143
144 class Order(ecommerce_models.Order):
145 address = models.TextField(blank=True, default='')
146 payment_type = models.CharField(
147 max_length=255,
148 choices=settings.PAYMENT_OPTIONS,
149 default=_default_payment()
150 )
151 comment = models.TextField(blank=True, default='')
152 # total price - total purchase price
153 revenue = models.FloatField(default=0, null=True, verbose_name=_('revenue'))
154
155 @property
156 def payment_type_name(self):
157 """Return name for an order's payment option."""
158 return next(
159 name for option, name in settings.PAYMENT_OPTIONS
160 if self.payment_type == option
161 )
162
163 def set_positions(self, cart):
164 """
165 Save cart's state into Order instance.
166
167 @todo #589:60m Create Cart model.
168 See details here: https://github.com/fidals/shopelectro/pull/590#discussion_r222544672
169 """
170 self.revenue = cart.total_revenue()
171 self.save()
172 for id_, position in cart:
173 self.positions.create(
174 order=self,
175 product_id=id_,
176 vendor_code=position['vendor_code'],
177 name=position['name'],
178 price=position['price'],
179 quantity=position['quantity'],
180 )
181 return self
182
183
184 class CategoryPage(pages_models.ModelPage):
185 """Create proxy model for Admin."""
186
187 class Meta(pages_models.ModelPage.Meta): # Ignore PycodestyleBear (E303)
188 proxy = True
189
190 # noinspection PyTypeChecker
191 objects = pages_models.ModelPage.create_model_page_managers(Category)
192
193
194 class ProductPage(pages_models.ModelPage):
195 """Create proxy model for Admin."""
196
197 class Meta(pages_models.ModelPage.Meta): # Ignore PycodestyleBear (E303)
198 proxy = True
199
200 # noinspection PyTypeChecker
201 objects = (
202 pages_models.ModelPage
203 .create_model_page_managers(Product)
204 )
205
206
207 class TagGroup(catalog_models.TagGroup):
208 pass
209
210
211 class TagQuerySet(catalog_models.TagQuerySet):
212 pass
213
214
215 class Tag(catalog_models.Tag):
216 group = models.ForeignKey(
217 TagGroup, on_delete=models.CASCADE, null=True, related_name='tags',
218 )
219
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/shopelectro/models.py b/shopelectro/models.py
--- a/shopelectro/models.py
+++ b/shopelectro/models.py
@@ -7,7 +7,6 @@
from django.db import models
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
-import mptt
from catalog import models as catalog_models
from ecommerce import models as ecommerce_models
|
{"golden_diff": "diff --git a/shopelectro/models.py b/shopelectro/models.py\n--- a/shopelectro/models.py\n+++ b/shopelectro/models.py\n@@ -7,7 +7,6 @@\n from django.db import models\n from django.urls import reverse\n from django.utils.translation import ugettext_lazy as _\n-import mptt\n \n from catalog import models as catalog_models\n from ecommerce import models as ecommerce_models\n", "issue": "tests_selenium.py:976: Resurrect test `test_cart_page_open`\nThe puzzle `473-5159ab9c` from #473 has to be resolved:\n\nhttps://github.com/fidals/shopelectro/blob/f7dc2793dc5c7eddb2e68a68368337d77ba3139e/shopelectro/tests/tests_selenium.py#L976-L976\n\nThe puzzle was created by duker33 on 08-Aug-18. \n\nEstimate: 15 minutes, \n\nIf you have any technical questions, don't ask me, submit new tickets instead. The task will be \"done\" when the problem is fixed and the text of the puzzle is _removed_ from the source code. Here is more about [PDD](http://www.yegor256.com/2009/03/04/pdd.html) and [about me](http://www.yegor256.com/2017/04/05/pdd-in-action.html).\n", "before_files": [{"content": "import random\nimport string\nimport typing\nfrom uuid import uuid4\n\nfrom django.conf import settings\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils.translation import ugettext_lazy as _\nimport mptt\n\nfrom catalog import models as catalog_models\nfrom ecommerce import models as ecommerce_models\nfrom pages import models as pages_models\n\n\ndef randomize_slug(slug: str) -> str:\n slug_hash = ''.join(\n random.choices(string.ascii_lowercase, k=settings.SLUG_HASH_SIZE)\n )\n return f'{slug}_{slug_hash}'\n\n\nclass SECategoryQuerySet(catalog_models.CategoryQuerySet):\n def get_categories_tree_with_pictures(self) -> 'SECategoryQuerySet':\n categories_with_pictures = (\n self\n .filter(products__page__images__isnull=False)\n .distinct()\n )\n\n return categories_with_pictures.get_ancestors(include_self=True)\n\n\nclass SECategoryManager(\n catalog_models.CategoryManager.from_queryset(SECategoryQuerySet)\n):\n pass\n\n\nclass Category(catalog_models.AbstractCategory, pages_models.SyncPageMixin):\n\n objects = SECategoryManager()\n uuid = models.UUIDField(default=uuid4, editable=False)\n\n @classmethod\n def get_default_parent(cls):\n return pages_models.CustomPage.objects.filter(slug='catalog').first()\n\n @property\n def image(self):\n products = self.products.all()\n return products[0].image if products else None\n\n def get_absolute_url(self):\n return reverse('category', args=(self.page.slug,))\n\n\nclass Product(catalog_models.AbstractProduct, pages_models.SyncPageMixin):\n\n # That's why we are needed to explicitly add objects manager here\n # because of Django special managers behaviour.\n # Se se#480 for details.\n objects = catalog_models.ProductManager()\n\n category = models.ForeignKey(\n Category,\n on_delete=models.CASCADE,\n null=True,\n related_name='products',\n verbose_name=_('category'),\n )\n\n tags = models.ManyToManyField(\n 'Tag',\n related_name='products',\n blank=True,\n verbose_name=_('tags'),\n )\n\n vendor_code = models.SmallIntegerField(verbose_name=_('vendor_code'))\n uuid = models.UUIDField(default=uuid4, editable=False)\n purchase_price = models.FloatField(\n default=0, verbose_name=_('purchase_price'))\n wholesale_small = models.FloatField(\n default=0, verbose_name=_('wholesale_small'))\n wholesale_medium = models.FloatField(\n default=0, verbose_name=_('wholesale_medium'))\n wholesale_large = models.FloatField(\n default=0, 
verbose_name=_('wholesale_large'))\n\n def get_absolute_url(self):\n return reverse('product', args=(self.vendor_code,))\n\n @property\n def average_rate(self):\n \"\"\"Return rounded to first decimal averaged rating.\"\"\"\n rating = self.product_feedbacks.aggregate(\n avg=models.Avg('rating')).get('avg', 0)\n return round(rating, 1)\n\n @property\n def feedback_count(self):\n return self.product_feedbacks.count()\n\n @property\n def feedback(self):\n return self.product_feedbacks.all().order_by('-date')\n\n def get_params(self):\n return Tag.objects.filter_by_products([self]).get_group_tags_pairs()\n\n def get_brand_name(self) -> str:\n brand: typing.Optional['Tag'] = Tag.objects.get_brands([self]).get(self)\n return brand.name if brand else ''\n\n\nclass ProductFeedback(models.Model):\n product = models.ForeignKey(\n Product, on_delete=models.CASCADE, null=True,\n related_name='product_feedbacks'\n )\n\n date = models.DateTimeField(\n auto_now=True, db_index=True, verbose_name=_('date'))\n name = models.CharField(\n max_length=255, db_index=True, verbose_name=_('name'))\n rating = models.PositiveSmallIntegerField(\n default=1, db_index=True, verbose_name=_('rating'))\n dignities = models.TextField(\n default='', blank=True, verbose_name=_('dignities'))\n limitations = models.TextField(\n default='', blank=True, verbose_name=_('limitations'))\n general = models.TextField(\n default='', blank=True, verbose_name=_('limitations'))\n\n\ndef _default_payment():\n \"\"\"Default payment option is first element of first tuple in options.\"\"\"\n assert settings.PAYMENT_OPTIONS[0][0], 'No payment options!'\n return settings.PAYMENT_OPTIONS[0][0]\n\n\nclass Order(ecommerce_models.Order):\n address = models.TextField(blank=True, default='')\n payment_type = models.CharField(\n max_length=255,\n choices=settings.PAYMENT_OPTIONS,\n default=_default_payment()\n )\n comment = models.TextField(blank=True, default='')\n # total price - total purchase price\n revenue = models.FloatField(default=0, null=True, verbose_name=_('revenue'))\n\n @property\n def payment_type_name(self):\n \"\"\"Return name for an order's payment option.\"\"\"\n return next(\n name for option, name in settings.PAYMENT_OPTIONS\n if self.payment_type == option\n )\n\n def set_positions(self, cart):\n \"\"\"\n Save cart's state into Order instance.\n\n @todo #589:60m Create Cart model.\n See details here: https://github.com/fidals/shopelectro/pull/590#discussion_r222544672\n \"\"\"\n self.revenue = cart.total_revenue()\n self.save()\n for id_, position in cart:\n self.positions.create(\n order=self,\n product_id=id_,\n vendor_code=position['vendor_code'],\n name=position['name'],\n price=position['price'],\n quantity=position['quantity'],\n )\n return self\n\n\nclass CategoryPage(pages_models.ModelPage):\n \"\"\"Create proxy model for Admin.\"\"\"\n\n class Meta(pages_models.ModelPage.Meta): # Ignore PycodestyleBear (E303)\n proxy = True\n\n # noinspection PyTypeChecker\n objects = pages_models.ModelPage.create_model_page_managers(Category)\n\n\nclass ProductPage(pages_models.ModelPage):\n \"\"\"Create proxy model for Admin.\"\"\"\n\n class Meta(pages_models.ModelPage.Meta): # Ignore PycodestyleBear (E303)\n proxy = True\n\n # noinspection PyTypeChecker\n objects = (\n pages_models.ModelPage\n .create_model_page_managers(Product)\n )\n\n\nclass TagGroup(catalog_models.TagGroup):\n pass\n\n\nclass TagQuerySet(catalog_models.TagQuerySet):\n pass\n\n\nclass Tag(catalog_models.Tag):\n group = models.ForeignKey(\n TagGroup, 
on_delete=models.CASCADE, null=True, related_name='tags',\n )\n", "path": "shopelectro/models.py"}], "after_files": [{"content": "import random\nimport string\nimport typing\nfrom uuid import uuid4\n\nfrom django.conf import settings\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom catalog import models as catalog_models\nfrom ecommerce import models as ecommerce_models\nfrom pages import models as pages_models\n\n\ndef randomize_slug(slug: str) -> str:\n slug_hash = ''.join(\n random.choices(string.ascii_lowercase, k=settings.SLUG_HASH_SIZE)\n )\n return f'{slug}_{slug_hash}'\n\n\nclass SECategoryQuerySet(catalog_models.CategoryQuerySet):\n def get_categories_tree_with_pictures(self) -> 'SECategoryQuerySet':\n categories_with_pictures = (\n self\n .filter(products__page__images__isnull=False)\n .distinct()\n )\n\n return categories_with_pictures.get_ancestors(include_self=True)\n\n\nclass SECategoryManager(\n catalog_models.CategoryManager.from_queryset(SECategoryQuerySet)\n):\n pass\n\n\nclass Category(catalog_models.AbstractCategory, pages_models.SyncPageMixin):\n\n objects = SECategoryManager()\n uuid = models.UUIDField(default=uuid4, editable=False)\n\n @classmethod\n def get_default_parent(cls):\n return pages_models.CustomPage.objects.filter(slug='catalog').first()\n\n @property\n def image(self):\n products = self.products.all()\n return products[0].image if products else None\n\n def get_absolute_url(self):\n return reverse('category', args=(self.page.slug,))\n\n\nclass Product(catalog_models.AbstractProduct, pages_models.SyncPageMixin):\n\n # That's why we are needed to explicitly add objects manager here\n # because of Django special managers behaviour.\n # Se se#480 for details.\n objects = catalog_models.ProductManager()\n\n category = models.ForeignKey(\n Category,\n on_delete=models.CASCADE,\n null=True,\n related_name='products',\n verbose_name=_('category'),\n )\n\n tags = models.ManyToManyField(\n 'Tag',\n related_name='products',\n blank=True,\n verbose_name=_('tags'),\n )\n\n vendor_code = models.SmallIntegerField(verbose_name=_('vendor_code'))\n uuid = models.UUIDField(default=uuid4, editable=False)\n purchase_price = models.FloatField(\n default=0, verbose_name=_('purchase_price'))\n wholesale_small = models.FloatField(\n default=0, verbose_name=_('wholesale_small'))\n wholesale_medium = models.FloatField(\n default=0, verbose_name=_('wholesale_medium'))\n wholesale_large = models.FloatField(\n default=0, verbose_name=_('wholesale_large'))\n\n def get_absolute_url(self):\n return reverse('product', args=(self.vendor_code,))\n\n @property\n def average_rate(self):\n \"\"\"Return rounded to first decimal averaged rating.\"\"\"\n rating = self.product_feedbacks.aggregate(\n avg=models.Avg('rating')).get('avg', 0)\n return round(rating, 1)\n\n @property\n def feedback_count(self):\n return self.product_feedbacks.count()\n\n @property\n def feedback(self):\n return self.product_feedbacks.all().order_by('-date')\n\n def get_params(self):\n return Tag.objects.filter_by_products([self]).get_group_tags_pairs()\n\n def get_brand_name(self) -> str:\n brand: typing.Optional['Tag'] = Tag.objects.get_brands([self]).get(self)\n return brand.name if brand else ''\n\n\nclass ProductFeedback(models.Model):\n product = models.ForeignKey(\n Product, on_delete=models.CASCADE, null=True,\n related_name='product_feedbacks'\n )\n\n date = models.DateTimeField(\n auto_now=True, db_index=True, 
verbose_name=_('date'))\n name = models.CharField(\n max_length=255, db_index=True, verbose_name=_('name'))\n rating = models.PositiveSmallIntegerField(\n default=1, db_index=True, verbose_name=_('rating'))\n dignities = models.TextField(\n default='', blank=True, verbose_name=_('dignities'))\n limitations = models.TextField(\n default='', blank=True, verbose_name=_('limitations'))\n general = models.TextField(\n default='', blank=True, verbose_name=_('limitations'))\n\n\ndef _default_payment():\n \"\"\"Default payment option is first element of first tuple in options.\"\"\"\n assert settings.PAYMENT_OPTIONS[0][0], 'No payment options!'\n return settings.PAYMENT_OPTIONS[0][0]\n\n\nclass Order(ecommerce_models.Order):\n address = models.TextField(blank=True, default='')\n payment_type = models.CharField(\n max_length=255,\n choices=settings.PAYMENT_OPTIONS,\n default=_default_payment()\n )\n comment = models.TextField(blank=True, default='')\n # total price - total purchase price\n revenue = models.FloatField(default=0, null=True, verbose_name=_('revenue'))\n\n @property\n def payment_type_name(self):\n \"\"\"Return name for an order's payment option.\"\"\"\n return next(\n name for option, name in settings.PAYMENT_OPTIONS\n if self.payment_type == option\n )\n\n def set_positions(self, cart):\n \"\"\"\n Save cart's state into Order instance.\n\n @todo #589:60m Create Cart model.\n See details here: https://github.com/fidals/shopelectro/pull/590#discussion_r222544672\n \"\"\"\n self.revenue = cart.total_revenue()\n self.save()\n for id_, position in cart:\n self.positions.create(\n order=self,\n product_id=id_,\n vendor_code=position['vendor_code'],\n name=position['name'],\n price=position['price'],\n quantity=position['quantity'],\n )\n return self\n\n\nclass CategoryPage(pages_models.ModelPage):\n \"\"\"Create proxy model for Admin.\"\"\"\n\n class Meta(pages_models.ModelPage.Meta): # Ignore PycodestyleBear (E303)\n proxy = True\n\n # noinspection PyTypeChecker\n objects = pages_models.ModelPage.create_model_page_managers(Category)\n\n\nclass ProductPage(pages_models.ModelPage):\n \"\"\"Create proxy model for Admin.\"\"\"\n\n class Meta(pages_models.ModelPage.Meta): # Ignore PycodestyleBear (E303)\n proxy = True\n\n # noinspection PyTypeChecker\n objects = (\n pages_models.ModelPage\n .create_model_page_managers(Product)\n )\n\n\nclass TagGroup(catalog_models.TagGroup):\n pass\n\n\nclass TagQuerySet(catalog_models.TagQuerySet):\n pass\n\n\nclass Tag(catalog_models.Tag):\n group = models.ForeignKey(\n TagGroup, on_delete=models.CASCADE, null=True, related_name='tags',\n )\n", "path": "shopelectro/models.py"}]}
| 2,489 | 89 |
gh_patches_debug_22280
|
rasdani/github-patches
|
git_diff
|
airctic__icevision-878
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix download_mmdet_configs
No need to download the zip file if it exists. This will solve the issue encountered in the Kaggle offline installation.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `icevision/models/mmdet/download_configs.py`
Content:
```
1 __all__ = ["download_mmdet_configs"]
2
3 from icevision.imports import *
4 from icevision.utils import *
5
6 VERSION = "v2.10.0"
7 BASE_URL = "https://codeload.github.com/airctic/mmdetection_configs/zip/refs/tags"
8
9
10 def download_mmdet_configs() -> Path:
11 save_dir = get_root_dir() / f"mmdetection_configs"
12 save_dir.mkdir(parents=True, exist_ok=True)
13
14 download_path = save_dir / f"{VERSION}.zip"
15 if not download_path.exists():
16 logger.info("Downloading mmdet configs")
17
18 download_and_extract(f"{BASE_URL}/{VERSION}", download_path)
19
20 return save_dir / f"mmdetection_configs-{VERSION[1:]}/configs"
21
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/icevision/models/mmdet/download_configs.py b/icevision/models/mmdet/download_configs.py
--- a/icevision/models/mmdet/download_configs.py
+++ b/icevision/models/mmdet/download_configs.py
@@ -9,12 +9,26 @@
def download_mmdet_configs() -> Path:
save_dir = get_root_dir() / f"mmdetection_configs"
- save_dir.mkdir(parents=True, exist_ok=True)
+ mmdet_config_path = save_dir / f"mmdetection_configs-{VERSION[1:]}/configs"
download_path = save_dir / f"{VERSION}.zip"
- if not download_path.exists():
- logger.info("Downloading mmdet configs")
- download_and_extract(f"{BASE_URL}/{VERSION}", download_path)
+ if mmdet_config_path.exists():
+ logger.info(
+ f"The mmdet config folder already exists. No need to downloaded it. Path : {mmdet_config_path}"
+ )
+ elif download_path.exists():
+ # The zip file was downloaded by not extracted yet
+ # Extract zip file
+ logger.info(f"Extracting the {VERSION}.zip file.")
+ save_dir = Path(download_path).parent
+ shutil.unpack_archive(filename=str(download_path), extract_dir=str(save_dir))
+ else:
+ save_dir.mkdir(parents=True, exist_ok=True)
- return save_dir / f"mmdetection_configs-{VERSION[1:]}/configs"
+ download_path = save_dir / f"{VERSION}.zip"
+ if not download_path.exists():
+ logger.info("Downloading mmdet configs")
+ download_and_extract(f"{BASE_URL}/{VERSION}", download_path)
+
+ return mmdet_config_path
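
The shape of the fix above is worth lifting out of the diff: check for the already-extracted config folder first, fall back to unpacking a previously downloaded archive, and only touch the network as a last resort. The sketch below is a hypothetical, standalone rendering of that pattern, not icevision's actual code; `fetch_and_extract` stands in for the library's `download_and_extract` helper.

```python
# Hedged sketch of the idempotent download pattern; helper names are assumptions.
import shutil
from pathlib import Path


def ensure_configs(save_dir: Path, version: str, fetch_and_extract) -> Path:
    config_path = save_dir / f"mmdetection_configs-{version[1:]}/configs"
    archive = save_dir / f"{version}.zip"

    if config_path.exists():
        return config_path  # already extracted, e.g. a Kaggle offline environment
    if archive.exists():
        # Zip present but never unpacked: extract locally, no network required.
        shutil.unpack_archive(str(archive), extract_dir=str(save_dir))
        return config_path
    save_dir.mkdir(parents=True, exist_ok=True)
    fetch_and_extract(version, archive)  # cold cache: download and extract once
    return config_path
```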
|
{"golden_diff": "diff --git a/icevision/models/mmdet/download_configs.py b/icevision/models/mmdet/download_configs.py\n--- a/icevision/models/mmdet/download_configs.py\n+++ b/icevision/models/mmdet/download_configs.py\n@@ -9,12 +9,26 @@\n \n def download_mmdet_configs() -> Path:\n save_dir = get_root_dir() / f\"mmdetection_configs\"\n- save_dir.mkdir(parents=True, exist_ok=True)\n \n+ mmdet_config_path = save_dir / f\"mmdetection_configs-{VERSION[1:]}/configs\"\n download_path = save_dir / f\"{VERSION}.zip\"\n- if not download_path.exists():\n- logger.info(\"Downloading mmdet configs\")\n \n- download_and_extract(f\"{BASE_URL}/{VERSION}\", download_path)\n+ if mmdet_config_path.exists():\n+ logger.info(\n+ f\"The mmdet config folder already exists. No need to downloaded it. Path : {mmdet_config_path}\"\n+ )\n+ elif download_path.exists():\n+ # The zip file was downloaded by not extracted yet\n+ # Extract zip file\n+ logger.info(f\"Extracting the {VERSION}.zip file.\")\n+ save_dir = Path(download_path).parent\n+ shutil.unpack_archive(filename=str(download_path), extract_dir=str(save_dir))\n+ else:\n+ save_dir.mkdir(parents=True, exist_ok=True)\n \n- return save_dir / f\"mmdetection_configs-{VERSION[1:]}/configs\"\n+ download_path = save_dir / f\"{VERSION}.zip\"\n+ if not download_path.exists():\n+ logger.info(\"Downloading mmdet configs\")\n+ download_and_extract(f\"{BASE_URL}/{VERSION}\", download_path)\n+\n+ return mmdet_config_path\n", "issue": "Fix download_mmdet_configs\nNo need to download the zip file if it exists. This will solve the issue encountered in the Kaggle offline installation.\r\n\n", "before_files": [{"content": "__all__ = [\"download_mmdet_configs\"]\n\nfrom icevision.imports import *\nfrom icevision.utils import *\n\nVERSION = \"v2.10.0\"\nBASE_URL = \"https://codeload.github.com/airctic/mmdetection_configs/zip/refs/tags\"\n\n\ndef download_mmdet_configs() -> Path:\n save_dir = get_root_dir() / f\"mmdetection_configs\"\n save_dir.mkdir(parents=True, exist_ok=True)\n\n download_path = save_dir / f\"{VERSION}.zip\"\n if not download_path.exists():\n logger.info(\"Downloading mmdet configs\")\n\n download_and_extract(f\"{BASE_URL}/{VERSION}\", download_path)\n\n return save_dir / f\"mmdetection_configs-{VERSION[1:]}/configs\"\n", "path": "icevision/models/mmdet/download_configs.py"}], "after_files": [{"content": "__all__ = [\"download_mmdet_configs\"]\n\nfrom icevision.imports import *\nfrom icevision.utils import *\n\nVERSION = \"v2.10.0\"\nBASE_URL = \"https://codeload.github.com/airctic/mmdetection_configs/zip/refs/tags\"\n\n\ndef download_mmdet_configs() -> Path:\n save_dir = get_root_dir() / f\"mmdetection_configs\"\n\n mmdet_config_path = save_dir / f\"mmdetection_configs-{VERSION[1:]}/configs\"\n download_path = save_dir / f\"{VERSION}.zip\"\n\n if mmdet_config_path.exists():\n logger.info(\n f\"The mmdet config folder already exists. No need to downloaded it. 
Path : {mmdet_config_path}\"\n )\n elif download_path.exists():\n # The zip file was downloaded by not extracted yet\n # Extract zip file\n logger.info(f\"Extracting the {VERSION}.zip file.\")\n save_dir = Path(download_path).parent\n shutil.unpack_archive(filename=str(download_path), extract_dir=str(save_dir))\n else:\n save_dir.mkdir(parents=True, exist_ok=True)\n\n download_path = save_dir / f\"{VERSION}.zip\"\n if not download_path.exists():\n logger.info(\"Downloading mmdet configs\")\n download_and_extract(f\"{BASE_URL}/{VERSION}\", download_path)\n\n return mmdet_config_path\n", "path": "icevision/models/mmdet/download_configs.py"}]}
| 496 | 388 |
gh_patches_debug_10923
|
rasdani/github-patches
|
git_diff
|
napari__napari-2930
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SettingsManager._load not working with Field(default_factory=...)
## 🐛 Bug
Ran into a [test failure](https://github.com/napari/napari/pull/2695/checks?check_run_id=2925027700#step:7:250) on #2695 that looks to be a bug in the Settings Manager.
The preferred way to add a default mutable container to a pydantic model is to use `default_factory`, but currently, if you do this, you'll get an error:
```python
class PluginsSettings(BaseNapariSettings):
# this works, and is used in a couple places in the settings
# but is not how an empty mutable object should be declared
some_setting: Dict[str, str] = Field(dict())
# this is the preferred way to create an empty mutable
# but it doesn't work:
some_other_setting: Dict[str, str] = Field(default_factory=dict)
```
traceback:
```pytb
napari/utils/settings/_manager.py:262: in get_settings
SETTINGS = SettingsManager(config_path=config_path)
napari/utils/settings/_manager.py:90: in __init__
self._load()
napari/utils/settings/_manager.py:186: in _load
self._defaults[section] = setting(**_section_defaults)
pydantic/env_settings.py:36: in pydantic.env_settings.BaseSettings.__init__
???
napari/utils/events/evented_model.py:129: in __init__
super().__init__(**kwargs)
pydantic/main.py:406: in pydantic.main.BaseModel.__init__
???
E pydantic.error_wrappers.ValidationError: 2 validation errors for PluginsSettings
E extension2reader
E none is not an allowed value (type=type_error.none.not_allowed)
E extension2writer
E none is not an allowed value (type=type_error.none.not_allowed)
```
--- END ISSUE ---
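Before looking at the repository files, the failure is easy to reproduce in isolation. The sketch below assumes pydantic v1 (what napari used here) and a hypothetical `PluginsLike` model: the JSON schema simply has no `"default"` entry for a `default_factory` field, so defaults collected from `schema()["properties"]` come back as `None` and then fail validation.

```python
# Hypothetical minimal repro, assuming pydantic v1 schema semantics.
from typing import Dict
from pydantic import BaseModel, Field


class PluginsLike(BaseModel):
    plain_default: Dict[str, str] = Field(dict())                   # "default" shows up in the schema
    factory_default: Dict[str, str] = Field(default_factory=dict)   # no "default" key in the schema


schema_defaults = {
    name: prop.get("default", None)
    for name, prop in PluginsLike.schema()["properties"].items()
}
print(schema_defaults)  # {'plain_default': {}, 'factory_default': None}

# Feeding these back into the model is what raises the ValidationError above:
# PluginsLike(**schema_defaults)  ->  "none is not an allowed value"
```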
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `napari/utils/settings/_manager.py`
Content:
```
1 """Settings management.
2 """
3
4 import json
5 import os
6 import warnings
7 from pathlib import Path
8 from typing import Any, Dict, List, Optional, Union
9
10 from appdirs import user_config_dir
11 from yaml import safe_dump, safe_load
12
13 from ...utils.translations import trans
14 from .._base import _APPAUTHOR, _APPNAME, _FILENAME
15 from ._defaults import CORE_SETTINGS as CORE_SETTINGS
16 from ._defaults import (
17 AppearanceSettings,
18 ApplicationSettings,
19 BaseNapariSettings,
20 ExperimentalSettings,
21 PluginsSettings,
22 ShortcutsSettings,
23 )
24
25
26 class _SettingsMixin:
27 appearance: AppearanceSettings
28 application: ApplicationSettings
29 plugins: PluginsSettings
30 shortcuts: ShortcutsSettings
31 experimental: ExperimentalSettings
32
33
34 class SettingsManager(_SettingsMixin):
35 """
36 Napari settings manager using evented SettingsModels.
37
38 This provides the presistence layer for the application settings.
39
40 Parameters
41 ----------
42 config_path : str, optional
43 Provide the base folder to store napari configuration. Default is None,
44 which will point to user config provided by `appdirs`.
45 save_to_disk : bool, optional
46 Persist settings on disk. Default is True.
47
48 Notes
49 -----
50 The settings manager will create a new user configuration folder which is
51 provided by `appdirs` in a cross platform manner. On the first startup a
52 new configuration file will be created using the default values defined by
53 the `CORE_SETTINGS` models.
54
55 If a configuration file is found in the specified location, it will be
56 loaded by the `_load` method. On configuration load the following checks
57 are performed:
58
59 - If invalid sections are found, these will be removed from the file.
60 - If invalid keys are found within a valid section, these will be removed
61 from the file.
62 - If invalid values are found within valid sections and valid keys, these
63 will be replaced by the default value provided by `CORE_SETTINGS`
64 models.
65 """
66
67 _FILENAME = _FILENAME
68 _APPNAME = _APPNAME
69 _APPAUTHOR = _APPAUTHOR
70
71 def __init__(
72 self,
73 config_path: Optional[Path] = None,
74 save_to_disk: bool = True,
75 ):
76 self._config_path = (
77 Path(user_config_dir(self._APPNAME, self._APPAUTHOR))
78 if config_path is None
79 else Path(config_path)
80 )
81 self._save_to_disk = save_to_disk
82 self._settings: Dict[str, BaseNapariSettings] = {}
83 self._defaults: Dict[str, BaseNapariSettings] = {}
84 self._plugins: List[str] = []
85 self._env_settings: Dict[str, Any] = {}
86
87 if not self._config_path.is_dir():
88 os.makedirs(self._config_path)
89
90 self._load()
91
92 def __getattr__(self, attr):
93 if attr in self._settings:
94 return self._settings[attr]
95
96 def __dir__(self):
97 """Add setting keys to make tab completion works."""
98 return list(super().__dir__()) + list(self._settings)
99
100 def __str__(self):
101 return safe_dump(self._to_dict(safe=True))
102
103 def _remove_default(self, settings_data):
104 """
105 Attempt to convert self to dict and to remove any default values from the configuration
106 """
107 for section, values in settings_data.items():
108 if section not in self._defaults:
109 continue
110
111 default_values = self._defaults[section].dict()
112 for k, v in list(values.items()):
113 if default_values.get(k, None) == v:
114 del values[k]
115
116 return settings_data
117
118 def _to_dict(self, safe: bool = False) -> dict:
119 """Convert the settings to a dictionary."""
120 data = {}
121 for section, model in self._settings.items():
122 if safe:
123 # We roundtrip to keep string objects (like SchemaVersion)
124 # yaml representable
125 data[section] = json.loads(model.json())
126 else:
127 data[section] = model.dict()
128
129 return data
130
131 def _save(self):
132 """Save configuration to disk."""
133 if self._save_to_disk:
134 path = self.path / self._FILENAME
135
136 if self._env_settings:
137 # If using environment variables do not save them in the
138 # `settings.yaml` file. We just delete any keys loaded
139 # as environment variables
140 data = self._to_dict(safe=True)
141 for section, env_data in self._env_settings.items():
142 for k, v in env_data.items():
143 del data[section][k]
144 else:
145 data = self._to_dict(safe=True)
146
147 with open(path, "w") as fh:
148 fh.write(safe_dump(self._remove_default(data)))
149
150 def _load(self):
151 """Read configuration from disk."""
152 path = self.path / self._FILENAME
153
154 if path.is_file():
155 try:
156 with open(path) as fh:
157 data = safe_load(fh.read()) or {}
158 except Exception as err:
159 warnings.warn(
160 trans._(
161 "The content of the napari settings file could not be read\n\nThe default settings will be used and the content of the file will be replaced the next time settings are changed.\n\nError:\n{err}",
162 deferred=True,
163 err=err,
164 )
165 )
166 data = {}
167
168 # Load data once and save it in the base class
169 BaseNapariSettings._LOADED_DATA = data
170
171 for setting in CORE_SETTINGS:
172 section = setting.schema().get("section", None)
173 if section is None:
174 raise ValueError(
175 trans._(
176 "Settings model {setting!r} must provide a `section` in the `schemas_extra`",
177 deferred=True,
178 setting=setting,
179 )
180 )
181
182 _section_defaults = {}
183 for option, option_data in setting.schema()["properties"].items():
184 _section_defaults[option] = option_data.get("default", None)
185
186 self._defaults[section] = setting(**_section_defaults)
187 model = setting()
188 model.events.connect(lambda x: self._save())
189 self._settings[section] = model
190 self._env_settings[section] = getattr(
191 model.__config__, "_env_settings"
192 )(model)
193
194 self._save()
195
196 @property
197 def path(self):
198 return self._config_path
199
200 def reset(self):
201 """Reset settings to default values."""
202 for section in self._settings:
203 for key, default_value in self._defaults[section].dict().items():
204 setattr(self._settings[section], key, default_value)
205
206 self._save()
207
208 def schemas(self) -> dict:
209 """Return the json schema for each of the settings model."""
210 schemas = {}
211 for section, settings in self._settings.items():
212 schemas[section] = {
213 "json_schema": settings.schema_json(),
214 "model": settings,
215 }
216
217 return schemas
218
219 def register_plugin(self, plugin):
220 """Register plugin settings with the settings manager.
221
222 Parameters
223 ----------
224 plugin
225 The napari plugin that may or may not provide settings.
226 """
227 self._plugins.append(plugin)
228
229
230 class _SettingsProxy(_SettingsMixin):
231 """Backwards compatibility layer."""
232
233 def __getattribute__(self, name) -> Any:
234 return getattr(get_settings(), name)
235
236
237 SETTINGS: Union[SettingsManager, _SettingsProxy] = _SettingsProxy()
238
239
240 def get_settings(path: Optional[Union[Path, str]] = None) -> SettingsManager:
241 """
242 Get settings for a given path.
243
244 Parameters
245 ----------
246 path: Path, optional
247 The path to read/write the settings from.
248
249 Returns
250 -------
251 SettingsManager
252 The settings manager.
253
254 Notes
255 -----
256 The path can only be set once per session.
257 """
258 global SETTINGS
259
260 if isinstance(SETTINGS, _SettingsProxy):
261 config_path = Path(path).resolve() if path else None
262 SETTINGS = SettingsManager(config_path=config_path)
263 elif path is not None:
264 import inspect
265
266 curframe = inspect.currentframe()
267 calframe = inspect.getouterframes(curframe, 2)
268 raise Exception(
269 trans._(
270 "The path can only be set once per session. Settings called from {calframe[1][3]}",
271 deferred=True,
272 calframe=calframe,
273 )
274 )
275
276 return SETTINGS
277
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/napari/utils/settings/_manager.py b/napari/utils/settings/_manager.py
--- a/napari/utils/settings/_manager.py
+++ b/napari/utils/settings/_manager.py
@@ -179,11 +179,9 @@
)
)
- _section_defaults = {}
- for option, option_data in setting.schema()["properties"].items():
- _section_defaults[option] = option_data.get("default", None)
-
- self._defaults[section] = setting(**_section_defaults)
+ self._defaults[section] = setting(
+ **{k: v.get_default() for k, v in setting.__fields__.items()}
+ )
model = setting()
model.events.connect(lambda x: self._save())
self._settings[section] = model
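
The patch above avoids the JSON schema entirely and asks each pydantic `ModelField` for its default, which does honor `default_factory`. A hedged sketch of that idea, again assuming pydantic v1 and a hypothetical settings model:

```python
# Sketch of the fixed default collection via pydantic v1's ModelField.get_default().
from typing import Dict
from pydantic import BaseModel, Field


class SettingsLike(BaseModel):
    extension2reader: Dict[str, str] = Field(default_factory=dict)
    first_start: bool = True


defaults = {name: field.get_default() for name, field in SettingsLike.__fields__.items()}
print(defaults)           # {'extension2reader': {}, 'first_start': True}
SettingsLike(**defaults)  # validates cleanly, unlike the schema-derived defaults
```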
|
{"golden_diff": "diff --git a/napari/utils/settings/_manager.py b/napari/utils/settings/_manager.py\n--- a/napari/utils/settings/_manager.py\n+++ b/napari/utils/settings/_manager.py\n@@ -179,11 +179,9 @@\n )\n )\n \n- _section_defaults = {}\n- for option, option_data in setting.schema()[\"properties\"].items():\n- _section_defaults[option] = option_data.get(\"default\", None)\n-\n- self._defaults[section] = setting(**_section_defaults)\n+ self._defaults[section] = setting(\n+ **{k: v.get_default() for k, v in setting.__fields__.items()}\n+ )\n model = setting()\n model.events.connect(lambda x: self._save())\n self._settings[section] = model\n", "issue": "SettingsManager._load not working with Field(default_factory=...)\n## \ud83d\udc1b Bug\r\nRan into a [test failure](https://github.com/napari/napari/pull/2695/checks?check_run_id=2925027700#step:7:250) on #2695 that looks to be a bug in the Settings Manager.\r\n\r\nThe preferred way to add a default mutable container to a pydantic model is to use `default_factor`, but currently, if you do this, you'll get an error:\r\n\r\n```python\r\nclass PluginsSettings(BaseNapariSettings):\r\n # this works, and is used in a couple places in the settings\r\n # but is not how an empty mutable object should be declared\r\n some_setting: Dict[str, str] = Field(dict())\r\n\r\n # this is the preferred way to create an empty mutable\r\n # but it doesn't work:\r\n some_other_setting: Dict[str, str] = Field(default_factory=dict)\r\n```\r\n\r\ntraceback:\r\n\r\n```pytb\r\nnapari/utils/settings/_manager.py:262: in get_settings\r\n SETTINGS = SettingsManager(config_path=config_path)\r\nnapari/utils/settings/_manager.py:90: in __init__\r\n self._load()\r\nnapari/utils/settings/_manager.py:186: in _load\r\n self._defaults[section] = setting(**_section_defaults)\r\npydantic/env_settings.py:36: in pydantic.env_settings.BaseSettings.__init__\r\n ???\r\nnapari/utils/events/evented_model.py:129: in __init__\r\n super().__init__(**kwargs)\r\npydantic/main.py:406: in pydantic.main.BaseModel.__init__\r\n ???\r\nE pydantic.error_wrappers.ValidationError: 2 validation errors for PluginsSettings\r\nE extension2reader\r\nE none is not an allowed value (type=type_error.none.not_allowed)\r\nE extension2writer\r\nE none is not an allowed value (type=type_error.none.not_allowed)\r\n\r\n```\n", "before_files": [{"content": "\"\"\"Settings management.\n\"\"\"\n\nimport json\nimport os\nimport warnings\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Optional, Union\n\nfrom appdirs import user_config_dir\nfrom yaml import safe_dump, safe_load\n\nfrom ...utils.translations import trans\nfrom .._base import _APPAUTHOR, _APPNAME, _FILENAME\nfrom ._defaults import CORE_SETTINGS as CORE_SETTINGS\nfrom ._defaults import (\n AppearanceSettings,\n ApplicationSettings,\n BaseNapariSettings,\n ExperimentalSettings,\n PluginsSettings,\n ShortcutsSettings,\n)\n\n\nclass _SettingsMixin:\n appearance: AppearanceSettings\n application: ApplicationSettings\n plugins: PluginsSettings\n shortcuts: ShortcutsSettings\n experimental: ExperimentalSettings\n\n\nclass SettingsManager(_SettingsMixin):\n \"\"\"\n Napari settings manager using evented SettingsModels.\n\n This provides the presistence layer for the application settings.\n\n Parameters\n ----------\n config_path : str, optional\n Provide the base folder to store napari configuration. Default is None,\n which will point to user config provided by `appdirs`.\n save_to_disk : bool, optional\n Persist settings on disk. 
Default is True.\n\n Notes\n -----\n The settings manager will create a new user configuration folder which is\n provided by `appdirs` in a cross platform manner. On the first startup a\n new configuration file will be created using the default values defined by\n the `CORE_SETTINGS` models.\n\n If a configuration file is found in the specified location, it will be\n loaded by the `_load` method. On configuration load the following checks\n are performed:\n\n - If invalid sections are found, these will be removed from the file.\n - If invalid keys are found within a valid section, these will be removed\n from the file.\n - If invalid values are found within valid sections and valid keys, these\n will be replaced by the default value provided by `CORE_SETTINGS`\n models.\n \"\"\"\n\n _FILENAME = _FILENAME\n _APPNAME = _APPNAME\n _APPAUTHOR = _APPAUTHOR\n\n def __init__(\n self,\n config_path: Optional[Path] = None,\n save_to_disk: bool = True,\n ):\n self._config_path = (\n Path(user_config_dir(self._APPNAME, self._APPAUTHOR))\n if config_path is None\n else Path(config_path)\n )\n self._save_to_disk = save_to_disk\n self._settings: Dict[str, BaseNapariSettings] = {}\n self._defaults: Dict[str, BaseNapariSettings] = {}\n self._plugins: List[str] = []\n self._env_settings: Dict[str, Any] = {}\n\n if not self._config_path.is_dir():\n os.makedirs(self._config_path)\n\n self._load()\n\n def __getattr__(self, attr):\n if attr in self._settings:\n return self._settings[attr]\n\n def __dir__(self):\n \"\"\"Add setting keys to make tab completion works.\"\"\"\n return list(super().__dir__()) + list(self._settings)\n\n def __str__(self):\n return safe_dump(self._to_dict(safe=True))\n\n def _remove_default(self, settings_data):\n \"\"\"\n Attempt to convert self to dict and to remove any default values from the configuration\n \"\"\"\n for section, values in settings_data.items():\n if section not in self._defaults:\n continue\n\n default_values = self._defaults[section].dict()\n for k, v in list(values.items()):\n if default_values.get(k, None) == v:\n del values[k]\n\n return settings_data\n\n def _to_dict(self, safe: bool = False) -> dict:\n \"\"\"Convert the settings to a dictionary.\"\"\"\n data = {}\n for section, model in self._settings.items():\n if safe:\n # We roundtrip to keep string objects (like SchemaVersion)\n # yaml representable\n data[section] = json.loads(model.json())\n else:\n data[section] = model.dict()\n\n return data\n\n def _save(self):\n \"\"\"Save configuration to disk.\"\"\"\n if self._save_to_disk:\n path = self.path / self._FILENAME\n\n if self._env_settings:\n # If using environment variables do not save them in the\n # `settings.yaml` file. 
We just delete any keys loaded\n # as environment variables\n data = self._to_dict(safe=True)\n for section, env_data in self._env_settings.items():\n for k, v in env_data.items():\n del data[section][k]\n else:\n data = self._to_dict(safe=True)\n\n with open(path, \"w\") as fh:\n fh.write(safe_dump(self._remove_default(data)))\n\n def _load(self):\n \"\"\"Read configuration from disk.\"\"\"\n path = self.path / self._FILENAME\n\n if path.is_file():\n try:\n with open(path) as fh:\n data = safe_load(fh.read()) or {}\n except Exception as err:\n warnings.warn(\n trans._(\n \"The content of the napari settings file could not be read\\n\\nThe default settings will be used and the content of the file will be replaced the next time settings are changed.\\n\\nError:\\n{err}\",\n deferred=True,\n err=err,\n )\n )\n data = {}\n\n # Load data once and save it in the base class\n BaseNapariSettings._LOADED_DATA = data\n\n for setting in CORE_SETTINGS:\n section = setting.schema().get(\"section\", None)\n if section is None:\n raise ValueError(\n trans._(\n \"Settings model {setting!r} must provide a `section` in the `schemas_extra`\",\n deferred=True,\n setting=setting,\n )\n )\n\n _section_defaults = {}\n for option, option_data in setting.schema()[\"properties\"].items():\n _section_defaults[option] = option_data.get(\"default\", None)\n\n self._defaults[section] = setting(**_section_defaults)\n model = setting()\n model.events.connect(lambda x: self._save())\n self._settings[section] = model\n self._env_settings[section] = getattr(\n model.__config__, \"_env_settings\"\n )(model)\n\n self._save()\n\n @property\n def path(self):\n return self._config_path\n\n def reset(self):\n \"\"\"Reset settings to default values.\"\"\"\n for section in self._settings:\n for key, default_value in self._defaults[section].dict().items():\n setattr(self._settings[section], key, default_value)\n\n self._save()\n\n def schemas(self) -> dict:\n \"\"\"Return the json schema for each of the settings model.\"\"\"\n schemas = {}\n for section, settings in self._settings.items():\n schemas[section] = {\n \"json_schema\": settings.schema_json(),\n \"model\": settings,\n }\n\n return schemas\n\n def register_plugin(self, plugin):\n \"\"\"Register plugin settings with the settings manager.\n\n Parameters\n ----------\n plugin\n The napari plugin that may or may not provide settings.\n \"\"\"\n self._plugins.append(plugin)\n\n\nclass _SettingsProxy(_SettingsMixin):\n \"\"\"Backwards compatibility layer.\"\"\"\n\n def __getattribute__(self, name) -> Any:\n return getattr(get_settings(), name)\n\n\nSETTINGS: Union[SettingsManager, _SettingsProxy] = _SettingsProxy()\n\n\ndef get_settings(path: Optional[Union[Path, str]] = None) -> SettingsManager:\n \"\"\"\n Get settings for a given path.\n\n Parameters\n ----------\n path: Path, optional\n The path to read/write the settings from.\n\n Returns\n -------\n SettingsManager\n The settings manager.\n\n Notes\n -----\n The path can only be set once per session.\n \"\"\"\n global SETTINGS\n\n if isinstance(SETTINGS, _SettingsProxy):\n config_path = Path(path).resolve() if path else None\n SETTINGS = SettingsManager(config_path=config_path)\n elif path is not None:\n import inspect\n\n curframe = inspect.currentframe()\n calframe = inspect.getouterframes(curframe, 2)\n raise Exception(\n trans._(\n \"The path can only be set once per session. 
Settings called from {calframe[1][3]}\",\n deferred=True,\n calframe=calframe,\n )\n )\n\n return SETTINGS\n", "path": "napari/utils/settings/_manager.py"}], "after_files": [{"content": "\"\"\"Settings management.\n\"\"\"\n\nimport json\nimport os\nimport warnings\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Optional, Union\n\nfrom appdirs import user_config_dir\nfrom yaml import safe_dump, safe_load\n\nfrom ...utils.translations import trans\nfrom .._base import _APPAUTHOR, _APPNAME, _FILENAME\nfrom ._defaults import CORE_SETTINGS as CORE_SETTINGS\nfrom ._defaults import (\n AppearanceSettings,\n ApplicationSettings,\n BaseNapariSettings,\n ExperimentalSettings,\n PluginsSettings,\n ShortcutsSettings,\n)\n\n\nclass _SettingsMixin:\n appearance: AppearanceSettings\n application: ApplicationSettings\n plugins: PluginsSettings\n shortcuts: ShortcutsSettings\n experimental: ExperimentalSettings\n\n\nclass SettingsManager(_SettingsMixin):\n \"\"\"\n Napari settings manager using evented SettingsModels.\n\n This provides the presistence layer for the application settings.\n\n Parameters\n ----------\n config_path : str, optional\n Provide the base folder to store napari configuration. Default is None,\n which will point to user config provided by `appdirs`.\n save_to_disk : bool, optional\n Persist settings on disk. Default is True.\n\n Notes\n -----\n The settings manager will create a new user configuration folder which is\n provided by `appdirs` in a cross platform manner. On the first startup a\n new configuration file will be created using the default values defined by\n the `CORE_SETTINGS` models.\n\n If a configuration file is found in the specified location, it will be\n loaded by the `_load` method. On configuration load the following checks\n are performed:\n\n - If invalid sections are found, these will be removed from the file.\n - If invalid keys are found within a valid section, these will be removed\n from the file.\n - If invalid values are found within valid sections and valid keys, these\n will be replaced by the default value provided by `CORE_SETTINGS`\n models.\n \"\"\"\n\n _FILENAME = _FILENAME\n _APPNAME = _APPNAME\n _APPAUTHOR = _APPAUTHOR\n\n def __init__(\n self,\n config_path: Optional[Path] = None,\n save_to_disk: bool = True,\n ):\n self._config_path = (\n Path(user_config_dir(self._APPNAME, self._APPAUTHOR))\n if config_path is None\n else Path(config_path)\n )\n self._save_to_disk = save_to_disk\n self._settings: Dict[str, BaseNapariSettings] = {}\n self._defaults: Dict[str, BaseNapariSettings] = {}\n self._plugins: List[str] = []\n self._env_settings: Dict[str, Any] = {}\n\n if not self._config_path.is_dir():\n os.makedirs(self._config_path)\n\n self._load()\n\n def __getattr__(self, attr):\n if attr in self._settings:\n return self._settings[attr]\n\n def __dir__(self):\n \"\"\"Add setting keys to make tab completion works.\"\"\"\n return list(super().__dir__()) + list(self._settings)\n\n def __str__(self):\n return safe_dump(self._to_dict(safe=True))\n\n def _remove_default(self, settings_data):\n \"\"\"\n Attempt to convert self to dict and to remove any default values from the configuration\n \"\"\"\n for section, values in settings_data.items():\n if section not in self._defaults:\n continue\n\n default_values = self._defaults[section].dict()\n for k, v in list(values.items()):\n if default_values.get(k, None) == v:\n del values[k]\n\n return settings_data\n\n def _to_dict(self, safe: bool = False) -> dict:\n \"\"\"Convert the settings to a 
dictionary.\"\"\"\n data = {}\n for section, model in self._settings.items():\n if safe:\n # We roundtrip to keep string objects (like SchemaVersion)\n # yaml representable\n data[section] = json.loads(model.json())\n else:\n data[section] = model.dict()\n\n return data\n\n def _save(self):\n \"\"\"Save configuration to disk.\"\"\"\n if self._save_to_disk:\n path = self.path / self._FILENAME\n\n if self._env_settings:\n # If using environment variables do not save them in the\n # `settings.yaml` file. We just delete any keys loaded\n # as environment variables\n data = self._to_dict(safe=True)\n for section, env_data in self._env_settings.items():\n for k, v in env_data.items():\n del data[section][k]\n else:\n data = self._to_dict(safe=True)\n\n with open(path, \"w\") as fh:\n fh.write(safe_dump(self._remove_default(data)))\n\n def _load(self):\n \"\"\"Read configuration from disk.\"\"\"\n path = self.path / self._FILENAME\n\n if path.is_file():\n try:\n with open(path) as fh:\n data = safe_load(fh.read()) or {}\n except Exception as err:\n warnings.warn(\n trans._(\n \"The content of the napari settings file could not be read\\n\\nThe default settings will be used and the content of the file will be replaced the next time settings are changed.\\n\\nError:\\n{err}\",\n deferred=True,\n err=err,\n )\n )\n data = {}\n\n # Load data once and save it in the base class\n BaseNapariSettings._LOADED_DATA = data\n\n for setting in CORE_SETTINGS:\n section = setting.schema().get(\"section\", None)\n if section is None:\n raise ValueError(\n trans._(\n \"Settings model {setting!r} must provide a `section` in the `schemas_extra`\",\n deferred=True,\n setting=setting,\n )\n )\n\n self._defaults[section] = setting(\n **{k: v.get_default() for k, v in setting.__fields__.items()}\n )\n model = setting()\n model.events.connect(lambda x: self._save())\n self._settings[section] = model\n self._env_settings[section] = getattr(\n model.__config__, \"_env_settings\"\n )(model)\n\n self._save()\n\n @property\n def path(self):\n return self._config_path\n\n def reset(self):\n \"\"\"Reset settings to default values.\"\"\"\n for section in self._settings:\n for key, default_value in self._defaults[section].dict().items():\n setattr(self._settings[section], key, default_value)\n\n self._save()\n\n def schemas(self) -> dict:\n \"\"\"Return the json schema for each of the settings model.\"\"\"\n schemas = {}\n for section, settings in self._settings.items():\n schemas[section] = {\n \"json_schema\": settings.schema_json(),\n \"model\": settings,\n }\n\n return schemas\n\n def register_plugin(self, plugin):\n \"\"\"Register plugin settings with the settings manager.\n\n Parameters\n ----------\n plugin\n The napari plugin that may or may not provide settings.\n \"\"\"\n self._plugins.append(plugin)\n\n\nclass _SettingsProxy(_SettingsMixin):\n \"\"\"Backwards compatibility layer.\"\"\"\n\n def __getattribute__(self, name) -> Any:\n return getattr(get_settings(), name)\n\n\nSETTINGS: Union[SettingsManager, _SettingsProxy] = _SettingsProxy()\n\n\ndef get_settings(path: Optional[Union[Path, str]] = None) -> SettingsManager:\n \"\"\"\n Get settings for a given path.\n\n Parameters\n ----------\n path: Path, optional\n The path to read/write the settings from.\n\n Returns\n -------\n SettingsManager\n The settings manager.\n\n Notes\n -----\n The path can only be set once per session.\n \"\"\"\n global SETTINGS\n\n if isinstance(SETTINGS, _SettingsProxy):\n config_path = Path(path).resolve() if path else None\n SETTINGS = 
SettingsManager(config_path=config_path)\n elif path is not None:\n import inspect\n\n curframe = inspect.currentframe()\n calframe = inspect.getouterframes(curframe, 2)\n raise Exception(\n trans._(\n \"The path can only be set once per session. Settings called from {calframe[1][3]}\",\n deferred=True,\n calframe=calframe,\n )\n )\n\n return SETTINGS\n", "path": "napari/utils/settings/_manager.py"}]}
| 3,249 | 177 |
gh_patches_debug_64224
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-809
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
get_func_args maximum recursion
https://github.com/scrapy/scrapy/blob/master/scrapy/utils/python.py#L149
Today I was working on a project where I had to skip the first item of a list and then join the rest. Instead of writing the typical slice I tried something much better looking, `Compose(itemgetter(slice(1, None)), Join())`, but I ran into this maximum recursion. I did some research and asked @dangra about it, but nothing came up.
I think the main problem is that `inspect` isn't able to recognize `itemgetter` as `something`.
``` python
>>> inspect.getmembers(itemgetter(2))
[('__call__',
<method-wrapper '__call__' of operator.itemgetter object at 0x7f79aeffb990>),
('__class__', <type 'operator.itemgetter'>),
('__delattr__',
<method-wrapper '__delattr__' of operator.itemgetter object at 0x7f79aeffb990>),
('__doc__',
'itemgetter(item, ...) --> itemgetter object\n\nReturn a callable object that fetches the given item(s) from its operand.\nAfter, f=itemgetter(2), the call f(r) returns r[2].\nAfter, g=itemgetter(2,5,3), the call g(r) returns (r[2], r[5], r[3])'),
('__format__',
<built-in method __format__ of operator.itemgetter object at 0x7f79aeffb990>),
('__getattribute__',
<method-wrapper '__getattribute__' of operator.itemgetter object at 0x7f79aeffb990>),
('__hash__',
<method-wrapper '__hash__' of operator.itemgetter object at 0x7f79aeffb990>),
('__init__',
<method-wrapper '__init__' of operator.itemgetter object at 0x7f79aeffb990>),
('__new__', <built-in method __new__ of type object at 0x8c1ec0>),
('__reduce__',
<built-in method __reduce__ of operator.itemgetter object at 0x7f79aeffb990>),
('__reduce_ex__',
<built-in method __reduce_ex__ of operator.itemgetter object at 0x7f79aeffb990>),
('__repr__',
<method-wrapper '__repr__' of operator.itemgetter object at 0x7f79aeffb990>),
('__setattr__',
<method-wrapper '__setattr__' of operator.itemgetter object at 0x7f79aeffb990>),
('__sizeof__',
<built-in method __sizeof__ of operator.itemgetter object at 0x7f79aeffb990>),
('__str__',
<method-wrapper '__str__' of operator.itemgetter object at 0x7f79aeffb990>),
('__subclasshook__',
<built-in method __subclasshook__ of type object at 0x8c1ec0>)]
>>> inspect.getargspec(itemgetter(2).__call__)
Traceback (most recent call last):
File "<console>", line 1, in <module>
File "/usr/lib/python2.7/inspect.py", line 815, in getargspec
raise TypeError('{!r} is not a Python function'.format(func))
TypeError: <method-wrapper '__call__' of operator.itemgetter object at 0xb3ddd0> is not a Python function
>>> inspect.getargspec(itemgetter(slice(None, 2)).__init__)
Traceback (most recent call last):
File "<console>", line 1, in <module>
File "/usr/lib/python2.7/inspect.py", line 815, in getargspec
raise TypeError('{!r} is not a Python function'.format(func))
TypeError: <method-wrapper '__init__' of operator.itemgetter object at 0xb3de10> is not a Python function
```
EDIT: Looks like the reason was that C functions weren't covered by the inspect module until Python 3.4 (http://bugs.python.org/issue17481)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/utils/python.py`
Content:
```
1 """
2 This module contains essential stuff that should've come with Python itself ;)
3
4 It also contains functions (or functionality) which is in Python versions
5 higher than 2.5 which used to be the lowest version supported by Scrapy.
6
7 """
8 import os
9 import re
10 import inspect
11 import weakref
12 import errno
13 import six
14 from functools import partial, wraps
15
16
17 def flatten(x):
18 """flatten(sequence) -> list
19
20 Returns a single, flat list which contains all elements retrieved
21 from the sequence and all recursively contained sub-sequences
22 (iterables).
23
24 Examples:
25 >>> [1, 2, [3,4], (5,6)]
26 [1, 2, [3, 4], (5, 6)]
27 >>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, (8,9,10)])
28 [1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]"""
29
30 result = []
31 for el in x:
32 if hasattr(el, "__iter__"):
33 result.extend(flatten(el))
34 else:
35 result.append(el)
36 return result
37
38
39 def unique(list_, key=lambda x: x):
40 """efficient function to uniquify a list preserving item order"""
41 seen = set()
42 result = []
43 for item in list_:
44 seenkey = key(item)
45 if seenkey in seen:
46 continue
47 seen.add(seenkey)
48 result.append(item)
49 return result
50
51
52 def str_to_unicode(text, encoding=None, errors='strict'):
53 """Return the unicode representation of text in the given encoding. Unlike
54 .encode(encoding) this function can be applied directly to a unicode
55 object without the risk of double-decoding problems (which can happen if
56 you don't use the default 'ascii' encoding)
57 """
58
59 if encoding is None:
60 encoding = 'utf-8'
61 if isinstance(text, str):
62 return text.decode(encoding, errors)
63 elif isinstance(text, unicode):
64 return text
65 else:
66 raise TypeError('str_to_unicode must receive a str or unicode object, got %s' % type(text).__name__)
67
68 def unicode_to_str(text, encoding=None, errors='strict'):
69 """Return the str representation of text in the given encoding. Unlike
70 .encode(encoding) this function can be applied directly to a str
71 object without the risk of double-decoding problems (which can happen if
72 you don't use the default 'ascii' encoding)
73 """
74
75 if encoding is None:
76 encoding = 'utf-8'
77 if isinstance(text, unicode):
78 return text.encode(encoding, errors)
79 elif isinstance(text, str):
80 return text
81 else:
82 raise TypeError('unicode_to_str must receive a unicode or str object, got %s' % type(text).__name__)
83
84 def re_rsearch(pattern, text, chunk_size=1024):
85 """
86 This function does a reverse search in a text using a regular expression
87 given in the attribute 'pattern'.
88 Since the re module does not provide this functionality, we have to find for
89 the expression into chunks of text extracted from the end (for the sake of efficiency).
90 At first, a chunk of 'chunk_size' kilobytes is extracted from the end, and searched for
91 the pattern. If the pattern is not found, another chunk is extracted, and another
92 search is performed.
93 This process continues until a match is found, or until the whole file is read.
94 In case the pattern wasn't found, None is returned, otherwise it returns a tuple containing
95 the start position of the match, and the ending (regarding the entire text).
96 """
97 def _chunk_iter():
98 offset = len(text)
99 while True:
100 offset -= (chunk_size * 1024)
101 if offset <= 0:
102 break
103 yield (text[offset:], offset)
104 yield (text, 0)
105
106 pattern = re.compile(pattern) if isinstance(pattern, basestring) else pattern
107 for chunk, offset in _chunk_iter():
108 matches = [match for match in pattern.finditer(chunk)]
109 if matches:
110 return (offset + matches[-1].span()[0], offset + matches[-1].span()[1])
111 return None
112
113 def memoizemethod_noargs(method):
114 """Decorator to cache the result of a method (without arguments) using a
115 weak reference to its object
116 """
117 cache = weakref.WeakKeyDictionary()
118 @wraps(method)
119 def new_method(self, *args, **kwargs):
120 if self not in cache:
121 cache[self] = method(self, *args, **kwargs)
122 return cache[self]
123 return new_method
124
125 _BINARYCHARS = set(map(chr, range(32))) - set(["\0", "\t", "\n", "\r"])
126
127 def isbinarytext(text):
128 """Return True if the given text is considered binary, or false
129 otherwise, by looking for binary bytes at their chars
130 """
131 assert isinstance(text, str), "text must be str, got '%s'" % type(text).__name__
132 return any(c in _BINARYCHARS for c in text)
133
134 def get_func_args(func, stripself=False):
135 """Return the argument name list of a callable"""
136 if inspect.isfunction(func):
137 func_args, _, _, _ = inspect.getargspec(func)
138 elif inspect.isclass(func):
139 return get_func_args(func.__init__, True)
140 elif inspect.ismethod(func):
141 return get_func_args(func.__func__, True)
142 elif inspect.ismethoddescriptor(func):
143 return []
144 elif isinstance(func, partial):
145 return [x for x in get_func_args(func.func)[len(func.args):]
146 if not (func.keywords and x in func.keywords)]
147 elif hasattr(func, '__call__'):
148 if inspect.isroutine(func):
149 return []
150 else:
151 return get_func_args(func.__call__, True)
152 else:
153 raise TypeError('%s is not callable' % type(func))
154 if stripself:
155 func_args.pop(0)
156 return func_args
157
158 def get_spec(func):
159 """Returns (args, kwargs) tuple for a function
160 >>> import re
161 >>> get_spec(re.match)
162 (['pattern', 'string'], {'flags': 0})
163
164 >>> class Test(object):
165 ... def __call__(self, val):
166 ... pass
167 ... def method(self, val, flags=0):
168 ... pass
169
170 >>> get_spec(Test)
171 (['self', 'val'], {})
172
173 >>> get_spec(Test.method)
174 (['self', 'val'], {'flags': 0})
175
176 >>> get_spec(Test().method)
177 (['self', 'val'], {'flags': 0})
178 """
179
180 if inspect.isfunction(func) or inspect.ismethod(func):
181 spec = inspect.getargspec(func)
182 elif hasattr(func, '__call__'):
183 spec = inspect.getargspec(func.__call__)
184 else:
185 raise TypeError('%s is not callable' % type(func))
186
187 defaults = spec.defaults or []
188
189 firstdefault = len(spec.args) - len(defaults)
190 args = spec.args[:firstdefault]
191 kwargs = dict(zip(spec.args[firstdefault:], defaults))
192 return args, kwargs
193
194 def equal_attributes(obj1, obj2, attributes):
195 """Compare two objects attributes"""
196 # not attributes given return False by default
197 if not attributes:
198 return False
199
200 for attr in attributes:
201 # support callables like itemgetter
202 if callable(attr):
203 if not attr(obj1) == attr(obj2):
204 return False
205 else:
206 # check that objects has attribute
207 if not hasattr(obj1, attr):
208 return False
209 if not hasattr(obj2, attr):
210 return False
211 # compare object attributes
212 if not getattr(obj1, attr) == getattr(obj2, attr):
213 return False
214 # all attributes equal
215 return True
216
217
218 class WeakKeyCache(object):
219
220 def __init__(self, default_factory):
221 self.default_factory = default_factory
222 self._weakdict = weakref.WeakKeyDictionary()
223
224 def __getitem__(self, key):
225 if key not in self._weakdict:
226 self._weakdict[key] = self.default_factory(key)
227 return self._weakdict[key]
228
229
230 def stringify_dict(dct_or_tuples, encoding='utf-8', keys_only=True):
231 """Return a (new) dict with the unicode keys (and values if, keys_only is
232 False) of the given dict converted to strings. `dct_or_tuples` can be a
233 dict or a list of tuples, like any dict constructor supports.
234 """
235 d = {}
236 for k, v in six.iteritems(dict(dct_or_tuples)):
237 k = k.encode(encoding) if isinstance(k, unicode) else k
238 if not keys_only:
239 v = v.encode(encoding) if isinstance(v, unicode) else v
240 d[k] = v
241 return d
242
243 def is_writable(path):
244 """Return True if the given path can be written (if it exists) or created
245 (if it doesn't exist)
246 """
247 if os.path.exists(path):
248 return os.access(path, os.W_OK)
249 else:
250 return os.access(os.path.dirname(path), os.W_OK)
251
252 def setattr_default(obj, name, value):
253 """Set attribute value, but only if it's not already set. Similar to
254 setdefault() for dicts.
255 """
256 if not hasattr(obj, name):
257 setattr(obj, name, value)
258
259
260 def retry_on_eintr(function, *args, **kw):
261 """Run a function and retry it while getting EINTR errors"""
262 while True:
263 try:
264 return function(*args, **kw)
265 except IOError as e:
266 if e.errno != errno.EINTR:
267 raise
268
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scrapy/utils/python.py b/scrapy/utils/python.py
--- a/scrapy/utils/python.py
+++ b/scrapy/utils/python.py
@@ -147,6 +147,8 @@
elif hasattr(func, '__call__'):
if inspect.isroutine(func):
return []
+ elif getattr(func, '__name__', None) == '__call__':
+ return []
else:
return get_func_args(func.__call__, True)
else:
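
The new `__name__ == '__call__'` guard works because C-level callables such as `operator.itemgetter` expose their call slot as a `method-wrapper`, and every such wrapper exposes yet another `__call__` wrapper, so the old code could chase `func.__call__` forever on interpreters whose `inspect` cannot introspect these objects (anything before Python 3.4, including the Python 2 this module targeted). A small illustration on CPython (exact reprs may vary by version):

```python
from operator import itemgetter

getter = itemgetter(2)

print(type(getter.__call__))           # <class 'method-wrapper'>
print(type(getter.__call__.__call__))  # <class 'method-wrapper'> again, and so on
print(getter.__call__.__name__)        # '__call__'  -> the patched guard returns [] here
```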
|
{"golden_diff": "diff --git a/scrapy/utils/python.py b/scrapy/utils/python.py\n--- a/scrapy/utils/python.py\n+++ b/scrapy/utils/python.py\n@@ -147,6 +147,8 @@\n elif hasattr(func, '__call__'):\n if inspect.isroutine(func):\n return []\n+ elif getattr(func, '__name__', None) == '__call__':\n+ return []\n else:\n return get_func_args(func.__call__, True)\n else:\n", "issue": "get_func_args maximum recursion\nhttps://github.com/scrapy/scrapy/blob/master/scrapy/utils/python.py#L149\n\nToday I was working on a project were I have to skip the first item of a list, and then join the rest. Instead of writing the typical slice I tried something much more good looking `Compose(itemgetter(slice(1, None)), Join())` but I found out this maximum recursion. I did some research and ask @dangra about it, but nothing came up.\nI think the main problem is that `inspect` isn't able recognize `itemgetter` as `something`.\n\n``` python\n>>> inspect.getmembers(itemgetter(2))\n[('__call__',\n <method-wrapper '__call__' of operator.itemgetter object at 0x7f79aeffb990>),\n ('__class__', <type 'operator.itemgetter'>),\n ('__delattr__',\n <method-wrapper '__delattr__' of operator.itemgetter object at 0x7f79aeffb990>),\n ('__doc__',\n 'itemgetter(item, ...) --> itemgetter object\\n\\nReturn a callable object that fetches the given item(s) from its operand.\\nAfter, f=itemgetter(2), the call f(r) returns r[2].\\nAfter, g=itemgetter(2,5,3), the call g(r) returns (r[2], r[5], r[3])'),\n ('__format__',\n <built-in method __format__ of operator.itemgetter object at 0x7f79aeffb990>),\n ('__getattribute__',\n <method-wrapper '__getattribute__' of operator.itemgetter object at 0x7f79aeffb990>),\n ('__hash__',\n <method-wrapper '__hash__' of operator.itemgetter object at 0x7f79aeffb990>),\n ('__init__',\n <method-wrapper '__init__' of operator.itemgetter object at 0x7f79aeffb990>),\n ('__new__', <built-in method __new__ of type object at 0x8c1ec0>),\n ('__reduce__',\n <built-in method __reduce__ of operator.itemgetter object at 0x7f79aeffb990>),\n ('__reduce_ex__',\n <built-in method __reduce_ex__ of operator.itemgetter object at 0x7f79aeffb990>),\n ('__repr__',\n <method-wrapper '__repr__' of operator.itemgetter object at 0x7f79aeffb990>),\n ('__setattr__',\n <method-wrapper '__setattr__' of operator.itemgetter object at 0x7f79aeffb990>),\n ('__sizeof__',\n <built-in method __sizeof__ of operator.itemgetter object at 0x7f79aeffb990>),\n ('__str__',\n <method-wrapper '__str__' of operator.itemgetter object at 0x7f79aeffb990>),\n ('__subclasshook__',\n <built-in method __subclasshook__ of type object at 0x8c1ec0>)]\n>>> inspect.getargspec(itemgetter(2).__call__)\nTraceback (most recent call last):\n File \"<console>\", line 1, in <module>\n File \"/usr/lib/python2.7/inspect.py\", line 815, in getargspec\n raise TypeError('{!r} is not a Python function'.format(func))\nTypeError: <method-wrapper '__call__' of operator.itemgetter object at 0xb3ddd0> is not a Python function\n>>> inspect.getargspec(itemgetter(slice(None, 2)).__init__)\nTraceback (most recent call last):\n File \"<console>\", line 1, in <module>\n File \"/usr/lib/python2.7/inspect.py\", line 815, in getargspec\n raise TypeError('{!r} is not a Python function'.format(func))\nTypeError: <method-wrapper '__init__' of operator.itemgetter object at 0xb3de10> is not a Python function\n```\n\nEDIT: Looks like the reason was C functions weren't covered by inspect module until Python 3.4 (http://bugs.python.org/issue17481)\n\n", "before_files": [{"content": "\"\"\"\nThis 
module contains essential stuff that should've come with Python itself ;)\n\nIt also contains functions (or functionality) which is in Python versions\nhigher than 2.5 which used to be the lowest version supported by Scrapy.\n\n\"\"\"\nimport os\nimport re\nimport inspect\nimport weakref\nimport errno\nimport six\nfrom functools import partial, wraps\n\n\ndef flatten(x):\n \"\"\"flatten(sequence) -> list\n\n Returns a single, flat list which contains all elements retrieved\n from the sequence and all recursively contained sub-sequences\n (iterables).\n\n Examples:\n >>> [1, 2, [3,4], (5,6)]\n [1, 2, [3, 4], (5, 6)]\n >>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, (8,9,10)])\n [1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]\"\"\"\n\n result = []\n for el in x:\n if hasattr(el, \"__iter__\"):\n result.extend(flatten(el))\n else:\n result.append(el)\n return result\n\n\ndef unique(list_, key=lambda x: x):\n \"\"\"efficient function to uniquify a list preserving item order\"\"\"\n seen = set()\n result = []\n for item in list_:\n seenkey = key(item)\n if seenkey in seen:\n continue\n seen.add(seenkey)\n result.append(item)\n return result\n\n\ndef str_to_unicode(text, encoding=None, errors='strict'):\n \"\"\"Return the unicode representation of text in the given encoding. Unlike\n .encode(encoding) this function can be applied directly to a unicode\n object without the risk of double-decoding problems (which can happen if\n you don't use the default 'ascii' encoding)\n \"\"\"\n\n if encoding is None:\n encoding = 'utf-8'\n if isinstance(text, str):\n return text.decode(encoding, errors)\n elif isinstance(text, unicode):\n return text\n else:\n raise TypeError('str_to_unicode must receive a str or unicode object, got %s' % type(text).__name__)\n\ndef unicode_to_str(text, encoding=None, errors='strict'):\n \"\"\"Return the str representation of text in the given encoding. Unlike\n .encode(encoding) this function can be applied directly to a str\n object without the risk of double-decoding problems (which can happen if\n you don't use the default 'ascii' encoding)\n \"\"\"\n\n if encoding is None:\n encoding = 'utf-8'\n if isinstance(text, unicode):\n return text.encode(encoding, errors)\n elif isinstance(text, str):\n return text\n else:\n raise TypeError('unicode_to_str must receive a unicode or str object, got %s' % type(text).__name__)\n\ndef re_rsearch(pattern, text, chunk_size=1024):\n \"\"\"\n This function does a reverse search in a text using a regular expression\n given in the attribute 'pattern'.\n Since the re module does not provide this functionality, we have to find for\n the expression into chunks of text extracted from the end (for the sake of efficiency).\n At first, a chunk of 'chunk_size' kilobytes is extracted from the end, and searched for\n the pattern. 
If the pattern is not found, another chunk is extracted, and another\n search is performed.\n This process continues until a match is found, or until the whole file is read.\n In case the pattern wasn't found, None is returned, otherwise it returns a tuple containing\n the start position of the match, and the ending (regarding the entire text).\n \"\"\"\n def _chunk_iter():\n offset = len(text)\n while True:\n offset -= (chunk_size * 1024)\n if offset <= 0:\n break\n yield (text[offset:], offset)\n yield (text, 0)\n\n pattern = re.compile(pattern) if isinstance(pattern, basestring) else pattern\n for chunk, offset in _chunk_iter():\n matches = [match for match in pattern.finditer(chunk)]\n if matches:\n return (offset + matches[-1].span()[0], offset + matches[-1].span()[1])\n return None\n\ndef memoizemethod_noargs(method):\n \"\"\"Decorator to cache the result of a method (without arguments) using a\n weak reference to its object\n \"\"\"\n cache = weakref.WeakKeyDictionary()\n @wraps(method)\n def new_method(self, *args, **kwargs):\n if self not in cache:\n cache[self] = method(self, *args, **kwargs)\n return cache[self]\n return new_method\n\n_BINARYCHARS = set(map(chr, range(32))) - set([\"\\0\", \"\\t\", \"\\n\", \"\\r\"])\n\ndef isbinarytext(text):\n \"\"\"Return True if the given text is considered binary, or false\n otherwise, by looking for binary bytes at their chars\n \"\"\"\n assert isinstance(text, str), \"text must be str, got '%s'\" % type(text).__name__\n return any(c in _BINARYCHARS for c in text)\n\ndef get_func_args(func, stripself=False):\n \"\"\"Return the argument name list of a callable\"\"\"\n if inspect.isfunction(func):\n func_args, _, _, _ = inspect.getargspec(func)\n elif inspect.isclass(func):\n return get_func_args(func.__init__, True)\n elif inspect.ismethod(func):\n return get_func_args(func.__func__, True)\n elif inspect.ismethoddescriptor(func):\n return []\n elif isinstance(func, partial):\n return [x for x in get_func_args(func.func)[len(func.args):]\n if not (func.keywords and x in func.keywords)]\n elif hasattr(func, '__call__'):\n if inspect.isroutine(func):\n return []\n else:\n return get_func_args(func.__call__, True)\n else:\n raise TypeError('%s is not callable' % type(func))\n if stripself:\n func_args.pop(0)\n return func_args\n\ndef get_spec(func):\n \"\"\"Returns (args, kwargs) tuple for a function\n >>> import re\n >>> get_spec(re.match)\n (['pattern', 'string'], {'flags': 0})\n\n >>> class Test(object):\n ... def __call__(self, val):\n ... pass\n ... def method(self, val, flags=0):\n ... 
pass\n\n >>> get_spec(Test)\n (['self', 'val'], {})\n\n >>> get_spec(Test.method)\n (['self', 'val'], {'flags': 0})\n\n >>> get_spec(Test().method)\n (['self', 'val'], {'flags': 0})\n \"\"\"\n\n if inspect.isfunction(func) or inspect.ismethod(func):\n spec = inspect.getargspec(func)\n elif hasattr(func, '__call__'):\n spec = inspect.getargspec(func.__call__)\n else:\n raise TypeError('%s is not callable' % type(func))\n\n defaults = spec.defaults or []\n\n firstdefault = len(spec.args) - len(defaults)\n args = spec.args[:firstdefault]\n kwargs = dict(zip(spec.args[firstdefault:], defaults))\n return args, kwargs\n\ndef equal_attributes(obj1, obj2, attributes):\n \"\"\"Compare two objects attributes\"\"\"\n # not attributes given return False by default\n if not attributes:\n return False\n\n for attr in attributes:\n # support callables like itemgetter\n if callable(attr):\n if not attr(obj1) == attr(obj2):\n return False\n else:\n # check that objects has attribute\n if not hasattr(obj1, attr):\n return False\n if not hasattr(obj2, attr):\n return False\n # compare object attributes\n if not getattr(obj1, attr) == getattr(obj2, attr):\n return False\n # all attributes equal\n return True\n\n\nclass WeakKeyCache(object):\n\n def __init__(self, default_factory):\n self.default_factory = default_factory\n self._weakdict = weakref.WeakKeyDictionary()\n\n def __getitem__(self, key):\n if key not in self._weakdict:\n self._weakdict[key] = self.default_factory(key)\n return self._weakdict[key]\n\n\ndef stringify_dict(dct_or_tuples, encoding='utf-8', keys_only=True):\n \"\"\"Return a (new) dict with the unicode keys (and values if, keys_only is\n False) of the given dict converted to strings. `dct_or_tuples` can be a\n dict or a list of tuples, like any dict constructor supports.\n \"\"\"\n d = {}\n for k, v in six.iteritems(dict(dct_or_tuples)):\n k = k.encode(encoding) if isinstance(k, unicode) else k\n if not keys_only:\n v = v.encode(encoding) if isinstance(v, unicode) else v\n d[k] = v\n return d\n\ndef is_writable(path):\n \"\"\"Return True if the given path can be written (if it exists) or created\n (if it doesn't exist)\n \"\"\"\n if os.path.exists(path):\n return os.access(path, os.W_OK)\n else:\n return os.access(os.path.dirname(path), os.W_OK)\n\ndef setattr_default(obj, name, value):\n \"\"\"Set attribute value, but only if it's not already set. 
Similar to\n setdefault() for dicts.\n \"\"\"\n if not hasattr(obj, name):\n setattr(obj, name, value)\n\n\ndef retry_on_eintr(function, *args, **kw):\n \"\"\"Run a function and retry it while getting EINTR errors\"\"\"\n while True:\n try:\n return function(*args, **kw)\n except IOError as e:\n if e.errno != errno.EINTR:\n raise\n", "path": "scrapy/utils/python.py"}], "after_files": [{"content": "\"\"\"\nThis module contains essential stuff that should've come with Python itself ;)\n\nIt also contains functions (or functionality) which is in Python versions\nhigher than 2.5 which used to be the lowest version supported by Scrapy.\n\n\"\"\"\nimport os\nimport re\nimport inspect\nimport weakref\nimport errno\nimport six\nfrom functools import partial, wraps\n\n\ndef flatten(x):\n \"\"\"flatten(sequence) -> list\n\n Returns a single, flat list which contains all elements retrieved\n from the sequence and all recursively contained sub-sequences\n (iterables).\n\n Examples:\n >>> [1, 2, [3,4], (5,6)]\n [1, 2, [3, 4], (5, 6)]\n >>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, (8,9,10)])\n [1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]\"\"\"\n\n result = []\n for el in x:\n if hasattr(el, \"__iter__\"):\n result.extend(flatten(el))\n else:\n result.append(el)\n return result\n\n\ndef unique(list_, key=lambda x: x):\n \"\"\"efficient function to uniquify a list preserving item order\"\"\"\n seen = set()\n result = []\n for item in list_:\n seenkey = key(item)\n if seenkey in seen:\n continue\n seen.add(seenkey)\n result.append(item)\n return result\n\n\ndef str_to_unicode(text, encoding=None, errors='strict'):\n \"\"\"Return the unicode representation of text in the given encoding. Unlike\n .encode(encoding) this function can be applied directly to a unicode\n object without the risk of double-decoding problems (which can happen if\n you don't use the default 'ascii' encoding)\n \"\"\"\n\n if encoding is None:\n encoding = 'utf-8'\n if isinstance(text, str):\n return text.decode(encoding, errors)\n elif isinstance(text, unicode):\n return text\n else:\n raise TypeError('str_to_unicode must receive a str or unicode object, got %s' % type(text).__name__)\n\ndef unicode_to_str(text, encoding=None, errors='strict'):\n \"\"\"Return the str representation of text in the given encoding. Unlike\n .encode(encoding) this function can be applied directly to a str\n object without the risk of double-decoding problems (which can happen if\n you don't use the default 'ascii' encoding)\n \"\"\"\n\n if encoding is None:\n encoding = 'utf-8'\n if isinstance(text, unicode):\n return text.encode(encoding, errors)\n elif isinstance(text, str):\n return text\n else:\n raise TypeError('unicode_to_str must receive a unicode or str object, got %s' % type(text).__name__)\n\ndef re_rsearch(pattern, text, chunk_size=1024):\n \"\"\"\n This function does a reverse search in a text using a regular expression\n given in the attribute 'pattern'.\n Since the re module does not provide this functionality, we have to find for\n the expression into chunks of text extracted from the end (for the sake of efficiency).\n At first, a chunk of 'chunk_size' kilobytes is extracted from the end, and searched for\n the pattern. 
If the pattern is not found, another chunk is extracted, and another\n search is performed.\n This process continues until a match is found, or until the whole file is read.\n In case the pattern wasn't found, None is returned, otherwise it returns a tuple containing\n the start position of the match, and the ending (regarding the entire text).\n \"\"\"\n def _chunk_iter():\n offset = len(text)\n while True:\n offset -= (chunk_size * 1024)\n if offset <= 0:\n break\n yield (text[offset:], offset)\n yield (text, 0)\n\n pattern = re.compile(pattern) if isinstance(pattern, basestring) else pattern\n for chunk, offset in _chunk_iter():\n matches = [match for match in pattern.finditer(chunk)]\n if matches:\n return (offset + matches[-1].span()[0], offset + matches[-1].span()[1])\n return None\n\ndef memoizemethod_noargs(method):\n \"\"\"Decorator to cache the result of a method (without arguments) using a\n weak reference to its object\n \"\"\"\n cache = weakref.WeakKeyDictionary()\n @wraps(method)\n def new_method(self, *args, **kwargs):\n if self not in cache:\n cache[self] = method(self, *args, **kwargs)\n return cache[self]\n return new_method\n\n_BINARYCHARS = set(map(chr, range(32))) - set([\"\\0\", \"\\t\", \"\\n\", \"\\r\"])\n\ndef isbinarytext(text):\n \"\"\"Return True if the given text is considered binary, or false\n otherwise, by looking for binary bytes at their chars\n \"\"\"\n assert isinstance(text, str), \"text must be str, got '%s'\" % type(text).__name__\n return any(c in _BINARYCHARS for c in text)\n\ndef get_func_args(func, stripself=False):\n \"\"\"Return the argument name list of a callable\"\"\"\n if inspect.isfunction(func):\n func_args, _, _, _ = inspect.getargspec(func)\n elif inspect.isclass(func):\n return get_func_args(func.__init__, True)\n elif inspect.ismethod(func):\n return get_func_args(func.__func__, True)\n elif inspect.ismethoddescriptor(func):\n return []\n elif isinstance(func, partial):\n return [x for x in get_func_args(func.func)[len(func.args):]\n if not (func.keywords and x in func.keywords)]\n elif hasattr(func, '__call__'):\n if inspect.isroutine(func):\n return []\n elif getattr(func, '__name__', None) == '__call__':\n return []\n else:\n return get_func_args(func.__call__, True)\n else:\n raise TypeError('%s is not callable' % type(func))\n if stripself:\n func_args.pop(0)\n return func_args\n\ndef get_spec(func):\n \"\"\"Returns (args, kwargs) tuple for a function\n >>> import re\n >>> get_spec(re.match)\n (['pattern', 'string'], {'flags': 0})\n\n >>> class Test(object):\n ... def __call__(self, val):\n ... pass\n ... def method(self, val, flags=0):\n ... 
pass\n\n >>> get_spec(Test)\n (['self', 'val'], {})\n\n >>> get_spec(Test.method)\n (['self', 'val'], {'flags': 0})\n\n >>> get_spec(Test().method)\n (['self', 'val'], {'flags': 0})\n \"\"\"\n\n if inspect.isfunction(func) or inspect.ismethod(func):\n spec = inspect.getargspec(func)\n elif hasattr(func, '__call__'):\n spec = inspect.getargspec(func.__call__)\n else:\n raise TypeError('%s is not callable' % type(func))\n\n defaults = spec.defaults or []\n\n firstdefault = len(spec.args) - len(defaults)\n args = spec.args[:firstdefault]\n kwargs = dict(zip(spec.args[firstdefault:], defaults))\n return args, kwargs\n\ndef equal_attributes(obj1, obj2, attributes):\n \"\"\"Compare two objects attributes\"\"\"\n # not attributes given return False by default\n if not attributes:\n return False\n\n for attr in attributes:\n # support callables like itemgetter\n if callable(attr):\n if not attr(obj1) == attr(obj2):\n return False\n else:\n # check that objects has attribute\n if not hasattr(obj1, attr):\n return False\n if not hasattr(obj2, attr):\n return False\n # compare object attributes\n if not getattr(obj1, attr) == getattr(obj2, attr):\n return False\n # all attributes equal\n return True\n\n\nclass WeakKeyCache(object):\n\n def __init__(self, default_factory):\n self.default_factory = default_factory\n self._weakdict = weakref.WeakKeyDictionary()\n\n def __getitem__(self, key):\n if key not in self._weakdict:\n self._weakdict[key] = self.default_factory(key)\n return self._weakdict[key]\n\n\ndef stringify_dict(dct_or_tuples, encoding='utf-8', keys_only=True):\n \"\"\"Return a (new) dict with the unicode keys (and values if, keys_only is\n False) of the given dict converted to strings. `dct_or_tuples` can be a\n dict or a list of tuples, like any dict constructor supports.\n \"\"\"\n d = {}\n for k, v in six.iteritems(dict(dct_or_tuples)):\n k = k.encode(encoding) if isinstance(k, unicode) else k\n if not keys_only:\n v = v.encode(encoding) if isinstance(v, unicode) else v\n d[k] = v\n return d\n\ndef is_writable(path):\n \"\"\"Return True if the given path can be written (if it exists) or created\n (if it doesn't exist)\n \"\"\"\n if os.path.exists(path):\n return os.access(path, os.W_OK)\n else:\n return os.access(os.path.dirname(path), os.W_OK)\n\ndef setattr_default(obj, name, value):\n \"\"\"Set attribute value, but only if it's not already set. Similar to\n setdefault() for dicts.\n \"\"\"\n if not hasattr(obj, name):\n setattr(obj, name, value)\n\n\ndef retry_on_eintr(function, *args, **kw):\n \"\"\"Run a function and retry it while getting EINTR errors\"\"\"\n while True:\n try:\n return function(*args, **kw)\n except IOError as e:\n if e.errno != errno.EINTR:\n raise\n", "path": "scrapy/utils/python.py"}]}
| 4,048 | 101 |
gh_patches_debug_25393
|
rasdani/github-patches
|
git_diff
|
kivy__python-for-android-2841
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
vlc recipe build fails
### Logs
A first build fails with
```
[INFO]: Building vlc for arm64-v8a
[INFO]: -> directory context /home/bobf/ex/hello/.buildozer/android/platform/build-arm64-v8a/build/other_builds/vlc/arm64-v8a__ndk_target_21/vlc/vlc-port-android
[INFO]: compiling vlc from sources
[DEBUG]: environment: {'SHELL': '/bin/bash', 'WSL2_GUI_APPS_ENABLED': '1', 'WSL_DISTRO_NAME': 'Ubuntu-22.04', 'WT_SESSION': '15e13e88-1c44-49f6-a30f-3567f5691f5f', 'NAME': 'DESK', 'PWD': '/home/bobf/ex/hello', 'LOGNAME': 'bobf', 'KIVY': '/mnt/c/users/bobf/documents/kivy', 'UIMD': '/mnt/c/users/bobf/documents/pm/uimd', 'HOME': '/home/bobf', 'LANG': 'C.UTF-8', 'WSL_INTEROP': '/run/WSL/9_interop', 'LS_COLORS': 'rs=0:di=01;34:ln=01;36:mh=00:pi=40;33:so=01;35:do=01;35:bd=40;33;01:cd=40;33;01:or=40;31;01:mi=00:su=37;41:sg=30;43:ca=30;41:tw=30;42:ow=34;42:st=37;44:ex=01;32:*.tar=01;31:*.tgz=01;31:*.arc=01;31:*.arj=01;31:*.taz=01;31:*.lha=01;31:*.lz4=01;31:*.lzh=01;31:*.lzma=01;31:*.tlz=01;31:*.txz=01;31:*.tzo=01;31:*.t7z=01;31:*.zip=01;31:*.z=01;31:*.dz=01;31:*.gz=01;31:*.lrz=01;31:*.lz=01;31:*.lzo=01;31:*.xz=01;31:*.zst=01;31:*.tzst=01;31:*.bz2=01;31:*.bz=01;31:*.tbz=01;31:*.tbz2=01;31:*.tz=01;31:*.deb=01;31:*.rpm=01;31:*.jar=01;31:*.war=01;31:*.ear=01;31:*.sar=01;31:*.rar=01;31:*.alz=01;31:*.ace=01;31:*.zoo=01;31:*.cpio=01;31:*.7z=01;31:*.rz=01;31:*.cab=01;31:*.wim=01;31:*.swm=01;31:*.dwm=01;31:*.esd=01;31:*.jpg=01;35:*.jpeg=01;35:*.mjpg=01;35:*.mjpeg=01;35:*.gif=01;35:*.bmp=01;35:*.pbm=01;35:*.pgm=01;35:*.ppm=01;35:*.tga=01;35:*.xbm=01;35:*.xpm=01;35:*.tif=01;35:*.tiff=01;35:*.png=01;35:*.svg=01;35:*.svgz=01;35:*.mng=01;35:*.pcx=01;35:*.mov=01;35:*.mpg=01;35:*.mpeg=01;35:*.m2v=01;35:*.mkv=01;35:*.webm=01;35:*.webp=01;35:*.ogm=01;35:*.mp4=01;35:*.m4v=01;35:*.mp4v=01;35:*.vob=01;35:*.qt=01;35:*.nuv=01;35:*.wmv=01;35:*.asf=01;35:*.rm=01;35:*.rmvb=01;35:*.flc=01;35:*.avi=01;35:*.fli=01;35:*.flv=01;35:*.gl=01;35:*.dl=01;35:*.xcf=01;35:*.xwd=01;35:*.yuv=01;35:*.cgm=01;35:*.emf=01;35:*.ogv=01;35:*.ogx=01;35:*.aac=00;36:*.au=00;36:*.flac=00;36:*.m4a=00;36:*.mid=00;36:*.midi=00;36:*.mka=00;36:*.mp3=00;36:*.mpc=00;36:*.ogg=00;36:*.ra=00;36:*.wav=00;36:*.oga=00;36:*.opus=00;36:*.spx=00;36:*.xspf=00;36:', 'WAYLAND_DISPLAY': 'wayland-0', 'LESSCLOSE': '/usr/bin/lesspipe %s %s', 'TERM': 'xterm-256color', 'LESSOPEN': '| /usr/bin/lesspipe %s', 'USER': 'bobf', 'WIN': '/mnt/c/users/bobf', 'DISPLAY': ':0', 'SHLVL': '1', 'XDG_RUNTIME_DIR': '/mnt/wslg/runtime-dir', 'WSLENV': 'WT_SESSION::WT_PROFILE_ID', 'XDG_DATA_DIRS': '/usr/local/share:/usr/share:/var/lib/snapd/desktop', 'GITHUB': '/mnt/c/users/bobf/documents/GitHub', 'PATH': '/home/bobf/.buildozer/android/platform/apache-ant-1.9.4/bin:/home/bobf/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/usr/lib/wsl/lib:/mnt/c/Program Files (x86)/Common Files/Oracle/Java/javapath:/mnt/c/WINDOWS/system32:/mnt/c/WINDOWS:/mnt/c/WINDOWS/System32/Wbem:/mnt/c/WINDOWS/System32/WindowsPowerShell/v1.0/:/mnt/c/WINDOWS/System32/OpenSSH/:/mnt/c/Program Files/Git/cmd:/mnt/c/Program Files/dotnet/:/mnt/c/Users/Bobf/AppData/Local/Programs/Python/Python311/Scripts/:/mnt/c/Users/Bobf/AppData/Local/Programs/Python/Python311/:/mnt/c/Users/Bobf/AppData/Local/Programs/Python/Python310/Scripts/:/mnt/c/Users/Bobf/AppData/Local/Programs/Python/Python310/:/mnt/c/Users/Bobf/AppData/Local/Microsoft/WindowsApps:/mnt/c/Users/Bobf/AppData/Local/Android/Sdk/platform-tools:/mnt/c/Users/Bobf/AppData/Local/Android/Sdk/emulator:/mnt/c/Program Files/MySQL/MySQL Server 8.0/bin:/mnt/c/tools/kotlinc/bin:/mnt/c/Users/Bobf/AppData/Local/Programs/MiKTeX/miktex/bin/x64/:/mnt/c/Users/Bobf/AppData/Local/GitHubDesktop/bin:/mnt/c/Tools/ffmpeg-2023-01-25-essentials/bin:/snap/bin:/home/bobf/.local/bin/', 'HOSTTYPE': 'x86_64', 'PULSE_SERVER': 'unix:/mnt/wslg/PulseServer', 
'WT_PROFILE_ID': '{d7b20cea-47a9-518c-95a4-c8bd91e2e1c6}', 'OLDPWD': '/home/bobf/ex/regression', '_': '/home/bobf/.local/bin/buildozer', 'PACKAGES_PATH': '/home/bobf/.buildozer/android/packages', 'ANDROIDSDK': '/home/bobf/.buildozer/android/platform/android-sdk', 'ANDROIDNDK': '/home/bobf/.buildozer/android/platform/android-ndk-r25b', 'ANDROIDAPI': '31', 'ANDROIDMINAPI': '21', 'ANDROID_ABI': 'arm64-v8a', 'ANDROID_NDK': '/home/bobf/.buildozer/android/platform/android-ndk-r25b', 'ANDROID_SDK': '/home/bobf/.buildozer/android/platform/android-sdk'}
Traceback (most recent call last):
File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/usr/lib/python3.10/runpy.py", line 86, in _run_code
exec(code, run_globals)
File "/home/bobf/ex/hello/.buildozer/android/platform/python-for-android/pythonforandroid/toolchain.py", line 1312, in <module>
main()
File "/home/bobf/ex/hello/.buildozer/android/platform/python-for-android/pythonforandroid/entrypoints.py", line 18, in main
ToolchainCL()
File "/home/bobf/ex/hello/.buildozer/android/platform/python-for-android/pythonforandroid/toolchain.py", line 734, in __init__
getattr(self, command)(args)
File "/home/bobf/ex/hello/.buildozer/android/platform/python-for-android/pythonforandroid/toolchain.py", line 153, in wrapper_func
build_dist_from_args(ctx, dist, args)
File "/home/bobf/ex/hello/.buildozer/android/platform/python-for-android/pythonforandroid/toolchain.py", line 212, in build_dist_from_args
build_recipes(build_order, python_modules, ctx,
File "/home/bobf/ex/hello/.buildozer/android/platform/python-for-android/pythonforandroid/build.py", line 504, in build_recipes
recipe.build_arch(arch)
File "/home/bobf/ex/hello/.buildozer/android/platform/python-for-android/pythonforandroid/recipes/vlc/__init__.py", line 68, in build_arch
shprint(sh.Command('./compile.sh'), _env=env,
File "/home/bobf/.local/lib/python3.10/site-packages/sh.py", line 1342, in __init__
raise CommandNotFound(path)
sh.CommandNotFound: ./compile.sh
========================================
```
subsequent builds fail with
```
[DEBUG]: -> running git clone http://git.videolan.org/git/vlc-ports/android.git /home/bobf/ex/hello/.buildozer/android/platform/build-arm64-v8a/build/other_builds/vlc/arm64-v8a__ndk_target_21/vlc/vlc-port-android
[DEBUG]: fatal: destination path '/home/bobf/ex/hello/.buildozer/android/platform/build-arm64-v8a/build/other_builds/vlc/arm64-v8a__ndk_target_21/vlc/vlc-port-android' already exists and is not an empty directory.
Exception in thread background thread for pid 21212:
```
--- END ISSUE ---
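For orientation before reading the recipe below: the log shows two separate symptoms. `./compile.sh` is not found right after a fresh clone, and a re-run then fails because `git clone` is pointed at a directory that already exists. The sketch that follows is illustrative only; the helper names and the assumption that newer checkouts of the port repository keep their build scripts under a `buildsystem/` subdirectory are this note's, not python-for-android's (the recorded fix for this issue, further down in this entry, points the recipe at `buildsystem/` directly).

```python
# Illustrative sketch only: the helper names and the buildsystem/ fallback are
# assumptions made for this note, not code from the recipe.
import os
import subprocess

PORT_GIT = "http://git.videolan.org/git/vlc-ports/android.git"


def ensure_port_checkout(port_dir):
    """Clone the vlc-android port repo only when it is not already present,
    so a second build does not hit 'destination path ... already exists'."""
    if not os.path.isdir(os.path.join(port_dir, ".git")):
        subprocess.check_call(["git", "clone", PORT_GIT, port_dir])


def locate_compile_script(port_dir):
    """Return the directory that actually contains compile.sh, trying the
    legacy top-level location first and then the buildsystem/ subdirectory."""
    for candidate in (port_dir, os.path.join(port_dir, "buildsystem")):
        if os.path.isfile(os.path.join(candidate, "compile.sh")):
            return candidate
    raise FileNotFoundError("compile.sh not found under {}".format(port_dir))
```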
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pythonforandroid/recipes/vlc/__init__.py`
Content:
```
1 from pythonforandroid.toolchain import Recipe, current_directory
2 from pythonforandroid.logger import info, debug, shprint, warning
3 from os.path import join, isdir, isfile
4 from os import environ
5 import sh
6
7
8 class VlcRecipe(Recipe):
9 version = '3.0.0'
10 url = None
11 name = 'vlc'
12
13 depends = []
14
15 port_git = 'http://git.videolan.org/git/vlc-ports/android.git'
16 # vlc_git = 'http://git.videolan.org/git/vlc.git'
17 ENV_LIBVLC_AAR = 'LIBVLC_AAR'
18 aars = {} # for future use of multiple arch
19
20 def prebuild_arch(self, arch):
21 super().prebuild_arch(arch)
22 build_dir = self.get_build_dir(arch.arch)
23 port_dir = join(build_dir, 'vlc-port-android')
24 if self.ENV_LIBVLC_AAR in environ:
25 aar = environ.get(self.ENV_LIBVLC_AAR)
26 if isdir(aar):
27 aar = join(aar, 'libvlc-{}.aar'.format(self.version))
28 if not isfile(aar):
29 warning("Error: {} is not valid libvlc-<ver>.aar bundle".format(aar))
30 info("check {} environment!".format(self.ENV_LIBVLC_AAR))
31 exit(1)
32 self.aars[arch] = aar
33 else:
34 aar_path = join(port_dir, 'libvlc', 'build', 'outputs', 'aar')
35 self.aars[arch] = aar = join(aar_path, 'libvlc-{}.aar'.format(self.version))
36 warning("HINT: set path to precompiled libvlc-<ver>.aar bundle "
37 "in {} environment!".format(self.ENV_LIBVLC_AAR))
38 info("libvlc-<ver>.aar should build "
39 "from sources at {}".format(port_dir))
40 if not isfile(join(port_dir, 'compile.sh')):
41 info("clone vlc port for android sources from {}".format(
42 self.port_git))
43 shprint(sh.git, 'clone', self.port_git, port_dir,
44 _tail=20, _critical=True)
45 # now "git clone ..." is a part of compile.sh
46 # vlc_dir = join(port_dir, 'vlc')
47 # if not isfile(join(vlc_dir, 'Makefile.am')):
48 # info("clone vlc sources from {}".format(self.vlc_git))
49 # shprint(sh.git, 'clone', self.vlc_git, vlc_dir,
50 # _tail=20, _critical=True)
51
52 def build_arch(self, arch):
53 super().build_arch(arch)
54 build_dir = self.get_build_dir(arch.arch)
55 port_dir = join(build_dir, 'vlc-port-android')
56 aar = self.aars[arch]
57 if not isfile(aar):
58 with current_directory(port_dir):
59 env = dict(environ)
60 env.update({
61 'ANDROID_ABI': arch.arch,
62 'ANDROID_NDK': self.ctx.ndk_dir,
63 'ANDROID_SDK': self.ctx.sdk_dir,
64 })
65 info("compiling vlc from sources")
66 debug("environment: {}".format(env))
67 if not isfile(join('bin', 'VLC-debug.apk')):
68 shprint(sh.Command('./compile.sh'), _env=env,
69 _tail=50, _critical=True)
70 shprint(sh.Command('./compile-libvlc.sh'), _env=env,
71 _tail=50, _critical=True)
72 shprint(sh.cp, '-a', aar, self.ctx.aars_dir)
73
74
75 recipe = VlcRecipe()
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pythonforandroid/recipes/vlc/__init__.py b/pythonforandroid/recipes/vlc/__init__.py
--- a/pythonforandroid/recipes/vlc/__init__.py
+++ b/pythonforandroid/recipes/vlc/__init__.py
@@ -6,7 +6,7 @@
class VlcRecipe(Recipe):
- version = '3.0.0'
+ version = '3.0.18'
url = None
name = 'vlc'
@@ -52,7 +52,7 @@
def build_arch(self, arch):
super().build_arch(arch)
build_dir = self.get_build_dir(arch.arch)
- port_dir = join(build_dir, 'vlc-port-android')
+ port_dir = join(build_dir, 'vlc-port-android', 'buildsystem')
aar = self.aars[arch]
if not isfile(aar):
with current_directory(port_dir):
@@ -67,7 +67,7 @@
if not isfile(join('bin', 'VLC-debug.apk')):
shprint(sh.Command('./compile.sh'), _env=env,
_tail=50, _critical=True)
- shprint(sh.Command('./compile-libvlc.sh'), _env=env,
+ shprint(sh.Command('./compile-medialibrary.sh'), _env=env,
_tail=50, _critical=True)
shprint(sh.cp, '-a', aar, self.ctx.aars_dir)
|
{"golden_diff": "diff --git a/pythonforandroid/recipes/vlc/__init__.py b/pythonforandroid/recipes/vlc/__init__.py\n--- a/pythonforandroid/recipes/vlc/__init__.py\n+++ b/pythonforandroid/recipes/vlc/__init__.py\n@@ -6,7 +6,7 @@\n \n \n class VlcRecipe(Recipe):\n- version = '3.0.0'\n+ version = '3.0.18'\n url = None\n name = 'vlc'\n \n@@ -52,7 +52,7 @@\n def build_arch(self, arch):\n super().build_arch(arch)\n build_dir = self.get_build_dir(arch.arch)\n- port_dir = join(build_dir, 'vlc-port-android')\n+ port_dir = join(build_dir, 'vlc-port-android', 'buildsystem')\n aar = self.aars[arch]\n if not isfile(aar):\n with current_directory(port_dir):\n@@ -67,7 +67,7 @@\n if not isfile(join('bin', 'VLC-debug.apk')):\n shprint(sh.Command('./compile.sh'), _env=env,\n _tail=50, _critical=True)\n- shprint(sh.Command('./compile-libvlc.sh'), _env=env,\n+ shprint(sh.Command('./compile-medialibrary.sh'), _env=env,\n _tail=50, _critical=True)\n shprint(sh.cp, '-a', aar, self.ctx.aars_dir)\n", "issue": "vlc recipe build fail\n### Logs\r\n\r\nA first build fails with\r\n\r\n```\r\n[INFO]: Building vlc for arm64-v8a\r\n[INFO]: -> directory context /home/bobf/ex/hello/.buildozer/android/platform/build-arm64-v8a/build/other_builds/vlc/arm64-v8a__ndk_target_21/vlc/vlc-port-android\r\n[INFO]: compiling vlc from sources\r\n[DEBUG]: environment: {'SHELL': '/bin/bash', 'WSL2_GUI_APPS_ENABLED': '1', 'WSL_DISTRO_NAME': 'Ubuntu-22.04', 'WT_SESSION': '15e13e88-1c44-49f6-a30f-3567f5691f5f', 'NAME': 'DESK', 'PWD': '/home/bobf/ex/hello', 'LOGNAME': 'bobf', 'KIVY': '/mnt/c/users/bobf/documents/kivy', 'UIMD': '/mnt/c/users/bobf/documents/pm/uimd', 'HOME': '/home/bobf', 'LANG': 'C.UTF-8', 'WSL_INTEROP': '/run/WSL/9_interop', 'LS_COLORS': 'rs=0:di=01;34:ln=01;36:mh=00:pi=40;33:so=01;35:do=01;35:bd=40;33;01:cd=40;33;01:or=40;31;01:mi=00:su=37;41:sg=30;43:ca=30;41:tw=30;42:ow=34;42:st=37;44:ex=01;32:*.tar=01;31:*.tgz=01;31:*.arc=01;31:*.arj=01;31:*.taz=01;31:*.lha=01;31:*.lz4=01;31:*.lzh=01;31:*.lzma=01;31:*.tlz=01;31:*.txz=01;31:*.tzo=01;31:*.t7z=01;31:*.zip=01;31:*.z=01;31:*.dz=01;31:*.gz=01;31:*.lrz=01;31:*.lz=01;31:*.lzo=01;31:*.xz=01;31:*.zst=01;31:*.tzst=01;31:*.bz2=01;31:*.bz=01;31:*.tbz=01;31:*.tbz2=01;31:*.tz=01;31:*.deb=01;31:*.rpm=01;31:*.jar=01;31:*.war=01;31:*.ear=01;31:*.sar=01;31:*.rar=01;31:*.alz=01;31:*.ace=01;31:*.zoo=01;31:*.cpio=01;31:*.7z=01;31:*.rz=01;31:*.cab=01;31:*.wim=01;31:*.swm=01;31:*.dwm=01;31:*.esd=01;31:*.jpg=01;35:*.jpeg=01;35:*.mjpg=01;35:*.mjpeg=01;35:*.gif=01;35:*.bmp=01;35:*.pbm=01;35:*.pgm=01;35:*.ppm=01;35:*.tga=01;35:*.xbm=01;35:*.xpm=01;35:*.tif=01;35:*.tiff=01;35:*.png=01;35:*.svg=01;35:*.svgz=01;35:*.mng=01;35:*.pcx=01;35:*.mov=01;35:*.mpg=01;35:*.mpeg=01;35:*.m2v=01;35:*.mkv=01;35:*.webm=01;35:*.webp=01;35:*.ogm=01;35:*.mp4=01;35:*.m4v=01;35:*.mp4v=01;35:*.vob=01;35:*.qt=01;35:*.nuv=01;35:*.wmv=01;35:*.asf=01;35:*.rm=01;35:*.rmvb=01;35:*.flc=01;35:*.avi=01;35:*.fli=01;35:*.flv=01;35:*.gl=01;35:*.dl=01;35:*.xcf=01;35:*.xwd=01;35:*.yuv=01;35:*.cgm=01;35:*.emf=01;35:*.ogv=01;35:*.ogx=01;35:*.aac=00;36:*.au=00;36:*.flac=00;36:*.m4a=00;36:*.mid=00;36:*.midi=00;36:*.mka=00;36:*.mp3=00;36:*.mpc=00;36:*.ogg=00;36:*.ra=00;36:*.wav=00;36:*.oga=00;36:*.opus=00;36:*.spx=00;36:*.xspf=00;36:', 'WAYLAND_DISPLAY': 'wayland-0', 'LESSCLOSE': '/usr/bin/lesspipe %s %s', 'TERM': 'xterm-256color', 'LESSOPEN': '| /usr/bin/lesspipe %s', 'USER': 'bobf', 'WIN': '/mnt/c/users/bobf', 'DISPLAY': ':0', 'SHLVL': '1', 'XDG_RUNTIME_DIR': '/mnt/wslg/runtime-dir', 'WSLENV': 'WT_SESSION::WT_PROFILE_ID', 'XDG_DATA_DIRS': 
'/usr/local/share:/usr/share:/var/lib/snapd/desktop', 'GITHUB': '/mnt/c/users/bobf/documents/GitHub', 'PATH': '/home/bobf/.buildozer/android/platform/apache-ant-1.9.4/bin:/home/bobf/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/usr/lib/wsl/lib:/mnt/c/Program Files (x86)/Common Files/Oracle/Java/javapath:/mnt/c/WINDOWS/system32:/mnt/c/WINDOWS:/mnt/c/WINDOWS/System32/Wbem:/mnt/c/WINDOWS/System32/WindowsPowerShell/v1.0/:/mnt/c/WINDOWS/System32/OpenSSH/:/mnt/c/Program Files/Git/cmd:/mnt/c/Program Files/dotnet/:/mnt/c/Users/Bobf/AppData/Local/Programs/Python/Python311/Scripts/:/mnt/c/Users/Bobf/AppData/Local/Programs/Python/Python311/:/mnt/c/Users/Bobf/AppData/Local/Programs/Python/Python310/Scripts/:/mnt/c/Users/Bobf/AppData/Local/Programs/Python/Python310/:/mnt/c/Users/Bobf/AppData/Local/Microsoft/WindowsApps:/mnt/c/Users/Bobf/AppData/Local/Android/Sdk/platform-tools:/mnt/c/Users/Bobf/AppData/Local/Android/Sdk/emulator:/mnt/c/Program Files/MySQL/MySQL Server 8.0/bin:/mnt/c/tools/kotlinc/bin:/mnt/c/Users/Bobf/AppData/Local/Programs/MiKTeX/miktex/bin/x64/:/mnt/c/Users/Bobf/AppData/Local/GitHubDesktop/bin:/mnt/c/Tools/ffmpeg-2023-01-25-essentials/bin:/snap/bin:/home/bobf/.local/bin/', 'HOSTTYPE': 'x86_64', 'PULSE_SERVER': 'unix:/mnt/wslg/PulseServer', 'WT_PROFILE_ID': '{d7b20cea-47a9-518c-95a4-c8bd91e2e1c6}', 'OLDPWD': '/home/bobf/ex/regression', '_': '/home/bobf/.local/bin/buildozer', 'PACKAGES_PATH': '/home/bobf/.buildozer/android/packages', 'ANDROIDSDK': '/home/bobf/.buildozer/android/platform/android-sdk', 'ANDROIDNDK': '/home/bobf/.buildozer/android/platform/android-ndk-r25b', 'ANDROIDAPI': '31', 'ANDROIDMINAPI': '21', 'ANDROID_ABI': 'arm64-v8a', 'ANDROID_NDK': '/home/bobf/.buildozer/android/platform/android-ndk-r25b', 'ANDROID_SDK': '/home/bobf/.buildozer/android/platform/android-sdk'}\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.10/runpy.py\", line 196, in _run_module_as_main\r\n return _run_code(code, main_globals, None,\r\n File \"/usr/lib/python3.10/runpy.py\", line 86, in _run_code\r\n exec(code, run_globals)\r\n File \"/home/bobf/ex/hello/.buildozer/android/platform/python-for-android/pythonforandroid/toolchain.py\", line 1312, in <module>\r\n main()\r\n File \"/home/bobf/ex/hello/.buildozer/android/platform/python-for-android/pythonforandroid/entrypoints.py\", line 18, in main\r\n ToolchainCL()\r\n File \"/home/bobf/ex/hello/.buildozer/android/platform/python-for-android/pythonforandroid/toolchain.py\", line 734, in __init__\r\n getattr(self, command)(args)\r\n File \"/home/bobf/ex/hello/.buildozer/android/platform/python-for-android/pythonforandroid/toolchain.py\", line 153, in wrapper_func\r\n build_dist_from_args(ctx, dist, args)\r\n File \"/home/bobf/ex/hello/.buildozer/android/platform/python-for-android/pythonforandroid/toolchain.py\", line 212, in build_dist_from_args\r\n build_recipes(build_order, python_modules, ctx,\r\n File \"/home/bobf/ex/hello/.buildozer/android/platform/python-for-android/pythonforandroid/build.py\", line 504, in build_recipes\r\n recipe.build_arch(arch)\r\n File \"/home/bobf/ex/hello/.buildozer/android/platform/python-for-android/pythonforandroid/recipes/vlc/__init__.py\", line 68, in build_arch\r\n shprint(sh.Command('./compile.sh'), _env=env,\r\n File \"/home/bobf/.local/lib/python3.10/site-packages/sh.py\", line 1342, in __init__\r\n raise CommandNotFound(path)\r\nsh.CommandNotFound: ./compile.sh\r\n========================================\r\n```\r\nsubsequent builds fail 
with \r\n```\r\n[DEBUG]: -> running git clone http://git.videolan.org/git/vlc-ports/android.git /home/bobf/ex/hello/.buildozer/android/platform/build-arm64-v8a/build/other_builds/vlc/arm64-v8a__ndk_target_21/vlc/vlc-port-android\r\n[DEBUG]: fatal: destination path '/home/bobf/ex/hello/.buildozer/android/platform/build-arm64-v8a/build/other_builds/vlc/arm64-v8a__ndk_target_21/vlc/vlc-port-android' already exists and is not an empty directory.\r\nException in thread background thread for pid 21212:\r\n```\r\n\n", "before_files": [{"content": "from pythonforandroid.toolchain import Recipe, current_directory\nfrom pythonforandroid.logger import info, debug, shprint, warning\nfrom os.path import join, isdir, isfile\nfrom os import environ\nimport sh\n\n\nclass VlcRecipe(Recipe):\n version = '3.0.0'\n url = None\n name = 'vlc'\n\n depends = []\n\n port_git = 'http://git.videolan.org/git/vlc-ports/android.git'\n# vlc_git = 'http://git.videolan.org/git/vlc.git'\n ENV_LIBVLC_AAR = 'LIBVLC_AAR'\n aars = {} # for future use of multiple arch\n\n def prebuild_arch(self, arch):\n super().prebuild_arch(arch)\n build_dir = self.get_build_dir(arch.arch)\n port_dir = join(build_dir, 'vlc-port-android')\n if self.ENV_LIBVLC_AAR in environ:\n aar = environ.get(self.ENV_LIBVLC_AAR)\n if isdir(aar):\n aar = join(aar, 'libvlc-{}.aar'.format(self.version))\n if not isfile(aar):\n warning(\"Error: {} is not valid libvlc-<ver>.aar bundle\".format(aar))\n info(\"check {} environment!\".format(self.ENV_LIBVLC_AAR))\n exit(1)\n self.aars[arch] = aar\n else:\n aar_path = join(port_dir, 'libvlc', 'build', 'outputs', 'aar')\n self.aars[arch] = aar = join(aar_path, 'libvlc-{}.aar'.format(self.version))\n warning(\"HINT: set path to precompiled libvlc-<ver>.aar bundle \"\n \"in {} environment!\".format(self.ENV_LIBVLC_AAR))\n info(\"libvlc-<ver>.aar should build \"\n \"from sources at {}\".format(port_dir))\n if not isfile(join(port_dir, 'compile.sh')):\n info(\"clone vlc port for android sources from {}\".format(\n self.port_git))\n shprint(sh.git, 'clone', self.port_git, port_dir,\n _tail=20, _critical=True)\n# now \"git clone ...\" is a part of compile.sh\n# vlc_dir = join(port_dir, 'vlc')\n# if not isfile(join(vlc_dir, 'Makefile.am')):\n# info(\"clone vlc sources from {}\".format(self.vlc_git))\n# shprint(sh.git, 'clone', self.vlc_git, vlc_dir,\n# _tail=20, _critical=True)\n\n def build_arch(self, arch):\n super().build_arch(arch)\n build_dir = self.get_build_dir(arch.arch)\n port_dir = join(build_dir, 'vlc-port-android')\n aar = self.aars[arch]\n if not isfile(aar):\n with current_directory(port_dir):\n env = dict(environ)\n env.update({\n 'ANDROID_ABI': arch.arch,\n 'ANDROID_NDK': self.ctx.ndk_dir,\n 'ANDROID_SDK': self.ctx.sdk_dir,\n })\n info(\"compiling vlc from sources\")\n debug(\"environment: {}\".format(env))\n if not isfile(join('bin', 'VLC-debug.apk')):\n shprint(sh.Command('./compile.sh'), _env=env,\n _tail=50, _critical=True)\n shprint(sh.Command('./compile-libvlc.sh'), _env=env,\n _tail=50, _critical=True)\n shprint(sh.cp, '-a', aar, self.ctx.aars_dir)\n\n\nrecipe = VlcRecipe()\n", "path": "pythonforandroid/recipes/vlc/__init__.py"}], "after_files": [{"content": "from pythonforandroid.toolchain import Recipe, current_directory\nfrom pythonforandroid.logger import info, debug, shprint, warning\nfrom os.path import join, isdir, isfile\nfrom os import environ\nimport sh\n\n\nclass VlcRecipe(Recipe):\n version = '3.0.18'\n url = None\n name = 'vlc'\n\n depends = []\n\n port_git = 
'http://git.videolan.org/git/vlc-ports/android.git'\n# vlc_git = 'http://git.videolan.org/git/vlc.git'\n ENV_LIBVLC_AAR = 'LIBVLC_AAR'\n aars = {} # for future use of multiple arch\n\n def prebuild_arch(self, arch):\n super().prebuild_arch(arch)\n build_dir = self.get_build_dir(arch.arch)\n port_dir = join(build_dir, 'vlc-port-android')\n if self.ENV_LIBVLC_AAR in environ:\n aar = environ.get(self.ENV_LIBVLC_AAR)\n if isdir(aar):\n aar = join(aar, 'libvlc-{}.aar'.format(self.version))\n if not isfile(aar):\n warning(\"Error: {} is not valid libvlc-<ver>.aar bundle\".format(aar))\n info(\"check {} environment!\".format(self.ENV_LIBVLC_AAR))\n exit(1)\n self.aars[arch] = aar\n else:\n aar_path = join(port_dir, 'libvlc', 'build', 'outputs', 'aar')\n self.aars[arch] = aar = join(aar_path, 'libvlc-{}.aar'.format(self.version))\n warning(\"HINT: set path to precompiled libvlc-<ver>.aar bundle \"\n \"in {} environment!\".format(self.ENV_LIBVLC_AAR))\n info(\"libvlc-<ver>.aar should build \"\n \"from sources at {}\".format(port_dir))\n if not isfile(join(port_dir, 'compile.sh')):\n info(\"clone vlc port for android sources from {}\".format(\n self.port_git))\n shprint(sh.git, 'clone', self.port_git, port_dir,\n _tail=20, _critical=True)\n# now \"git clone ...\" is a part of compile.sh\n# vlc_dir = join(port_dir, 'vlc')\n# if not isfile(join(vlc_dir, 'Makefile.am')):\n# info(\"clone vlc sources from {}\".format(self.vlc_git))\n# shprint(sh.git, 'clone', self.vlc_git, vlc_dir,\n# _tail=20, _critical=True)\n\n def build_arch(self, arch):\n super().build_arch(arch)\n build_dir = self.get_build_dir(arch.arch)\n port_dir = join(build_dir, 'vlc-port-android', 'buildsystem')\n aar = self.aars[arch]\n if not isfile(aar):\n with current_directory(port_dir):\n env = dict(environ)\n env.update({\n 'ANDROID_ABI': arch.arch,\n 'ANDROID_NDK': self.ctx.ndk_dir,\n 'ANDROID_SDK': self.ctx.sdk_dir,\n })\n info(\"compiling vlc from sources\")\n debug(\"environment: {}\".format(env))\n if not isfile(join('bin', 'VLC-debug.apk')):\n shprint(sh.Command('./compile.sh'), _env=env,\n _tail=50, _critical=True)\n shprint(sh.Command('./compile-medialibrary.sh'), _env=env,\n _tail=50, _critical=True)\n shprint(sh.cp, '-a', aar, self.ctx.aars_dir)\n\n\nrecipe = VlcRecipe()\n", "path": "pythonforandroid/recipes/vlc/__init__.py"}]}
| 4,088 | 323 |
gh_patches_debug_20877
|
rasdani/github-patches
|
git_diff
|
googleapis__python-bigquery-449
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Integration test with pyarrow nightly builds?
I'm not sure how difficult it would be to add, but it might be useful to regularly test the library against pyarrow nightly builds to catch regressions before they make their way into releases.
--- END ISSUE ---
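Sketched below is one possible shape for such a check, written as a nox session since that is how this repository drives its other test runs. The session name, the dependency list, and the use of the `arrow-nightlies` package index are illustrative assumptions for this note (a patch along these lines appears later in this entry); nightly pyarrow wheels are served from an alternative index, so both `--extra-index-url` and `--pre` are needed.

```python
# Sketch only: assumes nox is available and that pyarrow nightly wheels are
# published to the arrow-nightlies index; names here are illustrative.
import nox


@nox.session(python="3.8")
def prerelease_deps(session):
    """Run the unit tests against prerelease grpcio, pandas and pyarrow."""
    # Nightly pyarrow packages live on an alternative PyPI index.
    session.install(
        "--extra-index-url",
        "https://pypi.fury.io/arrow-nightlies/",
        "--pre",
        "pyarrow",
    )
    session.install("--pre", "grpcio", "pandas")
    session.install("mock", "pytest", "pytest-cov", "freezegun", "IPython")
    session.install("-e", ".[all]")

    # Print the resolved prerelease versions so a failing run is easy to pin down.
    session.run("python", "-c", "import pyarrow; print(pyarrow.__version__)")

    session.run("py.test", "tests")
```

Locally a session like this would be invoked with `nox -s prerelease_deps`; running it on a scheduled CI job rather than on every presubmit would keep ordinary builds fast while still surfacing upstream regressions before a release.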
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `noxfile.py`
Content:
```
1 # Copyright 2016 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from __future__ import absolute_import
16
17 import pathlib
18 import os
19 import shutil
20
21 import nox
22
23
24 BLACK_VERSION = "black==19.10b0"
25 BLACK_PATHS = ("docs", "google", "samples", "tests", "noxfile.py", "setup.py")
26 CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
27
28 # 'docfx' is excluded since it only needs to run in 'docs-presubmit'
29 nox.options.sessions = [
30 "unit",
31 "system",
32 "snippets",
33 "cover",
34 "lint",
35 "lint_setup_py",
36 "blacken",
37 "docs",
38 ]
39
40
41 def default(session):
42 """Default unit test session.
43
44 This is intended to be run **without** an interpreter set, so
45 that the current ``python`` (on the ``PATH``) or the version of
46 Python corresponding to the ``nox`` binary the ``PATH`` can
47 run the tests.
48 """
49 constraints_path = str(
50 CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
51 )
52
53 # Install all test dependencies, then install local packages in-place.
54 session.install(
55 "mock",
56 "pytest",
57 "google-cloud-testutils",
58 "pytest-cov",
59 "freezegun",
60 "-c",
61 constraints_path,
62 )
63
64 session.install("-e", ".[all]", "-c", constraints_path)
65
66 session.install("ipython", "-c", constraints_path)
67
68 # Run py.test against the unit tests.
69 session.run(
70 "py.test",
71 "--quiet",
72 "--cov=google.cloud.bigquery",
73 "--cov=tests.unit",
74 "--cov-append",
75 "--cov-config=.coveragerc",
76 "--cov-report=",
77 "--cov-fail-under=0",
78 os.path.join("tests", "unit"),
79 *session.posargs,
80 )
81
82
83 @nox.session(python=["3.6", "3.7", "3.8"])
84 def unit(session):
85 """Run the unit test suite."""
86 default(session)
87
88
89 @nox.session(python=["3.8"])
90 def system(session):
91 """Run the system test suite."""
92
93 constraints_path = str(
94 CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
95 )
96
97 # Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true.
98 if os.environ.get("RUN_SYSTEM_TESTS", "true") == "false":
99 session.skip("RUN_SYSTEM_TESTS is set to false, skipping")
100
101 # Sanity check: Only run system tests if the environment variable is set.
102 if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""):
103 session.skip("Credentials must be set via environment variable.")
104
105 # Use pre-release gRPC for system tests.
106 session.install("--pre", "grpcio", "-c", constraints_path)
107
108 # Install all test dependencies, then install local packages in place.
109 session.install(
110 "mock", "pytest", "psutil", "google-cloud-testutils", "-c", constraints_path
111 )
112 session.install("google-cloud-storage", "-c", constraints_path)
113
114 session.install("-e", ".[all]", "-c", constraints_path)
115 session.install("ipython", "-c", constraints_path)
116
117 # Run py.test against the system tests.
118 session.run(
119 "py.test", "--quiet", os.path.join("tests", "system.py"), *session.posargs
120 )
121
122
123 @nox.session(python=["3.8"])
124 def snippets(session):
125 """Run the snippets test suite."""
126
127 # Check the value of `RUN_SNIPPETS_TESTS` env var. It defaults to true.
128 if os.environ.get("RUN_SNIPPETS_TESTS", "true") == "false":
129 session.skip("RUN_SNIPPETS_TESTS is set to false, skipping")
130
131 # Sanity check: Only run snippets tests if the environment variable is set.
132 if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""):
133 session.skip("Credentials must be set via environment variable.")
134
135 constraints_path = str(
136 CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
137 )
138
139 # Install all test dependencies, then install local packages in place.
140 session.install("mock", "pytest", "google-cloud-testutils", "-c", constraints_path)
141 session.install("google-cloud-storage", "-c", constraints_path)
142 session.install("grpcio", "-c", constraints_path)
143
144 session.install("-e", ".[all]", "-c", constraints_path)
145
146 # Run py.test against the snippets tests.
147 # Skip tests in samples/snippets, as those are run in a different session
148 # using the nox config from that directory.
149 session.run("py.test", os.path.join("docs", "snippets.py"), *session.posargs)
150 session.run(
151 "py.test",
152 "samples",
153 "--ignore=samples/snippets",
154 "--ignore=samples/geography",
155 *session.posargs,
156 )
157
158
159 @nox.session(python="3.8")
160 def cover(session):
161 """Run the final coverage report.
162
163 This outputs the coverage report aggregating coverage from the unit
164 test runs (not system test runs), and then erases coverage data.
165 """
166 session.install("coverage", "pytest-cov")
167 session.run("coverage", "report", "--show-missing", "--fail-under=100")
168 session.run("coverage", "erase")
169
170
171 @nox.session(python="3.8")
172 def lint(session):
173 """Run linters.
174
175 Returns a failure if the linters find linting errors or sufficiently
176 serious code quality issues.
177 """
178
179 session.install("flake8", BLACK_VERSION)
180 session.install("-e", ".")
181 session.run("flake8", os.path.join("google", "cloud", "bigquery"))
182 session.run("flake8", "tests")
183 session.run("flake8", os.path.join("docs", "samples"))
184 session.run("flake8", os.path.join("docs", "snippets.py"))
185 session.run("black", "--check", *BLACK_PATHS)
186
187
188 @nox.session(python="3.8")
189 def lint_setup_py(session):
190 """Verify that setup.py is valid (including RST check)."""
191
192 session.install("docutils", "Pygments")
193 session.run("python", "setup.py", "check", "--restructuredtext", "--strict")
194
195
196 @nox.session(python="3.6")
197 def blacken(session):
198 """Run black.
199 Format code to uniform standard.
200
201 This currently uses Python 3.6 due to the automated Kokoro run of synthtool.
202 That run uses an image that doesn't have 3.6 installed. Before updating this
203 check the state of the `gcp_ubuntu_config` we use for that Kokoro run.
204 """
205 session.install(BLACK_VERSION)
206 session.run("black", *BLACK_PATHS)
207
208
209 @nox.session(python="3.8")
210 def docs(session):
211 """Build the docs."""
212
213 session.install("ipython", "recommonmark", "sphinx", "sphinx_rtd_theme")
214 session.install("google-cloud-storage")
215 session.install("-e", ".[all]")
216
217 shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
218 session.run(
219 "sphinx-build",
220 "-W", # warnings as errors
221 "-T", # show full traceback on exception
222 "-N", # no colors
223 "-b",
224 "html",
225 "-d",
226 os.path.join("docs", "_build", "doctrees", ""),
227 os.path.join("docs", ""),
228 os.path.join("docs", "_build", "html", ""),
229 )
230
231
232 @nox.session(python="3.8")
233 def docfx(session):
234 """Build the docfx yaml files for this library."""
235
236 session.install("-e", ".")
237 session.install("sphinx", "alabaster", "recommonmark", "sphinx-docfx-yaml")
238
239 shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
240 session.run(
241 "sphinx-build",
242 "-T", # show full traceback on exception
243 "-N", # no colors
244 "-D",
245 (
246 "extensions=sphinx.ext.autodoc,"
247 "sphinx.ext.autosummary,"
248 "docfx_yaml.extension,"
249 "sphinx.ext.intersphinx,"
250 "sphinx.ext.coverage,"
251 "sphinx.ext.napoleon,"
252 "sphinx.ext.todo,"
253 "sphinx.ext.viewcode,"
254 "recommonmark"
255 ),
256 "-b",
257 "html",
258 "-d",
259 os.path.join("docs", "_build", "doctrees", ""),
260 os.path.join("docs", ""),
261 os.path.join("docs", "_build", "html", ""),
262 )
263
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/noxfile.py b/noxfile.py
--- a/noxfile.py
+++ b/noxfile.py
@@ -168,6 +168,38 @@
session.run("coverage", "erase")
+@nox.session(python="3.8")
+def prerelease_deps(session):
+ """Run all tests with prerelease versions of dependencies installed.
+
+ https://github.com/googleapis/python-bigquery/issues/95
+ """
+ # PyArrow prerelease packages are published to an alternative PyPI host.
+ # https://arrow.apache.org/docs/python/install.html#installing-nightly-packages
+ session.install(
+ "--extra-index-url", "https://pypi.fury.io/arrow-nightlies/", "--pre", "pyarrow"
+ )
+ session.install("--pre", "grpcio", "pandas")
+ session.install(
+ "mock",
+ "pytest",
+ "google-cloud-testutils",
+ "pytest-cov",
+ "freezegun",
+ "IPython",
+ )
+ session.install("-e", ".[all]")
+
+ # Print out prerelease package versions.
+ session.run("python", "-c", "import grpc; print(grpc.__version__)")
+ session.run("python", "-c", "import pandas; print(pandas.__version__)")
+ session.run("python", "-c", "import pyarrow; print(pyarrow.__version__)")
+
+ # Run all tests, except a few samples tests which require extra dependencies.
+ session.run("py.test", "tests")
+ session.run("py.test", "samples/tests")
+
+
@nox.session(python="3.8")
def lint(session):
"""Run linters.
|
{"golden_diff": "diff --git a/noxfile.py b/noxfile.py\n--- a/noxfile.py\n+++ b/noxfile.py\n@@ -168,6 +168,38 @@\n session.run(\"coverage\", \"erase\")\n \n \[email protected](python=\"3.8\")\n+def prerelease_deps(session):\n+ \"\"\"Run all tests with prerelease versions of dependencies installed.\n+\n+ https://github.com/googleapis/python-bigquery/issues/95\n+ \"\"\"\n+ # PyArrow prerelease packages are published to an alternative PyPI host.\n+ # https://arrow.apache.org/docs/python/install.html#installing-nightly-packages\n+ session.install(\n+ \"--extra-index-url\", \"https://pypi.fury.io/arrow-nightlies/\", \"--pre\", \"pyarrow\"\n+ )\n+ session.install(\"--pre\", \"grpcio\", \"pandas\")\n+ session.install(\n+ \"mock\",\n+ \"pytest\",\n+ \"google-cloud-testutils\",\n+ \"pytest-cov\",\n+ \"freezegun\",\n+ \"IPython\",\n+ )\n+ session.install(\"-e\", \".[all]\")\n+\n+ # Print out prerelease package versions.\n+ session.run(\"python\", \"-c\", \"import grpc; print(grpc.__version__)\")\n+ session.run(\"python\", \"-c\", \"import pandas; print(pandas.__version__)\")\n+ session.run(\"python\", \"-c\", \"import pyarrow; print(pyarrow.__version__)\")\n+\n+ # Run all tests, except a few samples tests which require extra dependencies.\n+ session.run(\"py.test\", \"tests\")\n+ session.run(\"py.test\", \"samples/tests\")\n+\n+\n @nox.session(python=\"3.8\")\n def lint(session):\n \"\"\"Run linters.\n", "issue": "Integration test with pyarrow nightly builds?\nI'm not sure how difficult it would be to add, but it might be useful to regularly test the library against pyarrow nightly build's to catch regressions before they make their way into releases.\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2016 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nimport pathlib\nimport os\nimport shutil\n\nimport nox\n\n\nBLACK_VERSION = \"black==19.10b0\"\nBLACK_PATHS = (\"docs\", \"google\", \"samples\", \"tests\", \"noxfile.py\", \"setup.py\")\nCURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()\n\n# 'docfx' is excluded since it only needs to run in 'docs-presubmit'\nnox.options.sessions = [\n \"unit\",\n \"system\",\n \"snippets\",\n \"cover\",\n \"lint\",\n \"lint_setup_py\",\n \"blacken\",\n \"docs\",\n]\n\n\ndef default(session):\n \"\"\"Default unit test session.\n\n This is intended to be run **without** an interpreter set, so\n that the current ``python`` (on the ``PATH``) or the version of\n Python corresponding to the ``nox`` binary the ``PATH`` can\n run the tests.\n \"\"\"\n constraints_path = str(\n CURRENT_DIRECTORY / \"testing\" / f\"constraints-{session.python}.txt\"\n )\n\n # Install all test dependencies, then install local packages in-place.\n session.install(\n \"mock\",\n \"pytest\",\n \"google-cloud-testutils\",\n \"pytest-cov\",\n \"freezegun\",\n \"-c\",\n constraints_path,\n )\n\n session.install(\"-e\", \".[all]\", \"-c\", constraints_path)\n\n session.install(\"ipython\", \"-c\", constraints_path)\n\n # Run 
py.test against the unit tests.\n session.run(\n \"py.test\",\n \"--quiet\",\n \"--cov=google.cloud.bigquery\",\n \"--cov=tests.unit\",\n \"--cov-append\",\n \"--cov-config=.coveragerc\",\n \"--cov-report=\",\n \"--cov-fail-under=0\",\n os.path.join(\"tests\", \"unit\"),\n *session.posargs,\n )\n\n\[email protected](python=[\"3.6\", \"3.7\", \"3.8\"])\ndef unit(session):\n \"\"\"Run the unit test suite.\"\"\"\n default(session)\n\n\[email protected](python=[\"3.8\"])\ndef system(session):\n \"\"\"Run the system test suite.\"\"\"\n\n constraints_path = str(\n CURRENT_DIRECTORY / \"testing\" / f\"constraints-{session.python}.txt\"\n )\n\n # Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true.\n if os.environ.get(\"RUN_SYSTEM_TESTS\", \"true\") == \"false\":\n session.skip(\"RUN_SYSTEM_TESTS is set to false, skipping\")\n\n # Sanity check: Only run system tests if the environment variable is set.\n if not os.environ.get(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"):\n session.skip(\"Credentials must be set via environment variable.\")\n\n # Use pre-release gRPC for system tests.\n session.install(\"--pre\", \"grpcio\", \"-c\", constraints_path)\n\n # Install all test dependencies, then install local packages in place.\n session.install(\n \"mock\", \"pytest\", \"psutil\", \"google-cloud-testutils\", \"-c\", constraints_path\n )\n session.install(\"google-cloud-storage\", \"-c\", constraints_path)\n\n session.install(\"-e\", \".[all]\", \"-c\", constraints_path)\n session.install(\"ipython\", \"-c\", constraints_path)\n\n # Run py.test against the system tests.\n session.run(\n \"py.test\", \"--quiet\", os.path.join(\"tests\", \"system.py\"), *session.posargs\n )\n\n\[email protected](python=[\"3.8\"])\ndef snippets(session):\n \"\"\"Run the snippets test suite.\"\"\"\n\n # Check the value of `RUN_SNIPPETS_TESTS` env var. 
It defaults to true.\n if os.environ.get(\"RUN_SNIPPETS_TESTS\", \"true\") == \"false\":\n session.skip(\"RUN_SNIPPETS_TESTS is set to false, skipping\")\n\n # Sanity check: Only run snippets tests if the environment variable is set.\n if not os.environ.get(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"):\n session.skip(\"Credentials must be set via environment variable.\")\n\n constraints_path = str(\n CURRENT_DIRECTORY / \"testing\" / f\"constraints-{session.python}.txt\"\n )\n\n # Install all test dependencies, then install local packages in place.\n session.install(\"mock\", \"pytest\", \"google-cloud-testutils\", \"-c\", constraints_path)\n session.install(\"google-cloud-storage\", \"-c\", constraints_path)\n session.install(\"grpcio\", \"-c\", constraints_path)\n\n session.install(\"-e\", \".[all]\", \"-c\", constraints_path)\n\n # Run py.test against the snippets tests.\n # Skip tests in samples/snippets, as those are run in a different session\n # using the nox config from that directory.\n session.run(\"py.test\", os.path.join(\"docs\", \"snippets.py\"), *session.posargs)\n session.run(\n \"py.test\",\n \"samples\",\n \"--ignore=samples/snippets\",\n \"--ignore=samples/geography\",\n *session.posargs,\n )\n\n\[email protected](python=\"3.8\")\ndef cover(session):\n \"\"\"Run the final coverage report.\n\n This outputs the coverage report aggregating coverage from the unit\n test runs (not system test runs), and then erases coverage data.\n \"\"\"\n session.install(\"coverage\", \"pytest-cov\")\n session.run(\"coverage\", \"report\", \"--show-missing\", \"--fail-under=100\")\n session.run(\"coverage\", \"erase\")\n\n\[email protected](python=\"3.8\")\ndef lint(session):\n \"\"\"Run linters.\n\n Returns a failure if the linters find linting errors or sufficiently\n serious code quality issues.\n \"\"\"\n\n session.install(\"flake8\", BLACK_VERSION)\n session.install(\"-e\", \".\")\n session.run(\"flake8\", os.path.join(\"google\", \"cloud\", \"bigquery\"))\n session.run(\"flake8\", \"tests\")\n session.run(\"flake8\", os.path.join(\"docs\", \"samples\"))\n session.run(\"flake8\", os.path.join(\"docs\", \"snippets.py\"))\n session.run(\"black\", \"--check\", *BLACK_PATHS)\n\n\[email protected](python=\"3.8\")\ndef lint_setup_py(session):\n \"\"\"Verify that setup.py is valid (including RST check).\"\"\"\n\n session.install(\"docutils\", \"Pygments\")\n session.run(\"python\", \"setup.py\", \"check\", \"--restructuredtext\", \"--strict\")\n\n\[email protected](python=\"3.6\")\ndef blacken(session):\n \"\"\"Run black.\n Format code to uniform standard.\n\n This currently uses Python 3.6 due to the automated Kokoro run of synthtool.\n That run uses an image that doesn't have 3.6 installed. 
Before updating this\n check the state of the `gcp_ubuntu_config` we use for that Kokoro run.\n \"\"\"\n session.install(BLACK_VERSION)\n session.run(\"black\", *BLACK_PATHS)\n\n\[email protected](python=\"3.8\")\ndef docs(session):\n \"\"\"Build the docs.\"\"\"\n\n session.install(\"ipython\", \"recommonmark\", \"sphinx\", \"sphinx_rtd_theme\")\n session.install(\"google-cloud-storage\")\n session.install(\"-e\", \".[all]\")\n\n shutil.rmtree(os.path.join(\"docs\", \"_build\"), ignore_errors=True)\n session.run(\n \"sphinx-build\",\n \"-W\", # warnings as errors\n \"-T\", # show full traceback on exception\n \"-N\", # no colors\n \"-b\",\n \"html\",\n \"-d\",\n os.path.join(\"docs\", \"_build\", \"doctrees\", \"\"),\n os.path.join(\"docs\", \"\"),\n os.path.join(\"docs\", \"_build\", \"html\", \"\"),\n )\n\n\[email protected](python=\"3.8\")\ndef docfx(session):\n \"\"\"Build the docfx yaml files for this library.\"\"\"\n\n session.install(\"-e\", \".\")\n session.install(\"sphinx\", \"alabaster\", \"recommonmark\", \"sphinx-docfx-yaml\")\n\n shutil.rmtree(os.path.join(\"docs\", \"_build\"), ignore_errors=True)\n session.run(\n \"sphinx-build\",\n \"-T\", # show full traceback on exception\n \"-N\", # no colors\n \"-D\",\n (\n \"extensions=sphinx.ext.autodoc,\"\n \"sphinx.ext.autosummary,\"\n \"docfx_yaml.extension,\"\n \"sphinx.ext.intersphinx,\"\n \"sphinx.ext.coverage,\"\n \"sphinx.ext.napoleon,\"\n \"sphinx.ext.todo,\"\n \"sphinx.ext.viewcode,\"\n \"recommonmark\"\n ),\n \"-b\",\n \"html\",\n \"-d\",\n os.path.join(\"docs\", \"_build\", \"doctrees\", \"\"),\n os.path.join(\"docs\", \"\"),\n os.path.join(\"docs\", \"_build\", \"html\", \"\"),\n )\n", "path": "noxfile.py"}], "after_files": [{"content": "# Copyright 2016 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nimport pathlib\nimport os\nimport shutil\n\nimport nox\n\n\nBLACK_VERSION = \"black==19.10b0\"\nBLACK_PATHS = (\"docs\", \"google\", \"samples\", \"tests\", \"noxfile.py\", \"setup.py\")\nCURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()\n\n# 'docfx' is excluded since it only needs to run in 'docs-presubmit'\nnox.options.sessions = [\n \"unit\",\n \"system\",\n \"snippets\",\n \"cover\",\n \"lint\",\n \"lint_setup_py\",\n \"blacken\",\n \"docs\",\n]\n\n\ndef default(session):\n \"\"\"Default unit test session.\n\n This is intended to be run **without** an interpreter set, so\n that the current ``python`` (on the ``PATH``) or the version of\n Python corresponding to the ``nox`` binary the ``PATH`` can\n run the tests.\n \"\"\"\n constraints_path = str(\n CURRENT_DIRECTORY / \"testing\" / f\"constraints-{session.python}.txt\"\n )\n\n # Install all test dependencies, then install local packages in-place.\n session.install(\n \"mock\",\n \"pytest\",\n \"google-cloud-testutils\",\n \"pytest-cov\",\n \"freezegun\",\n \"-c\",\n constraints_path,\n )\n\n session.install(\"-e\", \".[all]\", \"-c\", constraints_path)\n\n session.install(\"ipython\", \"-c\", 
constraints_path)\n\n # Run py.test against the unit tests.\n session.run(\n \"py.test\",\n \"--quiet\",\n \"--cov=google.cloud.bigquery\",\n \"--cov=tests.unit\",\n \"--cov-append\",\n \"--cov-config=.coveragerc\",\n \"--cov-report=\",\n \"--cov-fail-under=0\",\n os.path.join(\"tests\", \"unit\"),\n *session.posargs,\n )\n\n\[email protected](python=[\"3.6\", \"3.7\", \"3.8\"])\ndef unit(session):\n \"\"\"Run the unit test suite.\"\"\"\n default(session)\n\n\[email protected](python=[\"3.8\"])\ndef system(session):\n \"\"\"Run the system test suite.\"\"\"\n\n constraints_path = str(\n CURRENT_DIRECTORY / \"testing\" / f\"constraints-{session.python}.txt\"\n )\n\n # Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true.\n if os.environ.get(\"RUN_SYSTEM_TESTS\", \"true\") == \"false\":\n session.skip(\"RUN_SYSTEM_TESTS is set to false, skipping\")\n\n # Sanity check: Only run system tests if the environment variable is set.\n if not os.environ.get(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"):\n session.skip(\"Credentials must be set via environment variable.\")\n\n # Use pre-release gRPC for system tests.\n session.install(\"--pre\", \"grpcio\", \"-c\", constraints_path)\n\n # Install all test dependencies, then install local packages in place.\n session.install(\n \"mock\", \"pytest\", \"psutil\", \"google-cloud-testutils\", \"-c\", constraints_path\n )\n session.install(\"google-cloud-storage\", \"-c\", constraints_path)\n\n session.install(\"-e\", \".[all]\", \"-c\", constraints_path)\n session.install(\"ipython\", \"-c\", constraints_path)\n\n # Run py.test against the system tests.\n session.run(\n \"py.test\", \"--quiet\", os.path.join(\"tests\", \"system.py\"), *session.posargs\n )\n\n\[email protected](python=[\"3.8\"])\ndef snippets(session):\n \"\"\"Run the snippets test suite.\"\"\"\n\n # Check the value of `RUN_SNIPPETS_TESTS` env var. 
It defaults to true.\n if os.environ.get(\"RUN_SNIPPETS_TESTS\", \"true\") == \"false\":\n session.skip(\"RUN_SNIPPETS_TESTS is set to false, skipping\")\n\n # Sanity check: Only run snippets tests if the environment variable is set.\n if not os.environ.get(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"):\n session.skip(\"Credentials must be set via environment variable.\")\n\n constraints_path = str(\n CURRENT_DIRECTORY / \"testing\" / f\"constraints-{session.python}.txt\"\n )\n\n # Install all test dependencies, then install local packages in place.\n session.install(\"mock\", \"pytest\", \"google-cloud-testutils\", \"-c\", constraints_path)\n session.install(\"google-cloud-storage\", \"-c\", constraints_path)\n session.install(\"grpcio\", \"-c\", constraints_path)\n\n session.install(\"-e\", \".[all]\", \"-c\", constraints_path)\n\n # Run py.test against the snippets tests.\n # Skip tests in samples/snippets, as those are run in a different session\n # using the nox config from that directory.\n session.run(\"py.test\", os.path.join(\"docs\", \"snippets.py\"), *session.posargs)\n session.run(\n \"py.test\",\n \"samples\",\n \"--ignore=samples/snippets\",\n \"--ignore=samples/geography\",\n *session.posargs,\n )\n\n\[email protected](python=\"3.8\")\ndef cover(session):\n \"\"\"Run the final coverage report.\n\n This outputs the coverage report aggregating coverage from the unit\n test runs (not system test runs), and then erases coverage data.\n \"\"\"\n session.install(\"coverage\", \"pytest-cov\")\n session.run(\"coverage\", \"report\", \"--show-missing\", \"--fail-under=100\")\n session.run(\"coverage\", \"erase\")\n\n\[email protected](python=\"3.8\")\ndef prerelease_deps(session):\n \"\"\"Run all tests with prerelease versions of dependencies installed.\n\n https://github.com/googleapis/python-bigquery/issues/95\n \"\"\"\n # PyArrow prerelease packages are published to an alternative PyPI host.\n # https://arrow.apache.org/docs/python/install.html#installing-nightly-packages\n session.install(\n \"--extra-index-url\", \"https://pypi.fury.io/arrow-nightlies/\", \"--pre\", \"pyarrow\"\n )\n session.install(\"--pre\", \"grpcio\", \"pandas\")\n session.install(\n \"mock\",\n \"pytest\",\n \"google-cloud-testutils\",\n \"pytest-cov\",\n \"freezegun\",\n \"IPython\",\n )\n session.install(\"-e\", \".[all]\")\n\n # Print out prerelease package versions.\n session.run(\"python\", \"-c\", \"import grpc; print(grpc.__version__)\")\n session.run(\"python\", \"-c\", \"import pandas; print(pandas.__version__)\")\n session.run(\"python\", \"-c\", \"import pyarrow; print(pyarrow.__version__)\")\n\n # Run all tests, except a few samples tests which require extra dependencies.\n session.run(\"py.test\", \"tests\")\n session.run(\"py.test\", \"samples/tests\")\n\n\[email protected](python=\"3.8\")\ndef lint(session):\n \"\"\"Run linters.\n\n Returns a failure if the linters find linting errors or sufficiently\n serious code quality issues.\n \"\"\"\n\n session.install(\"flake8\", BLACK_VERSION)\n session.install(\"-e\", \".\")\n session.run(\"flake8\", os.path.join(\"google\", \"cloud\", \"bigquery\"))\n session.run(\"flake8\", \"tests\")\n session.run(\"flake8\", os.path.join(\"docs\", \"samples\"))\n session.run(\"flake8\", os.path.join(\"docs\", \"snippets.py\"))\n session.run(\"black\", \"--check\", *BLACK_PATHS)\n\n\[email protected](python=\"3.8\")\ndef lint_setup_py(session):\n \"\"\"Verify that setup.py is valid (including RST check).\"\"\"\n\n session.install(\"docutils\", \"Pygments\")\n 
session.run(\"python\", \"setup.py\", \"check\", \"--restructuredtext\", \"--strict\")\n\n\[email protected](python=\"3.6\")\ndef blacken(session):\n \"\"\"Run black.\n Format code to uniform standard.\n\n This currently uses Python 3.6 due to the automated Kokoro run of synthtool.\n That run uses an image that doesn't have 3.6 installed. Before updating this\n check the state of the `gcp_ubuntu_config` we use for that Kokoro run.\n \"\"\"\n session.install(BLACK_VERSION)\n session.run(\"black\", *BLACK_PATHS)\n\n\[email protected](python=\"3.8\")\ndef docs(session):\n \"\"\"Build the docs.\"\"\"\n\n session.install(\"ipython\", \"recommonmark\", \"sphinx\", \"sphinx_rtd_theme\")\n session.install(\"google-cloud-storage\")\n session.install(\"-e\", \".[all]\")\n\n shutil.rmtree(os.path.join(\"docs\", \"_build\"), ignore_errors=True)\n session.run(\n \"sphinx-build\",\n \"-W\", # warnings as errors\n \"-T\", # show full traceback on exception\n \"-N\", # no colors\n \"-b\",\n \"html\",\n \"-d\",\n os.path.join(\"docs\", \"_build\", \"doctrees\", \"\"),\n os.path.join(\"docs\", \"\"),\n os.path.join(\"docs\", \"_build\", \"html\", \"\"),\n )\n\n\[email protected](python=\"3.8\")\ndef docfx(session):\n \"\"\"Build the docfx yaml files for this library.\"\"\"\n\n session.install(\"-e\", \".\")\n session.install(\"sphinx\", \"alabaster\", \"recommonmark\", \"sphinx-docfx-yaml\")\n\n shutil.rmtree(os.path.join(\"docs\", \"_build\"), ignore_errors=True)\n session.run(\n \"sphinx-build\",\n \"-T\", # show full traceback on exception\n \"-N\", # no colors\n \"-D\",\n (\n \"extensions=sphinx.ext.autodoc,\"\n \"sphinx.ext.autosummary,\"\n \"docfx_yaml.extension,\"\n \"sphinx.ext.intersphinx,\"\n \"sphinx.ext.coverage,\"\n \"sphinx.ext.napoleon,\"\n \"sphinx.ext.todo,\"\n \"sphinx.ext.viewcode,\"\n \"recommonmark\"\n ),\n \"-b\",\n \"html\",\n \"-d\",\n os.path.join(\"docs\", \"_build\", \"doctrees\", \"\"),\n os.path.join(\"docs\", \"\"),\n os.path.join(\"docs\", \"_build\", \"html\", \"\"),\n )\n", "path": "noxfile.py"}]}
| 3,039 | 387 |
gh_patches_debug_28282
|
rasdani/github-patches
|
git_diff
|
getpelican__pelican-1040
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow nice URLs on devserver
When you publish the website, it is quite easy to implement nice URLs. For example, an nginx configuration like this would provide you with nice URLs for your website:
```
index index.php index.html index.htm index.shtml;
root /var/www/example.com/public_html;
try_files $uri $uri.html $uri/index.html index.html;
```
Now the problem is that the devserver does not have the same behaviour, and so far I am not aware of any way to configure it to do this.
```
/output/index.html http://localhost:8000
/output/contact.html http://localhost:8000/contact
/output/sales/index.html http://localhost:8000/sales
/output/sales/index.html http://localhost:8000/sales/ (yes the same)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pelican/server.py`
Content:
```
1 from __future__ import print_function
2 import sys
3 try:
4 import SimpleHTTPServer as srvmod
5 except ImportError:
6 import http.server as srvmod # NOQA
7
8 try:
9 import SocketServer as socketserver
10 except ImportError:
11 import socketserver # NOQA
12
13 PORT = len(sys.argv) == 2 and int(sys.argv[1]) or 8000
14
15 Handler = srvmod.SimpleHTTPRequestHandler
16
17 try:
18 httpd = socketserver.TCPServer(("", PORT), Handler)
19 except OSError as e:
20 print("Could not listen on port", PORT)
21 sys.exit(getattr(e, 'exitcode', 1))
22
23
24 print("serving at port", PORT)
25 try:
26 httpd.serve_forever()
27 except KeyboardInterrupt as e:
28 print("shutting down server")
29 httpd.socket.close()
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pelican/server.py b/pelican/server.py
--- a/pelican/server.py
+++ b/pelican/server.py
@@ -1,5 +1,7 @@
from __future__ import print_function
+import os
import sys
+import logging
try:
import SimpleHTTPServer as srvmod
except ImportError:
@@ -11,19 +13,36 @@
import socketserver # NOQA
PORT = len(sys.argv) == 2 and int(sys.argv[1]) or 8000
+SUFFIXES = ['','.html','/index.html']
-Handler = srvmod.SimpleHTTPRequestHandler
+class ComplexHTTPRequestHandler(srvmod.SimpleHTTPRequestHandler):
+ def do_GET(self):
+ # we are trying to detect the file by having a fallback mechanism
+ r = None
+ for suffix in SUFFIXES:
+ if not hasattr(self,'original_path'):
+ self.original_path = self.path
+ self.path = self.original_path + suffix
+ path = self.translate_path(self.path)
+ if os.path.exists(path):
+ r = srvmod.SimpleHTTPRequestHandler.do_GET(self)
+ if r is not None:
+ break
+ logging.warning("Unable to find %s file." % self.path)
+ return r
+
+Handler = ComplexHTTPRequestHandler
try:
httpd = socketserver.TCPServer(("", PORT), Handler)
except OSError as e:
- print("Could not listen on port", PORT)
+ logging.error("Could not listen on port %s" % PORT)
sys.exit(getattr(e, 'exitcode', 1))
-print("serving at port", PORT)
+logging.info("serving at port %s" % PORT)
try:
httpd.serve_forever()
except KeyboardInterrupt as e:
- print("shutting down server")
- httpd.socket.close()
+ logging.info("shutting down server")
+ httpd.socket.close()
\ No newline at end of file
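The SUFFIXES fallback introduced above mirrors the `try_files $uri $uri.html $uri/index.html` line from the issue. A rough, self-contained sketch of that lookup order (a hypothetical helper for illustration, not part of the patch):

```python
import os.path

SUFFIXES = ['', '.html', '/index.html']

def resolve(root, request_path):
    # Try the path verbatim, then with .html, then as a directory index,
    # mimicking nginx's try_files for the examples given in the issue.
    base = os.path.join(root, request_path.strip('/'))
    for suffix in SUFFIXES:
        candidate = base + suffix
        if os.path.isfile(candidate):
            return candidate
    return None  # nothing matched; the handler would answer with a 404

# resolve('output', '/contact')  -> 'output/contact.html'
# resolve('output', '/sales/')   -> 'output/sales/index.html'
```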
|
{"golden_diff": "diff --git a/pelican/server.py b/pelican/server.py\n--- a/pelican/server.py\n+++ b/pelican/server.py\n@@ -1,5 +1,7 @@\n from __future__ import print_function\n+import os\n import sys\n+import logging\n try:\n import SimpleHTTPServer as srvmod\n except ImportError:\n@@ -11,19 +13,36 @@\n import socketserver # NOQA\n \n PORT = len(sys.argv) == 2 and int(sys.argv[1]) or 8000\n+SUFFIXES = ['','.html','/index.html']\n \n-Handler = srvmod.SimpleHTTPRequestHandler\n+class ComplexHTTPRequestHandler(srvmod.SimpleHTTPRequestHandler):\n+ def do_GET(self):\n+ # we are trying to detect the file by having a fallback mechanism\n+ r = None\n+ for suffix in SUFFIXES:\n+ if not hasattr(self,'original_path'):\n+ self.original_path = self.path\n+ self.path = self.original_path + suffix\n+ path = self.translate_path(self.path)\n+ if os.path.exists(path):\n+ r = srvmod.SimpleHTTPRequestHandler.do_GET(self)\n+ if r is not None:\n+ break\n+ logging.warning(\"Unable to find %s file.\" % self.path)\n+ return r\n+\n+Handler = ComplexHTTPRequestHandler\n \n try:\n httpd = socketserver.TCPServer((\"\", PORT), Handler)\n except OSError as e:\n- print(\"Could not listen on port\", PORT)\n+ logging.error(\"Could not listen on port %s\" % PORT)\n sys.exit(getattr(e, 'exitcode', 1))\n \n \n-print(\"serving at port\", PORT)\n+logging.info(\"serving at port %s\" % PORT)\n try:\n httpd.serve_forever()\n except KeyboardInterrupt as e:\n- print(\"shutting down server\")\n- httpd.socket.close()\n+ logging.info(\"shutting down server\")\n+ httpd.socket.close()\n\\ No newline at end of file\n", "issue": "Allow nice URLs on devserver\nWhen you publish the website is quite easy to implement nice URLs. For example an nginx configuraiton like this would provide you with nice URLs for your website:\n\n```\nindex index.php index.html index.htm index.shtml;\nroot /var/www/example.com/public_html;\ntry_files $uri $uri.html $uri/index.html index.html;\n```\n\nNow the problem is tha the devserver does not have the save behaviour, and so far I am now aware of any way to configure it to do this.\n\n```\n/output/index.html http://localhost:8000\n/output/contact.html http://localhost:8000/contact\n/output/sales/index.html http://localhost:8000/sales\n/output/sales/index.html http://localhost:8000/sales/ (yes the same) \n```\n\n", "before_files": [{"content": "from __future__ import print_function\nimport sys\ntry:\n import SimpleHTTPServer as srvmod\nexcept ImportError:\n import http.server as srvmod # NOQA\n\ntry:\n import SocketServer as socketserver\nexcept ImportError:\n import socketserver # NOQA\n\nPORT = len(sys.argv) == 2 and int(sys.argv[1]) or 8000\n\nHandler = srvmod.SimpleHTTPRequestHandler\n\ntry:\n httpd = socketserver.TCPServer((\"\", PORT), Handler)\nexcept OSError as e:\n print(\"Could not listen on port\", PORT)\n sys.exit(getattr(e, 'exitcode', 1))\n\n\nprint(\"serving at port\", PORT)\ntry:\n httpd.serve_forever()\nexcept KeyboardInterrupt as e:\n print(\"shutting down server\")\n httpd.socket.close()\n", "path": "pelican/server.py"}], "after_files": [{"content": "from __future__ import print_function\nimport os\nimport sys\nimport logging\ntry:\n import SimpleHTTPServer as srvmod\nexcept ImportError:\n import http.server as srvmod # NOQA\n\ntry:\n import SocketServer as socketserver\nexcept ImportError:\n import socketserver # NOQA\n\nPORT = len(sys.argv) == 2 and int(sys.argv[1]) or 8000\nSUFFIXES = ['','.html','/index.html']\n\nclass ComplexHTTPRequestHandler(srvmod.SimpleHTTPRequestHandler):\n def 
do_GET(self):\n # we are trying to detect the file by having a fallback mechanism\n r = None\n for suffix in SUFFIXES:\n if not hasattr(self,'original_path'):\n self.original_path = self.path\n self.path = self.original_path + suffix\n path = self.translate_path(self.path)\n if os.path.exists(path):\n r = srvmod.SimpleHTTPRequestHandler.do_GET(self)\n if r is not None:\n break\n logging.warning(\"Unable to find %s file.\" % self.path)\n return r\n\nHandler = ComplexHTTPRequestHandler\n\ntry:\n httpd = socketserver.TCPServer((\"\", PORT), Handler)\nexcept OSError as e:\n logging.error(\"Could not listen on port %s\" % PORT)\n sys.exit(getattr(e, 'exitcode', 1))\n\n\nlogging.info(\"serving at port %s\" % PORT)\ntry:\n httpd.serve_forever()\nexcept KeyboardInterrupt as e:\n logging.info(\"shutting down server\")\n httpd.socket.close()", "path": "pelican/server.py"}]}
| 671 | 440 |
gh_patches_debug_25372
|
rasdani/github-patches
|
git_diff
|
facebookresearch__CompilerGym-739
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Logical Failure when combining TimeLimit Wrapper with IterateOverBenchmarks
## 🐛 Bug
If an environment is first wrapped with TimeLimit before IterateOverBenchmarks, it will not return "done" as True.
## To Reproduce
Steps to reproduce the behavior:
```
env = TimeLimit(env, step_limit)
env = CycleOverBenchmarks(env, benchmarks)
_, done, _, _ = env.reset()
while not done:
_, done, _, _ = env.step(0)
```
This will not finish. However, if the TimeLimit happens after the Cycle, it has normal behavior.
## Additional context
Assign it to me, I will fix it when I got time.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `compiler_gym/wrappers/time_limit.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates.
2 #
3 # This source code is licensed under the MIT license found in the
4 # LICENSE file in the root directory of this source tree.
5 from typing import Optional
6
7 from compiler_gym.envs import CompilerEnv
8 from compiler_gym.util.gym_type_hints import ActionType
9 from compiler_gym.wrappers.core import CompilerEnvWrapper
10
11
12 class TimeLimit(CompilerEnvWrapper):
13 """A step-limited wrapper that is compatible with CompilerGym.
14
15 Example usage:
16
17 >>> env = TimeLimit(env, max_episode_steps=3)
18 >>> env.reset()
19 >>> _, _, done, _ = env.step(0)
20 >>> _, _, done, _ = env.step(0)
21 >>> _, _, done, _ = env.step(0)
22 >>> done
23 True
24 """
25
26 def __init__(self, env: CompilerEnv, max_episode_steps: Optional[int] = None):
27 super().__init__(env=env)
28 if max_episode_steps is None and self.env.spec is not None:
29 max_episode_steps = env.spec.max_episode_steps
30 if self.env.spec is not None:
31 self.env.spec.max_episode_steps = max_episode_steps
32 self._max_episode_steps = max_episode_steps
33 self._elapsed_steps = None
34
35 def step(self, action: ActionType, **kwargs):
36 assert (
37 self._elapsed_steps is not None
38 ), "Cannot call env.step() before calling reset()"
39 observation, reward, done, info = self.env.step(action, **kwargs)
40 self._elapsed_steps += 1
41 if self._elapsed_steps >= self._max_episode_steps:
42 info["TimeLimit.truncated"] = not done
43 done = True
44 return observation, reward, done, info
45
46 def reset(self, **kwargs):
47 self._elapsed_steps = 0
48 return self.env.reset(**kwargs)
49
50 def fork(self) -> "TimeLimit":
51 """Fork the wrapped environment.
52
53 The time limit state of the forked environment is the same as the source
54 state.
55 """
56 fkd = type(self)(env=self.env.fork(), max_episode_steps=self._max_episode_steps)
57 fkd._elapsed_steps = self._elapsed_steps # pylint: disable=protected-access
58 return fkd
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/compiler_gym/wrappers/time_limit.py b/compiler_gym/wrappers/time_limit.py
--- a/compiler_gym/wrappers/time_limit.py
+++ b/compiler_gym/wrappers/time_limit.py
@@ -2,7 +2,7 @@
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
-from typing import Optional
+from typing import Iterable, Optional
from compiler_gym.envs import CompilerEnv
from compiler_gym.util.gym_type_hints import ActionType
@@ -32,12 +32,13 @@
self._max_episode_steps = max_episode_steps
self._elapsed_steps = None
- def step(self, action: ActionType, **kwargs):
+ def multistep(self, actions: Iterable[ActionType], **kwargs):
+ actions = list(actions)
assert (
self._elapsed_steps is not None
), "Cannot call env.step() before calling reset()"
- observation, reward, done, info = self.env.step(action, **kwargs)
- self._elapsed_steps += 1
+ observation, reward, done, info = self.env.multistep(actions, **kwargs)
+ self._elapsed_steps += len(actions)
if self._elapsed_steps >= self._max_episode_steps:
info["TimeLimit.truncated"] = not done
done = True
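With the counting moved into multistep(), the wrapper order described in the issue should no longer swallow the time limit. A sketch of the scenario once the patch is applied (environment name, step limit and benchmark URIs are illustrative; note that Gym's step() returns (observation, reward, done, info) and reset() returns only an observation, unlike the unpacking shown in the issue snippet):

```python
import compiler_gym
from compiler_gym.wrappers import CycleOverBenchmarks, TimeLimit

benchmarks = ["benchmark://cbench-v1/crc32", "benchmark://cbench-v1/qsort"]  # illustrative

env = compiler_gym.make("llvm-v0")           # any CompilerGym environment
env = TimeLimit(env, max_episode_steps=5)    # limit applied first, as in the issue
env = CycleOverBenchmarks(env, benchmarks)

env.reset()
done = False
while not done:
    _, _, done, _ = env.step(0)              # done flips once 5 steps have elapsed
```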
|
{"golden_diff": "diff --git a/compiler_gym/wrappers/time_limit.py b/compiler_gym/wrappers/time_limit.py\n--- a/compiler_gym/wrappers/time_limit.py\n+++ b/compiler_gym/wrappers/time_limit.py\n@@ -2,7 +2,7 @@\n #\n # This source code is licensed under the MIT license found in the\n # LICENSE file in the root directory of this source tree.\n-from typing import Optional\n+from typing import Iterable, Optional\n \n from compiler_gym.envs import CompilerEnv\n from compiler_gym.util.gym_type_hints import ActionType\n@@ -32,12 +32,13 @@\n self._max_episode_steps = max_episode_steps\n self._elapsed_steps = None\n \n- def step(self, action: ActionType, **kwargs):\n+ def multistep(self, actions: Iterable[ActionType], **kwargs):\n+ actions = list(actions)\n assert (\n self._elapsed_steps is not None\n ), \"Cannot call env.step() before calling reset()\"\n- observation, reward, done, info = self.env.step(action, **kwargs)\n- self._elapsed_steps += 1\n+ observation, reward, done, info = self.env.multistep(actions, **kwargs)\n+ self._elapsed_steps += len(actions)\n if self._elapsed_steps >= self._max_episode_steps:\n info[\"TimeLimit.truncated\"] = not done\n done = True\n", "issue": "Logical Failure when combing TimeLimit Wrapper with IterateOverBenchmarks\n## \ud83d\udc1b Bug\r\n\r\nIf an environment is first wrapper with TimeLimit before IterateOverBenchmarks, it will not return \"done\" as True. \r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n```\r\nenv = TimeLimit(env, step_limit) \r\nenv = CycleOverBenchmarks(env, benchmarks) \r\n_, done, _, _ = env.reset()\r\nwhile not done:\r\n _, done, _, _ = env.step(0) \r\n```\r\nThis will not finish. However, if the TimeLimit happens after the Cycle, it has normal behavior. \r\n\r\n\r\n## Additional context\r\n\r\nAssign it to me, I will fix it when I got time. \n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom typing import Optional\n\nfrom compiler_gym.envs import CompilerEnv\nfrom compiler_gym.util.gym_type_hints import ActionType\nfrom compiler_gym.wrappers.core import CompilerEnvWrapper\n\n\nclass TimeLimit(CompilerEnvWrapper):\n \"\"\"A step-limited wrapper that is compatible with CompilerGym.\n\n Example usage:\n\n >>> env = TimeLimit(env, max_episode_steps=3)\n >>> env.reset()\n >>> _, _, done, _ = env.step(0)\n >>> _, _, done, _ = env.step(0)\n >>> _, _, done, _ = env.step(0)\n >>> done\n True\n \"\"\"\n\n def __init__(self, env: CompilerEnv, max_episode_steps: Optional[int] = None):\n super().__init__(env=env)\n if max_episode_steps is None and self.env.spec is not None:\n max_episode_steps = env.spec.max_episode_steps\n if self.env.spec is not None:\n self.env.spec.max_episode_steps = max_episode_steps\n self._max_episode_steps = max_episode_steps\n self._elapsed_steps = None\n\n def step(self, action: ActionType, **kwargs):\n assert (\n self._elapsed_steps is not None\n ), \"Cannot call env.step() before calling reset()\"\n observation, reward, done, info = self.env.step(action, **kwargs)\n self._elapsed_steps += 1\n if self._elapsed_steps >= self._max_episode_steps:\n info[\"TimeLimit.truncated\"] = not done\n done = True\n return observation, reward, done, info\n\n def reset(self, **kwargs):\n self._elapsed_steps = 0\n return self.env.reset(**kwargs)\n\n def fork(self) -> \"TimeLimit\":\n \"\"\"Fork the wrapped environment.\n\n The time limit state of the forked environment is the same as the source\n state.\n \"\"\"\n fkd = type(self)(env=self.env.fork(), max_episode_steps=self._max_episode_steps)\n fkd._elapsed_steps = self._elapsed_steps # pylint: disable=protected-access\n return fkd\n", "path": "compiler_gym/wrappers/time_limit.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom typing import Iterable, Optional\n\nfrom compiler_gym.envs import CompilerEnv\nfrom compiler_gym.util.gym_type_hints import ActionType\nfrom compiler_gym.wrappers.core import CompilerEnvWrapper\n\n\nclass TimeLimit(CompilerEnvWrapper):\n \"\"\"A step-limited wrapper that is compatible with CompilerGym.\n\n Example usage:\n\n >>> env = TimeLimit(env, max_episode_steps=3)\n >>> env.reset()\n >>> _, _, done, _ = env.step(0)\n >>> _, _, done, _ = env.step(0)\n >>> _, _, done, _ = env.step(0)\n >>> done\n True\n \"\"\"\n\n def __init__(self, env: CompilerEnv, max_episode_steps: Optional[int] = None):\n super().__init__(env=env)\n if max_episode_steps is None and self.env.spec is not None:\n max_episode_steps = env.spec.max_episode_steps\n if self.env.spec is not None:\n self.env.spec.max_episode_steps = max_episode_steps\n self._max_episode_steps = max_episode_steps\n self._elapsed_steps = None\n\n def multistep(self, actions: Iterable[ActionType], **kwargs):\n actions = list(actions)\n assert (\n self._elapsed_steps is not None\n ), \"Cannot call env.step() before calling reset()\"\n observation, reward, done, info = self.env.multistep(actions, **kwargs)\n self._elapsed_steps += len(actions)\n if self._elapsed_steps >= self._max_episode_steps:\n info[\"TimeLimit.truncated\"] = not done\n done = True\n return observation, reward, done, info\n\n def reset(self, **kwargs):\n self._elapsed_steps = 0\n return self.env.reset(**kwargs)\n\n def fork(self) -> \"TimeLimit\":\n \"\"\"Fork the wrapped environment.\n\n The time limit state of the forked environment is the same as the source\n state.\n \"\"\"\n fkd = type(self)(env=self.env.fork(), max_episode_steps=self._max_episode_steps)\n fkd._elapsed_steps = self._elapsed_steps # pylint: disable=protected-access\n return fkd\n", "path": "compiler_gym/wrappers/time_limit.py"}]}
| 1,020 | 304 |
gh_patches_debug_55957
|
rasdani/github-patches
|
git_diff
|
saleor__saleor-2909
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
OrderCancel mutation throws error when asked not to restock inventory
### What I'm trying to achieve
Cancel order without doing restock.
### Steps to reproduce the problem
1. Execute query
```
mutation {
orderCancel(id: "T3JkZXI6MTQ=", restock: false) {
errors {
field
message
}
order {
id
}
}
}
```
2. Get an error
3. Order is cancelled anyway
### What I expected to happen
To work perfectly. Note: if `restock: true`, mutation executes properly.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `saleor/graphql/order/mutations/orders.py`
Content:
```
1 import graphene
2 from django.utils.translation import pgettext_lazy
3 from graphql_jwt.decorators import permission_required
4 from payments import PaymentError, PaymentStatus
5
6 from ....core.utils.taxes import ZERO_TAXED_MONEY
7 from ....order import CustomPaymentChoices, OrderEvents, models
8 from ....order.utils import cancel_order
9 from ....shipping.models import ShippingMethod as ShippingMethodModel
10 from ...account.types import AddressInput
11 from ...core.mutations import BaseMutation
12 from ...core.types.common import Decimal, Error
13 from ...order.mutations.draft_orders import DraftOrderUpdate
14 from ...order.types import Order, OrderEvent
15 from ...shipping.types import ShippingMethod
16
17
18 def clean_order_update_shipping(order, method, errors):
19 if not method:
20 return errors
21 if not order.shipping_address:
22 errors.append(
23 Error(
24 field='order',
25 message=(
26 'Cannot choose a shipping method for an '
27 'order without the shipping address.')))
28 return errors
29 valid_methods = (
30 ShippingMethodModel.objects.applicable_shipping_methods(
31 price=order.get_subtotal().gross.amount,
32 weight=order.get_total_weight(),
33 country_code=order.shipping_address.country.code))
34 valid_methods = valid_methods.values_list('id', flat=True)
35 if method.pk not in valid_methods:
36 errors.append(
37 Error(
38 field='shippingMethod',
39 message='Shipping method cannot be used with this order.'))
40 return errors
41
42
43 def try_payment_action(action, money, errors):
44 try:
45 action(money)
46 except (PaymentError, ValueError) as e:
47 errors.append(Error(field='payment', message=str(e)))
48
49
50 def clean_order_cancel(order, errors):
51 if order and not order.can_cancel():
52 errors.append(
53 Error(
54 field='order',
55 message='This order can\'t be canceled.'))
56 return errors
57
58
59 def clean_order_mark_as_paid(order, errors):
60 if order and order.payments.exists():
61 errors.append(
62 Error(
63 field='payment',
64 message='Orders with payments can not be manually '
65 'marked as paid.'))
66 return errors
67
68
69 def clean_order_capture(payment, amount, errors):
70 if not payment:
71 errors.append(
72 Error(
73 field='payment',
74 message='There\'s no payment associated with the order.'))
75 return errors
76 if payment.status != PaymentStatus.PREAUTH:
77 errors.append(
78 Error(
79 field='payment',
80 message='Only pre-authorized payments can be captured'))
81 return errors
82
83
84 def clean_release_payment(payment, errors):
85 """Check for payment errors."""
86 if payment.status != PaymentStatus.PREAUTH:
87 errors.append(
88 Error(field='payment',
89 message='Only pre-authorized payments can be released'))
90 try:
91 payment.release()
92 except (PaymentError, ValueError) as e:
93 errors.append(Error(field='payment', message=str(e)))
94 return errors
95
96
97 def clean_refund_payment(payment, amount, errors):
98 if payment.variant == CustomPaymentChoices.MANUAL:
99 errors.append(
100 Error(field='payment',
101 message='Manual payments can not be refunded.'))
102 return errors
103
104
105 class OrderUpdateInput(graphene.InputObjectType):
106 billing_address = AddressInput(
107 description='Billing address of the customer.')
108 user_email = graphene.String(description='Email address of the customer.')
109 shipping_address = AddressInput(
110 description='Shipping address of the customer.')
111
112
113 class OrderUpdate(DraftOrderUpdate):
114 class Arguments:
115 id = graphene.ID(
116 required=True, description='ID of an order to update.')
117 input = OrderUpdateInput(
118 required=True,
119 description='Fields required to update an order.')
120
121 class Meta:
122 description = 'Updates an order.'
123 model = models.Order
124
125
126 class OrderUpdateShippingInput(graphene.InputObjectType):
127 shipping_method = graphene.ID(
128 description='ID of the selected shipping method.',
129 name='shippingMethod')
130
131
132 class OrderUpdateShipping(BaseMutation):
133 order = graphene.Field(
134 Order, description='Order with updated shipping method.')
135
136 class Arguments:
137 id = graphene.ID(
138 required=True, name='order',
139 description='ID of the order to update a shipping method.')
140 input = OrderUpdateShippingInput(
141 description='Fields required to change '
142 'shipping method of the order.')
143
144 class Meta:
145 description = 'Updates a shipping method of the order.'
146
147 @classmethod
148 @permission_required('order.manage_orders')
149 def mutate(cls, root, info, id, input):
150 errors = []
151 order = cls.get_node_or_error(info, id, errors, 'id', Order)
152
153 if not input['shipping_method']:
154 if order.is_shipping_required():
155 cls.add_error(
156 errors, 'shippingMethod',
157 'Shipping method is required for this order.')
158 return OrderUpdateShipping(errors=errors)
159 order.shipping_method = None
160 order.shipping_price = ZERO_TAXED_MONEY
161 order.shipping_method_name = None
162 order.save(
163 update_fields=[
164 'shipping_method', 'shipping_price_net',
165 'shipping_price_gross', 'shipping_method_name'])
166 return OrderUpdateShipping(order=order)
167
168 method = cls.get_node_or_error(
169 info, input['shipping_method'], errors,
170 'shipping_method', ShippingMethod)
171 clean_order_update_shipping(order, method, errors)
172 if errors:
173 return OrderUpdateShipping(errors=errors)
174
175 order.shipping_method = method
176 order.shipping_price = method.get_total_price(info.context.taxes)
177 order.shipping_method_name = method.name
178 order.save(
179 update_fields=[
180 'shipping_method', 'shipping_method_name',
181 'shipping_price_net', 'shipping_price_gross'])
182 return OrderUpdateShipping(order=order)
183
184
185 class OrderAddNoteInput(graphene.InputObjectType):
186 message = graphene.String(description='Note message.', name='message')
187
188
189 class OrderAddNote(BaseMutation):
190 order = graphene.Field(Order, description='Order with the note added.')
191 event = graphene.Field(OrderEvent, description='Order note created.')
192
193 class Arguments:
194 id = graphene.ID(
195 required=True,
196 description='ID of the order to add a note for.', name='order')
197 input = OrderAddNoteInput(
198 required=True,
199 description='Fields required to create a note for the order.')
200
201 class Meta:
202 description = 'Adds note to the order.'
203
204 @classmethod
205 @permission_required('order.manage_orders')
206 def mutate(cls, root, info, id, input):
207 errors = []
208 order = cls.get_node_or_error(info, id, errors, 'id', Order)
209 if errors:
210 return OrderAddNote(errors=errors)
211
212 event = order.events.create(
213 type=OrderEvents.NOTE_ADDED.value,
214 user=info.context.user,
215 parameters={
216 'message': input['message']})
217 return OrderAddNote(order=order, event=event)
218
219
220 class OrderCancel(BaseMutation):
221 order = graphene.Field(Order, description='Canceled order.')
222
223 class Arguments:
224 id = graphene.ID(
225 required=True, description='ID of the order to cancel.')
226 restock = graphene.Boolean(
227 required=True,
228 description='Determine if lines will be restocked or not.')
229
230 class Meta:
231 description = 'Cancel an order.'
232
233 @classmethod
234 @permission_required('order.manage_orders')
235 def mutate(cls, root, info, id, restock):
236 errors = []
237 order = cls.get_node_or_error(info, id, errors, 'id', Order)
238 clean_order_cancel(order, errors)
239 if errors:
240 return OrderCancel(errors=errors)
241
242 cancel_order(order=order, restock=restock)
243 if restock:
244 order.events.create(
245 type=OrderEvents.FULFILLMENT_RESTOCKED_ITEMS.value,
246 user=info.context.user,
247 parameters={'quantity': order.get_total_quantity()})
248 else:
249 order.events.create(
250 type=OrderEvents.ORDER_CANCELED.value,
251 user=info.context.user)
252 return OrderCancel(order=order)
253
254
255 class OrderMarkAsPaid(BaseMutation):
256 order = graphene.Field(Order, description='Order marked as paid.')
257
258 class Arguments:
259 id = graphene.ID(
260 required=True, description='ID of the order to mark paid.')
261
262 class Meta:
263 description = 'Mark order as manually paid.'
264
265 @classmethod
266 @permission_required('order.manage_orders')
267 def mutate(cls, root, info, id):
268 errors = []
269 order = cls.get_node_or_error(info, id, errors, 'id', Order)
270 clean_order_mark_as_paid(order, errors)
271 if errors:
272 return OrderMarkAsPaid(errors=errors)
273
274 defaults = {
275 'total': order.total.gross.amount,
276 'tax': order.total.tax.amount,
277 'currency': order.total.currency,
278 'delivery': order.shipping_price.net.amount,
279 'description': pgettext_lazy(
280 'Payment description', 'Order %(order)s') % {'order': order},
281 'captured_amount': order.total.gross.amount}
282 models.Payment.objects.get_or_create(
283 variant=CustomPaymentChoices.MANUAL,
284 status=PaymentStatus.CONFIRMED, order=order,
285 defaults=defaults)
286
287 order.events.create(
288 type=OrderEvents.ORDER_MARKED_AS_PAID.value,
289 user=info.context.user)
290 return OrderMarkAsPaid(order=order)
291
292
293 class OrderCapture(BaseMutation):
294 order = graphene.Field(Order, description='Captured order.')
295
296 class Arguments:
297 id = graphene.ID(
298 required=True, description='ID of the order to capture.')
299 amount = Decimal(
300 required=True, description='Amount of money to capture.')
301
302 class Meta:
303 description = 'Capture an order.'
304
305 @classmethod
306 @permission_required('order.manage_orders')
307 def mutate(cls, root, info, id, amount):
308 errors = []
309 order = cls.get_node_or_error(info, id, errors, 'id', Order)
310 payment = order.get_last_payment()
311 clean_order_capture(payment, amount, errors)
312 try_payment_action(payment.capture, amount, errors)
313 if errors:
314 return OrderCapture(errors=errors)
315
316 order.events.create(
317 parameters={'amount': amount},
318 type=OrderEvents.PAYMENT_CAPTURED.value,
319 user=info.context.user)
320 return OrderCapture(order=order)
321
322
323 class OrderRelease(BaseMutation):
324 order = graphene.Field(Order, description='A released order.')
325
326 class Arguments:
327 id = graphene.ID(
328 required=True, description='ID of the order to release.')
329
330 class Meta:
331 description = 'Release an order.'
332
333 @classmethod
334 @permission_required('order.manage_orders')
335 def mutate(cls, root, info, id):
336 errors = []
337 order = cls.get_node_or_error(info, id, errors, 'id', Order)
338 if order:
339 payment = order.get_last_payment()
340 clean_release_payment(payment, errors)
341
342 if errors:
343 return OrderRelease(errors=errors)
344
345 order.events.create(
346 type=OrderEvents.PAYMENT_RELEASED.value,
347 user=info.context.user)
348 return OrderRelease(order=order)
349
350
351 class OrderRefund(BaseMutation):
352 order = graphene.Field(Order, description='A refunded order.')
353
354 class Arguments:
355 id = graphene.ID(
356 required=True, description='ID of the order to refund.')
357 amount = Decimal(
358 required=True, description='Amount of money to refund.')
359
360 class Meta:
361 description = 'Refund an order.'
362
363 @classmethod
364 @permission_required('order.manage_orders')
365 def mutate(cls, root, info, id, amount):
366 errors = []
367 order = cls.get_node_or_error(info, id, errors, 'id', Order)
368 if order:
369 payment = order.get_last_payment()
370 clean_refund_payment(payment, amount, errors)
371 try_payment_action(payment.refund, amount, errors)
372 if errors:
373 return OrderRefund(errors=errors)
374
375 order.events.create(
376 type=OrderEvents.PAYMENT_REFUNDED.value,
377 user=info.context.user,
378 parameters={'amount': amount})
379 return OrderRefund(order=order)
380
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/saleor/graphql/order/mutations/orders.py b/saleor/graphql/order/mutations/orders.py
--- a/saleor/graphql/order/mutations/orders.py
+++ b/saleor/graphql/order/mutations/orders.py
@@ -247,8 +247,7 @@
parameters={'quantity': order.get_total_quantity()})
else:
order.events.create(
- type=OrderEvents.ORDER_CANCELED.value,
- user=info.context.user)
+ type=OrderEvents.CANCELED.value, user=info.context.user)
return OrderCancel(order=order)
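The one-line fix suggests the enum has no ORDER_CANCELED member, so the restock=False branch raised AttributeError only after cancel_order() had already run, which matches the report that the order is cancelled anyway. A minimal sketch of that failure mode, assuming OrderEvents is a plain Python Enum with hypothetical member values:

```python
from enum import Enum

class OrderEvents(Enum):
    CANCELED = 'canceled'
    FULFILLMENT_RESTOCKED_ITEMS = 'restocked_items'

OrderEvents.FULFILLMENT_RESTOCKED_ITEMS.value   # 'restocked_items', the restock=True path
OrderEvents.CANCELED.value                      # 'canceled', what the patch now uses

try:
    OrderEvents.ORDER_CANCELED.value            # the old restock=False path
except AttributeError as exc:
    print(exc)                                  # ORDER_CANCELED is not a member
```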
|
{"golden_diff": "diff --git a/saleor/graphql/order/mutations/orders.py b/saleor/graphql/order/mutations/orders.py\n--- a/saleor/graphql/order/mutations/orders.py\n+++ b/saleor/graphql/order/mutations/orders.py\n@@ -247,8 +247,7 @@\n parameters={'quantity': order.get_total_quantity()})\n else:\n order.events.create(\n- type=OrderEvents.ORDER_CANCELED.value,\n- user=info.context.user)\n+ type=OrderEvents.CANCELED.value, user=info.context.user)\n return OrderCancel(order=order)\n", "issue": "OrderCancel mutation throws error when asked not to restock inventory\n### What I'm trying to achieve\r\nCancel order without doing restock.\r\n\r\n### Steps to reproduce the problem\r\n1. Execute query\r\n```\r\nmutation {\r\n orderCancel(id: \"T3JkZXI6MTQ=\", restock: false) {\r\n errors {\r\n field\r\n message\r\n }\r\n order {\r\n id\r\n }\r\n }\r\n}\r\n```\r\n2. Get an error\r\n3. Order is cancelled anyway\r\n\r\n### What I expected to happen\r\nTo work perfectly. Note: if `restock: true`, mutation executes properly.\r\n\n", "before_files": [{"content": "import graphene\nfrom django.utils.translation import pgettext_lazy\nfrom graphql_jwt.decorators import permission_required\nfrom payments import PaymentError, PaymentStatus\n\nfrom ....core.utils.taxes import ZERO_TAXED_MONEY\nfrom ....order import CustomPaymentChoices, OrderEvents, models\nfrom ....order.utils import cancel_order\nfrom ....shipping.models import ShippingMethod as ShippingMethodModel\nfrom ...account.types import AddressInput\nfrom ...core.mutations import BaseMutation\nfrom ...core.types.common import Decimal, Error\nfrom ...order.mutations.draft_orders import DraftOrderUpdate\nfrom ...order.types import Order, OrderEvent\nfrom ...shipping.types import ShippingMethod\n\n\ndef clean_order_update_shipping(order, method, errors):\n if not method:\n return errors\n if not order.shipping_address:\n errors.append(\n Error(\n field='order',\n message=(\n 'Cannot choose a shipping method for an '\n 'order without the shipping address.')))\n return errors\n valid_methods = (\n ShippingMethodModel.objects.applicable_shipping_methods(\n price=order.get_subtotal().gross.amount,\n weight=order.get_total_weight(),\n country_code=order.shipping_address.country.code))\n valid_methods = valid_methods.values_list('id', flat=True)\n if method.pk not in valid_methods:\n errors.append(\n Error(\n field='shippingMethod',\n message='Shipping method cannot be used with this order.'))\n return errors\n\n\ndef try_payment_action(action, money, errors):\n try:\n action(money)\n except (PaymentError, ValueError) as e:\n errors.append(Error(field='payment', message=str(e)))\n\n\ndef clean_order_cancel(order, errors):\n if order and not order.can_cancel():\n errors.append(\n Error(\n field='order',\n message='This order can\\'t be canceled.'))\n return errors\n\n\ndef clean_order_mark_as_paid(order, errors):\n if order and order.payments.exists():\n errors.append(\n Error(\n field='payment',\n message='Orders with payments can not be manually '\n 'marked as paid.'))\n return errors\n\n\ndef clean_order_capture(payment, amount, errors):\n if not payment:\n errors.append(\n Error(\n field='payment',\n message='There\\'s no payment associated with the order.'))\n return errors\n if payment.status != PaymentStatus.PREAUTH:\n errors.append(\n Error(\n field='payment',\n message='Only pre-authorized payments can be captured'))\n return errors\n\n\ndef clean_release_payment(payment, errors):\n \"\"\"Check for payment errors.\"\"\"\n if payment.status != 
PaymentStatus.PREAUTH:\n errors.append(\n Error(field='payment',\n message='Only pre-authorized payments can be released'))\n try:\n payment.release()\n except (PaymentError, ValueError) as e:\n errors.append(Error(field='payment', message=str(e)))\n return errors\n\n\ndef clean_refund_payment(payment, amount, errors):\n if payment.variant == CustomPaymentChoices.MANUAL:\n errors.append(\n Error(field='payment',\n message='Manual payments can not be refunded.'))\n return errors\n\n\nclass OrderUpdateInput(graphene.InputObjectType):\n billing_address = AddressInput(\n description='Billing address of the customer.')\n user_email = graphene.String(description='Email address of the customer.')\n shipping_address = AddressInput(\n description='Shipping address of the customer.')\n\n\nclass OrderUpdate(DraftOrderUpdate):\n class Arguments:\n id = graphene.ID(\n required=True, description='ID of an order to update.')\n input = OrderUpdateInput(\n required=True,\n description='Fields required to update an order.')\n\n class Meta:\n description = 'Updates an order.'\n model = models.Order\n\n\nclass OrderUpdateShippingInput(graphene.InputObjectType):\n shipping_method = graphene.ID(\n description='ID of the selected shipping method.',\n name='shippingMethod')\n\n\nclass OrderUpdateShipping(BaseMutation):\n order = graphene.Field(\n Order, description='Order with updated shipping method.')\n\n class Arguments:\n id = graphene.ID(\n required=True, name='order',\n description='ID of the order to update a shipping method.')\n input = OrderUpdateShippingInput(\n description='Fields required to change '\n 'shipping method of the order.')\n\n class Meta:\n description = 'Updates a shipping method of the order.'\n\n @classmethod\n @permission_required('order.manage_orders')\n def mutate(cls, root, info, id, input):\n errors = []\n order = cls.get_node_or_error(info, id, errors, 'id', Order)\n\n if not input['shipping_method']:\n if order.is_shipping_required():\n cls.add_error(\n errors, 'shippingMethod',\n 'Shipping method is required for this order.')\n return OrderUpdateShipping(errors=errors)\n order.shipping_method = None\n order.shipping_price = ZERO_TAXED_MONEY\n order.shipping_method_name = None\n order.save(\n update_fields=[\n 'shipping_method', 'shipping_price_net',\n 'shipping_price_gross', 'shipping_method_name'])\n return OrderUpdateShipping(order=order)\n\n method = cls.get_node_or_error(\n info, input['shipping_method'], errors,\n 'shipping_method', ShippingMethod)\n clean_order_update_shipping(order, method, errors)\n if errors:\n return OrderUpdateShipping(errors=errors)\n\n order.shipping_method = method\n order.shipping_price = method.get_total_price(info.context.taxes)\n order.shipping_method_name = method.name\n order.save(\n update_fields=[\n 'shipping_method', 'shipping_method_name',\n 'shipping_price_net', 'shipping_price_gross'])\n return OrderUpdateShipping(order=order)\n\n\nclass OrderAddNoteInput(graphene.InputObjectType):\n message = graphene.String(description='Note message.', name='message')\n\n\nclass OrderAddNote(BaseMutation):\n order = graphene.Field(Order, description='Order with the note added.')\n event = graphene.Field(OrderEvent, description='Order note created.')\n\n class Arguments:\n id = graphene.ID(\n required=True,\n description='ID of the order to add a note for.', name='order')\n input = OrderAddNoteInput(\n required=True,\n description='Fields required to create a note for the order.')\n\n class Meta:\n description = 'Adds note to the order.'\n\n 
@classmethod\n @permission_required('order.manage_orders')\n def mutate(cls, root, info, id, input):\n errors = []\n order = cls.get_node_or_error(info, id, errors, 'id', Order)\n if errors:\n return OrderAddNote(errors=errors)\n\n event = order.events.create(\n type=OrderEvents.NOTE_ADDED.value,\n user=info.context.user,\n parameters={\n 'message': input['message']})\n return OrderAddNote(order=order, event=event)\n\n\nclass OrderCancel(BaseMutation):\n order = graphene.Field(Order, description='Canceled order.')\n\n class Arguments:\n id = graphene.ID(\n required=True, description='ID of the order to cancel.')\n restock = graphene.Boolean(\n required=True,\n description='Determine if lines will be restocked or not.')\n\n class Meta:\n description = 'Cancel an order.'\n\n @classmethod\n @permission_required('order.manage_orders')\n def mutate(cls, root, info, id, restock):\n errors = []\n order = cls.get_node_or_error(info, id, errors, 'id', Order)\n clean_order_cancel(order, errors)\n if errors:\n return OrderCancel(errors=errors)\n\n cancel_order(order=order, restock=restock)\n if restock:\n order.events.create(\n type=OrderEvents.FULFILLMENT_RESTOCKED_ITEMS.value,\n user=info.context.user,\n parameters={'quantity': order.get_total_quantity()})\n else:\n order.events.create(\n type=OrderEvents.ORDER_CANCELED.value,\n user=info.context.user)\n return OrderCancel(order=order)\n\n\nclass OrderMarkAsPaid(BaseMutation):\n order = graphene.Field(Order, description='Order marked as paid.')\n\n class Arguments:\n id = graphene.ID(\n required=True, description='ID of the order to mark paid.')\n\n class Meta:\n description = 'Mark order as manually paid.'\n\n @classmethod\n @permission_required('order.manage_orders')\n def mutate(cls, root, info, id):\n errors = []\n order = cls.get_node_or_error(info, id, errors, 'id', Order)\n clean_order_mark_as_paid(order, errors)\n if errors:\n return OrderMarkAsPaid(errors=errors)\n\n defaults = {\n 'total': order.total.gross.amount,\n 'tax': order.total.tax.amount,\n 'currency': order.total.currency,\n 'delivery': order.shipping_price.net.amount,\n 'description': pgettext_lazy(\n 'Payment description', 'Order %(order)s') % {'order': order},\n 'captured_amount': order.total.gross.amount}\n models.Payment.objects.get_or_create(\n variant=CustomPaymentChoices.MANUAL,\n status=PaymentStatus.CONFIRMED, order=order,\n defaults=defaults)\n\n order.events.create(\n type=OrderEvents.ORDER_MARKED_AS_PAID.value,\n user=info.context.user)\n return OrderMarkAsPaid(order=order)\n\n\nclass OrderCapture(BaseMutation):\n order = graphene.Field(Order, description='Captured order.')\n\n class Arguments:\n id = graphene.ID(\n required=True, description='ID of the order to capture.')\n amount = Decimal(\n required=True, description='Amount of money to capture.')\n\n class Meta:\n description = 'Capture an order.'\n\n @classmethod\n @permission_required('order.manage_orders')\n def mutate(cls, root, info, id, amount):\n errors = []\n order = cls.get_node_or_error(info, id, errors, 'id', Order)\n payment = order.get_last_payment()\n clean_order_capture(payment, amount, errors)\n try_payment_action(payment.capture, amount, errors)\n if errors:\n return OrderCapture(errors=errors)\n\n order.events.create(\n parameters={'amount': amount},\n type=OrderEvents.PAYMENT_CAPTURED.value,\n user=info.context.user)\n return OrderCapture(order=order)\n\n\nclass OrderRelease(BaseMutation):\n order = graphene.Field(Order, description='A released order.')\n\n class Arguments:\n id = 
graphene.ID(\n required=True, description='ID of the order to release.')\n\n class Meta:\n description = 'Release an order.'\n\n @classmethod\n @permission_required('order.manage_orders')\n def mutate(cls, root, info, id):\n errors = []\n order = cls.get_node_or_error(info, id, errors, 'id', Order)\n if order:\n payment = order.get_last_payment()\n clean_release_payment(payment, errors)\n\n if errors:\n return OrderRelease(errors=errors)\n\n order.events.create(\n type=OrderEvents.PAYMENT_RELEASED.value,\n user=info.context.user)\n return OrderRelease(order=order)\n\n\nclass OrderRefund(BaseMutation):\n order = graphene.Field(Order, description='A refunded order.')\n\n class Arguments:\n id = graphene.ID(\n required=True, description='ID of the order to refund.')\n amount = Decimal(\n required=True, description='Amount of money to refund.')\n\n class Meta:\n description = 'Refund an order.'\n\n @classmethod\n @permission_required('order.manage_orders')\n def mutate(cls, root, info, id, amount):\n errors = []\n order = cls.get_node_or_error(info, id, errors, 'id', Order)\n if order:\n payment = order.get_last_payment()\n clean_refund_payment(payment, amount, errors)\n try_payment_action(payment.refund, amount, errors)\n if errors:\n return OrderRefund(errors=errors)\n\n order.events.create(\n type=OrderEvents.PAYMENT_REFUNDED.value,\n user=info.context.user,\n parameters={'amount': amount})\n return OrderRefund(order=order)\n", "path": "saleor/graphql/order/mutations/orders.py"}], "after_files": [{"content": "import graphene\nfrom django.utils.translation import pgettext_lazy\nfrom graphql_jwt.decorators import permission_required\nfrom payments import PaymentError, PaymentStatus\n\nfrom ....core.utils.taxes import ZERO_TAXED_MONEY\nfrom ....order import CustomPaymentChoices, OrderEvents, models\nfrom ....order.utils import cancel_order\nfrom ....shipping.models import ShippingMethod as ShippingMethodModel\nfrom ...account.types import AddressInput\nfrom ...core.mutations import BaseMutation\nfrom ...core.types.common import Decimal, Error\nfrom ...order.mutations.draft_orders import DraftOrderUpdate\nfrom ...order.types import Order, OrderEvent\nfrom ...shipping.types import ShippingMethod\n\n\ndef clean_order_update_shipping(order, method, errors):\n if not method:\n return errors\n if not order.shipping_address:\n errors.append(\n Error(\n field='order',\n message=(\n 'Cannot choose a shipping method for an '\n 'order without the shipping address.')))\n return errors\n valid_methods = (\n ShippingMethodModel.objects.applicable_shipping_methods(\n price=order.get_subtotal().gross.amount,\n weight=order.get_total_weight(),\n country_code=order.shipping_address.country.code))\n valid_methods = valid_methods.values_list('id', flat=True)\n if method.pk not in valid_methods:\n errors.append(\n Error(\n field='shippingMethod',\n message='Shipping method cannot be used with this order.'))\n return errors\n\n\ndef try_payment_action(action, money, errors):\n try:\n action(money)\n except (PaymentError, ValueError) as e:\n errors.append(Error(field='payment', message=str(e)))\n\n\ndef clean_order_cancel(order, errors):\n if order and not order.can_cancel():\n errors.append(\n Error(\n field='order',\n message='This order can\\'t be canceled.'))\n return errors\n\n\ndef clean_order_mark_as_paid(order, errors):\n if order and order.payments.exists():\n errors.append(\n Error(\n field='payment',\n message='Orders with payments can not be manually '\n 'marked as paid.'))\n return errors\n\n\ndef 
clean_order_capture(payment, amount, errors):\n if not payment:\n errors.append(\n Error(\n field='payment',\n message='There\\'s no payment associated with the order.'))\n return errors\n if payment.status != PaymentStatus.PREAUTH:\n errors.append(\n Error(\n field='payment',\n message='Only pre-authorized payments can be captured'))\n return errors\n\n\ndef clean_release_payment(payment, errors):\n \"\"\"Check for payment errors.\"\"\"\n if payment.status != PaymentStatus.PREAUTH:\n errors.append(\n Error(field='payment',\n message='Only pre-authorized payments can be released'))\n try:\n payment.release()\n except (PaymentError, ValueError) as e:\n errors.append(Error(field='payment', message=str(e)))\n return errors\n\n\ndef clean_refund_payment(payment, amount, errors):\n if payment.variant == CustomPaymentChoices.MANUAL:\n errors.append(\n Error(field='payment',\n message='Manual payments can not be refunded.'))\n return errors\n\n\nclass OrderUpdateInput(graphene.InputObjectType):\n billing_address = AddressInput(\n description='Billing address of the customer.')\n user_email = graphene.String(description='Email address of the customer.')\n shipping_address = AddressInput(\n description='Shipping address of the customer.')\n\n\nclass OrderUpdate(DraftOrderUpdate):\n class Arguments:\n id = graphene.ID(\n required=True, description='ID of an order to update.')\n input = OrderUpdateInput(\n required=True,\n description='Fields required to update an order.')\n\n class Meta:\n description = 'Updates an order.'\n model = models.Order\n\n\nclass OrderUpdateShippingInput(graphene.InputObjectType):\n shipping_method = graphene.ID(\n description='ID of the selected shipping method.',\n name='shippingMethod')\n\n\nclass OrderUpdateShipping(BaseMutation):\n order = graphene.Field(\n Order, description='Order with updated shipping method.')\n\n class Arguments:\n id = graphene.ID(\n required=True, name='order',\n description='ID of the order to update a shipping method.')\n input = OrderUpdateShippingInput(\n description='Fields required to change '\n 'shipping method of the order.')\n\n class Meta:\n description = 'Updates a shipping method of the order.'\n\n @classmethod\n @permission_required('order.manage_orders')\n def mutate(cls, root, info, id, input):\n errors = []\n order = cls.get_node_or_error(info, id, errors, 'id', Order)\n\n if not input['shipping_method']:\n if order.is_shipping_required():\n cls.add_error(\n errors, 'shippingMethod',\n 'Shipping method is required for this order.')\n return OrderUpdateShipping(errors=errors)\n order.shipping_method = None\n order.shipping_price = ZERO_TAXED_MONEY\n order.shipping_method_name = None\n order.save(\n update_fields=[\n 'shipping_method', 'shipping_price_net',\n 'shipping_price_gross', 'shipping_method_name'])\n return OrderUpdateShipping(order=order)\n\n method = cls.get_node_or_error(\n info, input['shipping_method'], errors,\n 'shipping_method', ShippingMethod)\n clean_order_update_shipping(order, method, errors)\n if errors:\n return OrderUpdateShipping(errors=errors)\n\n order.shipping_method = method\n order.shipping_price = method.get_total_price(info.context.taxes)\n order.shipping_method_name = method.name\n order.save(\n update_fields=[\n 'shipping_method', 'shipping_method_name',\n 'shipping_price_net', 'shipping_price_gross'])\n return OrderUpdateShipping(order=order)\n\n\nclass OrderAddNoteInput(graphene.InputObjectType):\n message = graphene.String(description='Note message.', name='message')\n\n\nclass 
OrderAddNote(BaseMutation):\n order = graphene.Field(Order, description='Order with the note added.')\n event = graphene.Field(OrderEvent, description='Order note created.')\n\n class Arguments:\n id = graphene.ID(\n required=True,\n description='ID of the order to add a note for.', name='order')\n input = OrderAddNoteInput(\n required=True,\n description='Fields required to create a note for the order.')\n\n class Meta:\n description = 'Adds note to the order.'\n\n @classmethod\n @permission_required('order.manage_orders')\n def mutate(cls, root, info, id, input):\n errors = []\n order = cls.get_node_or_error(info, id, errors, 'id', Order)\n if errors:\n return OrderAddNote(errors=errors)\n\n event = order.events.create(\n type=OrderEvents.NOTE_ADDED.value,\n user=info.context.user,\n parameters={\n 'message': input['message']})\n return OrderAddNote(order=order, event=event)\n\n\nclass OrderCancel(BaseMutation):\n order = graphene.Field(Order, description='Canceled order.')\n\n class Arguments:\n id = graphene.ID(\n required=True, description='ID of the order to cancel.')\n restock = graphene.Boolean(\n required=True,\n description='Determine if lines will be restocked or not.')\n\n class Meta:\n description = 'Cancel an order.'\n\n @classmethod\n @permission_required('order.manage_orders')\n def mutate(cls, root, info, id, restock):\n errors = []\n order = cls.get_node_or_error(info, id, errors, 'id', Order)\n clean_order_cancel(order, errors)\n if errors:\n return OrderCancel(errors=errors)\n\n cancel_order(order=order, restock=restock)\n if restock:\n order.events.create(\n type=OrderEvents.FULFILLMENT_RESTOCKED_ITEMS.value,\n user=info.context.user,\n parameters={'quantity': order.get_total_quantity()})\n else:\n order.events.create(\n type=OrderEvents.CANCELED.value, user=info.context.user)\n return OrderCancel(order=order)\n\n\nclass OrderMarkAsPaid(BaseMutation):\n order = graphene.Field(Order, description='Order marked as paid.')\n\n class Arguments:\n id = graphene.ID(\n required=True, description='ID of the order to mark paid.')\n\n class Meta:\n description = 'Mark order as manually paid.'\n\n @classmethod\n @permission_required('order.manage_orders')\n def mutate(cls, root, info, id):\n errors = []\n order = cls.get_node_or_error(info, id, errors, 'id', Order)\n clean_order_mark_as_paid(order, errors)\n if errors:\n return OrderMarkAsPaid(errors=errors)\n\n defaults = {\n 'total': order.total.gross.amount,\n 'tax': order.total.tax.amount,\n 'currency': order.total.currency,\n 'delivery': order.shipping_price.net.amount,\n 'description': pgettext_lazy(\n 'Payment description', 'Order %(order)s') % {'order': order},\n 'captured_amount': order.total.gross.amount}\n models.Payment.objects.get_or_create(\n variant=CustomPaymentChoices.MANUAL,\n status=PaymentStatus.CONFIRMED, order=order,\n defaults=defaults)\n\n order.events.create(\n type=OrderEvents.ORDER_MARKED_AS_PAID.value,\n user=info.context.user)\n return OrderMarkAsPaid(order=order)\n\n\nclass OrderCapture(BaseMutation):\n order = graphene.Field(Order, description='Captured order.')\n\n class Arguments:\n id = graphene.ID(\n required=True, description='ID of the order to capture.')\n amount = Decimal(\n required=True, description='Amount of money to capture.')\n\n class Meta:\n description = 'Capture an order.'\n\n @classmethod\n @permission_required('order.manage_orders')\n def mutate(cls, root, info, id, amount):\n errors = []\n order = cls.get_node_or_error(info, id, errors, 'id', Order)\n payment = 
order.get_last_payment()\n clean_order_capture(payment, amount, errors)\n try_payment_action(payment.capture, amount, errors)\n if errors:\n return OrderCapture(errors=errors)\n\n order.events.create(\n parameters={'amount': amount},\n type=OrderEvents.PAYMENT_CAPTURED.value,\n user=info.context.user)\n return OrderCapture(order=order)\n\n\nclass OrderRelease(BaseMutation):\n order = graphene.Field(Order, description='A released order.')\n\n class Arguments:\n id = graphene.ID(\n required=True, description='ID of the order to release.')\n\n class Meta:\n description = 'Release an order.'\n\n @classmethod\n @permission_required('order.manage_orders')\n def mutate(cls, root, info, id):\n errors = []\n order = cls.get_node_or_error(info, id, errors, 'id', Order)\n if order:\n payment = order.get_last_payment()\n clean_release_payment(payment, errors)\n\n if errors:\n return OrderRelease(errors=errors)\n\n order.events.create(\n type=OrderEvents.PAYMENT_RELEASED.value,\n user=info.context.user)\n return OrderRelease(order=order)\n\n\nclass OrderRefund(BaseMutation):\n order = graphene.Field(Order, description='A refunded order.')\n\n class Arguments:\n id = graphene.ID(\n required=True, description='ID of the order to refund.')\n amount = Decimal(\n required=True, description='Amount of money to refund.')\n\n class Meta:\n description = 'Refund an order.'\n\n @classmethod\n @permission_required('order.manage_orders')\n def mutate(cls, root, info, id, amount):\n errors = []\n order = cls.get_node_or_error(info, id, errors, 'id', Order)\n if order:\n payment = order.get_last_payment()\n clean_refund_payment(payment, amount, errors)\n try_payment_action(payment.refund, amount, errors)\n if errors:\n return OrderRefund(errors=errors)\n\n order.events.create(\n type=OrderEvents.PAYMENT_REFUNDED.value,\n user=info.context.user,\n parameters={'amount': amount})\n return OrderRefund(order=order)\n", "path": "saleor/graphql/order/mutations/orders.py"}]}
| 4,020 | 123 |
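The Saleor mutation helpers visible above (`clean_order_capture`, `clean_release_payment`, `clean_refund_payment`) all follow one validation idiom: each helper appends `Error` objects to a caller-supplied list, and the mutation returns early when that list is non-empty. A minimal, framework-free sketch of the same pattern, assuming a stand-in `Error` dataclass rather than Saleor's GraphQL types:

```python
from dataclasses import dataclass
from typing import List, Optional


@dataclass
class Error:
    field: str
    message: str


def clean_capture(payment: Optional[object], errors: List[Error]) -> List[Error]:
    # Mirrors the clean_* helpers above: validate, append, return the shared list.
    if payment is None:
        errors.append(Error(field="payment", message="No payment associated with the order."))
    return errors


def capture_order(payment: Optional[object]) -> List[Error]:
    errors: List[Error] = []
    clean_capture(payment, errors)
    if errors:  # bail out before performing any side effects
        return errors
    # ... perform the actual capture here ...
    return errors
```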
gh_patches_debug_51262
|
rasdani/github-patches
|
git_diff
|
conda__conda-5426
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Launching navigator via prompt warnings appear
_From @RidaZubair on May 24, 2017 9:47_
**OS:** Windows
**Anaconda: 4.4.0**
**Actual:**
On launching navigator via prompt following warning appears on prompt

_Copied from original issue: ContinuumIO/navigator#1189_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda/common/platform.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 from collections import OrderedDict
5 from genericpath import exists
6 from glob import glob
7 from logging import getLogger
8 import sys
9
10 from .compat import iteritems, on_win
11 from .._vendor.auxlib.decorators import memoize
12
13 log = getLogger(__name__)
14
15
16 def is_admin_on_windows(): # pragma: unix no cover
17 # http://stackoverflow.com/a/1026626/2127762
18 if not on_win: # pragma: no cover
19 return False
20 try:
21 from ctypes import windll
22 return windll.shell32.IsUserAnAdmin()() != 0
23 except ImportError as e:
24 log.debug('%r', e)
25 return 'unknown'
26 except Exception as e:
27 log.warn('%r', e)
28 return 'unknown'
29
30
31 @memoize
32 def linux_get_libc_version():
33 """
34 If on linux, returns (libc_family, version), otherwise (None, None)
35 """
36
37 if not sys.platform.startswith('linux'):
38 return None, None
39
40 from os import confstr, confstr_names, readlink
41
42 # Python 2.7 does not have either of these keys in confstr_names, so provide
43 # hard-coded defaults and assert if the key is in confstr_names but differs.
44 # These are defined by POSIX anyway so should never change.
45 confstr_names_fallback = OrderedDict([('CS_GNU_LIBC_VERSION', 2),
46 ('CS_GNU_LIBPTHREAD_VERSION', 3)])
47
48 val = None
49 for k, v in iteritems(confstr_names_fallback):
50 assert k not in confstr_names or confstr_names[k] == v, (
51 "confstr_names_fallback for %s is %s yet in confstr_names it is %s"
52 "" % (k, confstr_names_fallback[k], confstr_names[k])
53 )
54 try:
55 val = str(confstr(v))
56 except:
57 pass
58 else:
59 if val:
60 break
61
62 if not val:
63 # Weird, play it safe and assume glibc 2.5
64 family, version = 'glibc', '2.5'
65 log.warning("Failed to detect libc family and version, assuming %s/%s", family, version)
66 return family, version
67 family, version = val.split(' ')
68
69 # NPTL is just the name of the threading library, even though the
70 # version refers to that of uClibc. readlink() can help to try to
71 # figure out a better name instead.
72 if family == 'NPTL':
73 clibs = glob('/lib/libc.so*')
74 for clib in clibs:
75 clib = readlink(clib)
76 if exists(clib):
77 if clib.startswith('libuClibc'):
78 if version.startswith('0.'):
79 family = 'uClibc'
80 else:
81 family = 'uClibc-ng'
82 return family, version
83 # This could be some other C library; it is unlikely though.
84 family = 'uClibc'
85 log.warning("Failed to detect non-glibc family, assuming %s (%s)", family, version)
86 return family, version
87 return family, version
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/conda/common/platform.py b/conda/common/platform.py
--- a/conda/common/platform.py
+++ b/conda/common/platform.py
@@ -19,12 +19,12 @@
return False
try:
from ctypes import windll
- return windll.shell32.IsUserAnAdmin()() != 0
+ return windll.shell32.IsUserAnAdmin() != 0
except ImportError as e:
log.debug('%r', e)
return 'unknown'
except Exception as e:
- log.warn('%r', e)
+ log.info('%r', e)
return 'unknown'
|
{"golden_diff": "diff --git a/conda/common/platform.py b/conda/common/platform.py\n--- a/conda/common/platform.py\n+++ b/conda/common/platform.py\n@@ -19,12 +19,12 @@\n return False\n try:\n from ctypes import windll\n- return windll.shell32.IsUserAnAdmin()() != 0\n+ return windll.shell32.IsUserAnAdmin() != 0\n except ImportError as e:\n log.debug('%r', e)\n return 'unknown'\n except Exception as e:\n- log.warn('%r', e)\n+ log.info('%r', e)\n return 'unknown'\n", "issue": "Launching navigator via prompt warnings appear\n_From @RidaZubair on May 24, 2017 9:47_\n\n**OS:** Windows\r\n**Anaconda: 4.4.0**\r\n\r\n**Actual:**\r\nOn launching navigator via prompt following warning appears on prompt\r\n\r\n\r\n\n\n_Copied from original issue: ContinuumIO/navigator#1189_\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom collections import OrderedDict\nfrom genericpath import exists\nfrom glob import glob\nfrom logging import getLogger\nimport sys\n\nfrom .compat import iteritems, on_win\nfrom .._vendor.auxlib.decorators import memoize\n\nlog = getLogger(__name__)\n\n\ndef is_admin_on_windows(): # pragma: unix no cover\n # http://stackoverflow.com/a/1026626/2127762\n if not on_win: # pragma: no cover\n return False\n try:\n from ctypes import windll\n return windll.shell32.IsUserAnAdmin()() != 0\n except ImportError as e:\n log.debug('%r', e)\n return 'unknown'\n except Exception as e:\n log.warn('%r', e)\n return 'unknown'\n\n\n@memoize\ndef linux_get_libc_version():\n \"\"\"\n If on linux, returns (libc_family, version), otherwise (None, None)\n \"\"\"\n\n if not sys.platform.startswith('linux'):\n return None, None\n\n from os import confstr, confstr_names, readlink\n\n # Python 2.7 does not have either of these keys in confstr_names, so provide\n # hard-coded defaults and assert if the key is in confstr_names but differs.\n # These are defined by POSIX anyway so should never change.\n confstr_names_fallback = OrderedDict([('CS_GNU_LIBC_VERSION', 2),\n ('CS_GNU_LIBPTHREAD_VERSION', 3)])\n\n val = None\n for k, v in iteritems(confstr_names_fallback):\n assert k not in confstr_names or confstr_names[k] == v, (\n \"confstr_names_fallback for %s is %s yet in confstr_names it is %s\"\n \"\" % (k, confstr_names_fallback[k], confstr_names[k])\n )\n try:\n val = str(confstr(v))\n except:\n pass\n else:\n if val:\n break\n\n if not val:\n # Weird, play it safe and assume glibc 2.5\n family, version = 'glibc', '2.5'\n log.warning(\"Failed to detect libc family and version, assuming %s/%s\", family, version)\n return family, version\n family, version = val.split(' ')\n\n # NPTL is just the name of the threading library, even though the\n # version refers to that of uClibc. 
readlink() can help to try to\n # figure out a better name instead.\n if family == 'NPTL':\n clibs = glob('/lib/libc.so*')\n for clib in clibs:\n clib = readlink(clib)\n if exists(clib):\n if clib.startswith('libuClibc'):\n if version.startswith('0.'):\n family = 'uClibc'\n else:\n family = 'uClibc-ng'\n return family, version\n # This could be some other C library; it is unlikely though.\n family = 'uClibc'\n log.warning(\"Failed to detect non-glibc family, assuming %s (%s)\", family, version)\n return family, version\n return family, version\n", "path": "conda/common/platform.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom collections import OrderedDict\nfrom genericpath import exists\nfrom glob import glob\nfrom logging import getLogger\nimport sys\n\nfrom .compat import iteritems, on_win\nfrom .._vendor.auxlib.decorators import memoize\n\nlog = getLogger(__name__)\n\n\ndef is_admin_on_windows(): # pragma: unix no cover\n # http://stackoverflow.com/a/1026626/2127762\n if not on_win: # pragma: no cover\n return False\n try:\n from ctypes import windll\n return windll.shell32.IsUserAnAdmin() != 0\n except ImportError as e:\n log.debug('%r', e)\n return 'unknown'\n except Exception as e:\n log.info('%r', e)\n return 'unknown'\n\n\n@memoize\ndef linux_get_libc_version():\n \"\"\"\n If on linux, returns (libc_family, version), otherwise (None, None)\n \"\"\"\n\n if not sys.platform.startswith('linux'):\n return None, None\n\n from os import confstr, confstr_names, readlink\n\n # Python 2.7 does not have either of these keys in confstr_names, so provide\n # hard-coded defaults and assert if the key is in confstr_names but differs.\n # These are defined by POSIX anyway so should never change.\n confstr_names_fallback = OrderedDict([('CS_GNU_LIBC_VERSION', 2),\n ('CS_GNU_LIBPTHREAD_VERSION', 3)])\n\n val = None\n for k, v in iteritems(confstr_names_fallback):\n assert k not in confstr_names or confstr_names[k] == v, (\n \"confstr_names_fallback for %s is %s yet in confstr_names it is %s\"\n \"\" % (k, confstr_names_fallback[k], confstr_names[k])\n )\n try:\n val = str(confstr(v))\n except:\n pass\n else:\n if val:\n break\n\n if not val:\n # Weird, play it safe and assume glibc 2.5\n family, version = 'glibc', '2.5'\n log.warning(\"Failed to detect libc family and version, assuming %s/%s\", family, version)\n return family, version\n family, version = val.split(' ')\n\n # NPTL is just the name of the threading library, even though the\n # version refers to that of uClibc. readlink() can help to try to\n # figure out a better name instead.\n if family == 'NPTL':\n clibs = glob('/lib/libc.so*')\n for clib in clibs:\n clib = readlink(clib)\n if exists(clib):\n if clib.startswith('libuClibc'):\n if version.startswith('0.'):\n family = 'uClibc'\n else:\n family = 'uClibc-ng'\n return family, version\n # This could be some other C library; it is unlikely though.\n family = 'uClibc'\n log.warning(\"Failed to detect non-glibc family, assuming %s (%s)\", family, version)\n return family, version\n return family, version\n", "path": "conda/common/platform.py"}]}
| 1,295 | 143 |
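A note on the conda patch above: `windll.shell32.IsUserAnAdmin()` already returns an integer, so the extra pair of parentheses in the pre-patch code tries to call that integer and raises `TypeError: 'int' object is not callable`. The broad `except Exception` branch then logs it via `log.warn`, which is most likely the warning reported at prompt launch; the patch removes the stray call and downgrades the log level to `info`. A small, platform-independent sketch of the difference (the stand-in function below is hypothetical, since the real call only exists on Windows):

```python
def is_user_an_admin() -> int:
    """Hypothetical stand-in for windll.shell32.IsUserAnAdmin, which returns an int."""
    return 1


try:
    result = is_user_an_admin()()  # pre-patch form: calls the returned int
except TypeError as exc:
    print(f"pre-patch behaviour: {exc!r}")  # 'int' object is not callable

is_admin = is_user_an_admin() != 0  # patched form: single call, compare to 0
print(f"patched behaviour: admin={is_admin}")
```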
gh_patches_debug_24678
|
rasdani/github-patches
|
git_diff
|
cloudtools__troposphere-1693
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
implement AWS::DMS changes from May 14, 2020 update
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `troposphere/dms.py`
Content:
```
1 # Copyright (c) 2012-2019, Mark Peek <[email protected]>
2 # All rights reserved.
3 #
4 # See LICENSE file for full license.
5
6 from . import AWSObject, AWSProperty, Tags
7 from .validators import boolean, integer, network_port, positive_integer
8
9
10 CDC = "cdc"
11 FULL_LOAD = "full-load"
12 FULL_LOAD_AND_CDC = "full-load-and-cdc"
13
14
15 class Certificate(AWSObject):
16 resource_type = "AWS::DMS::Certificate"
17
18 props = {
19 'CertificateIdentifier': (basestring, False),
20 'CertificatePem': (basestring, False),
21 'CertificateWallet': (basestring, False),
22 }
23
24
25 class DynamoDbSettings(AWSProperty):
26 props = {
27 'ServiceAccessRoleArn': (basestring, False),
28 }
29
30
31 class ElasticsearchSettings(AWSProperty):
32 props = {
33 'EndpointUri': (basestring, False),
34 'ErrorRetryDuration': (integer, False),
35 'FullLoadErrorPercentage': (integer, False),
36 'ServiceAccessRoleArn': (basestring, False),
37 }
38
39
40 class KinesisSettings(AWSProperty):
41 props = {
42 'MessageFormat': (basestring, False),
43 'ServiceAccessRoleArn': (basestring, False),
44 'StreamArn': (basestring, False),
45 }
46
47
48 class MongoDbSettings(AWSProperty):
49 props = {
50 'AuthMechanism': (basestring, False),
51 'AuthSource': (basestring, False),
52 'AuthType': (basestring, False),
53 'DatabaseName': (basestring, False),
54 'DocsToInvestigate': (basestring, False),
55 'ExtractDocId': (basestring, False),
56 'NestingLevel': (basestring, False),
57 'Password': (basestring, False),
58 'Port': (network_port, False),
59 'ServerName': (basestring, False),
60 'Username': (basestring, False),
61 }
62
63
64 class S3Settings(AWSProperty):
65 props = {
66 'BucketFolder': (basestring, False),
67 'BucketName': (basestring, False),
68 'CompressionType': (basestring, False),
69 'CsvDelimiter': (basestring, False),
70 'CsvRowDelimiter': (basestring, False),
71 'ExternalTableDefinition': (basestring, False),
72 'ServiceAccessRoleArn': (basestring, False),
73 }
74
75
76 class KafkaSettings(AWSProperty):
77 props = {
78 'Broker': (basestring, False),
79 'Topic': (basestring, False),
80 }
81
82
83 class Endpoint(AWSObject):
84 resource_type = "AWS::DMS::Endpoint"
85
86 props = {
87 'CertificateArn': (basestring, False),
88 'DatabaseName': (basestring, False),
89 'DynamoDbSettings': (DynamoDbSettings, False),
90 'ElasticsearchSettings': (ElasticsearchSettings, False),
91 'EndpointIdentifier': (basestring, False),
92 'EndpointType': (basestring, True),
93 'EngineName': (basestring, True),
94 'ExtraConnectionAttributes': (basestring, False),
95 'KafkaSettings': (KafkaSettings, False),
96 'KinesisSettings': (KinesisSettings, False),
97 'KmsKeyId': (basestring, False),
98 'MongoDbSettings': (MongoDbSettings, False),
99 'Password': (basestring, False),
100 'Port': (network_port, False),
101 'S3Settings': (S3Settings, False),
102 'ServerName': (basestring, False),
103 'SslMode': (basestring, False),
104 'Tags': (Tags, False),
105 'Username': (basestring, False),
106 }
107
108
109 class EventSubscription(AWSObject):
110 resource_type = "AWS::DMS::EventSubscription"
111
112 props = {
113 'Enabled': (boolean, False),
114 'EventCategories': ([basestring], False),
115 'SnsTopicArn': (basestring, True),
116 'SourceIds': ([basestring], False),
117 'SourceType': (basestring, False),
118 'SubscriptionName': (basestring, False),
119 'Tags': (Tags, False),
120 }
121
122
123 class ReplicationInstance(AWSObject):
124 resource_type = "AWS::DMS::ReplicationInstance"
125
126 props = {
127 'AllocatedStorage': (integer, False),
128 'AllowMajorVersionUpgrade': (boolean, False),
129 'AutoMinorVersionUpgrade': (boolean, False),
130 'AvailabilityZone': (basestring, False),
131 'EngineVersion': (basestring, False),
132 'KmsKeyId': (basestring, False),
133 'MultiAZ': (boolean, False),
134 'PreferredMaintenanceWindow': (basestring, False),
135 'PubliclyAccessible': (boolean, False),
136 'ReplicationInstanceClass': (basestring, True),
137 'ReplicationInstanceIdentifier': (basestring, False),
138 'ReplicationSubnetGroupIdentifier': (basestring, False),
139 'Tags': (Tags, False),
140 'VpcSecurityGroupIds': ([basestring], False),
141 }
142
143
144 class ReplicationSubnetGroup(AWSObject):
145 resource_type = "AWS::DMS::ReplicationSubnetGroup"
146
147 props = {
148 'ReplicationSubnetGroupDescription': (basestring, True),
149 'ReplicationSubnetGroupIdentifier': (basestring, False),
150 'SubnetIds': ([basestring], True),
151 'Tags': (Tags, False),
152 }
153
154
155 class ReplicationTask(AWSObject):
156 resource_type = "AWS::DMS::ReplicationTask"
157
158 props = {
159 'CdcStartPosition': (basestring, False),
160 'CdcStartTime': (positive_integer, False),
161 'CdcStopPosition': (basestring, False),
162 'MigrationType': (basestring, True),
163 'ReplicationInstanceArn': (basestring, True),
164 'ReplicationTaskIdentifier': (basestring, False),
165 'ReplicationTaskSettings': (basestring, False),
166 'SourceEndpointArn': (basestring, True),
167 'TableMappings': (basestring, True),
168 'Tags': (Tags, False),
169 'TargetEndpointArn': (basestring, True),
170 }
171
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/troposphere/dms.py b/troposphere/dms.py
--- a/troposphere/dms.py
+++ b/troposphere/dms.py
@@ -80,6 +80,18 @@
}
+class NeptuneSettings(AWSProperty):
+ props = {
+ 'ErrorRetryDuration': (integer, False),
+ 'IamAuthEnabled': (boolean, False),
+ 'MaxFileSize': (integer, False),
+ 'MaxRetryCount': (integer, False),
+ 'S3BucketFolder': (basestring, False),
+ 'S3BucketName': (basestring, False),
+ 'ServiceAccessRoleArn': (basestring, False),
+ }
+
+
class Endpoint(AWSObject):
resource_type = "AWS::DMS::Endpoint"
@@ -96,6 +108,7 @@
'KinesisSettings': (KinesisSettings, False),
'KmsKeyId': (basestring, False),
'MongoDbSettings': (MongoDbSettings, False),
+ 'NeptuneSettings': (NeptuneSettings, False),
'Password': (basestring, False),
'Port': (network_port, False),
'S3Settings': (S3Settings, False),
@@ -167,4 +180,5 @@
'TableMappings': (basestring, True),
'Tags': (Tags, False),
'TargetEndpointArn': (basestring, True),
+ 'TaskData': (basestring, True),
}
|
{"golden_diff": "diff --git a/troposphere/dms.py b/troposphere/dms.py\n--- a/troposphere/dms.py\n+++ b/troposphere/dms.py\n@@ -80,6 +80,18 @@\n }\n \n \n+class NeptuneSettings(AWSProperty):\n+ props = {\n+ 'ErrorRetryDuration': (integer, False),\n+ 'IamAuthEnabled': (boolean, False),\n+ 'MaxFileSize': (integer, False),\n+ 'MaxRetryCount': (integer, False),\n+ 'S3BucketFolder': (basestring, False),\n+ 'S3BucketName': (basestring, False),\n+ 'ServiceAccessRoleArn': (basestring, False),\n+ }\n+\n+\n class Endpoint(AWSObject):\n resource_type = \"AWS::DMS::Endpoint\"\n \n@@ -96,6 +108,7 @@\n 'KinesisSettings': (KinesisSettings, False),\n 'KmsKeyId': (basestring, False),\n 'MongoDbSettings': (MongoDbSettings, False),\n+ 'NeptuneSettings': (NeptuneSettings, False),\n 'Password': (basestring, False),\n 'Port': (network_port, False),\n 'S3Settings': (S3Settings, False),\n@@ -167,4 +180,5 @@\n 'TableMappings': (basestring, True),\n 'Tags': (Tags, False),\n 'TargetEndpointArn': (basestring, True),\n+ 'TaskData': (basestring, True),\n }\n", "issue": "implement AWS::DMS changes from May 14, 2020 update\n\n", "before_files": [{"content": "# Copyright (c) 2012-2019, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSObject, AWSProperty, Tags\nfrom .validators import boolean, integer, network_port, positive_integer\n\n\nCDC = \"cdc\"\nFULL_LOAD = \"full-load\"\nFULL_LOAD_AND_CDC = \"full-load-and-cdc\"\n\n\nclass Certificate(AWSObject):\n resource_type = \"AWS::DMS::Certificate\"\n\n props = {\n 'CertificateIdentifier': (basestring, False),\n 'CertificatePem': (basestring, False),\n 'CertificateWallet': (basestring, False),\n }\n\n\nclass DynamoDbSettings(AWSProperty):\n props = {\n 'ServiceAccessRoleArn': (basestring, False),\n }\n\n\nclass ElasticsearchSettings(AWSProperty):\n props = {\n 'EndpointUri': (basestring, False),\n 'ErrorRetryDuration': (integer, False),\n 'FullLoadErrorPercentage': (integer, False),\n 'ServiceAccessRoleArn': (basestring, False),\n }\n\n\nclass KinesisSettings(AWSProperty):\n props = {\n 'MessageFormat': (basestring, False),\n 'ServiceAccessRoleArn': (basestring, False),\n 'StreamArn': (basestring, False),\n }\n\n\nclass MongoDbSettings(AWSProperty):\n props = {\n 'AuthMechanism': (basestring, False),\n 'AuthSource': (basestring, False),\n 'AuthType': (basestring, False),\n 'DatabaseName': (basestring, False),\n 'DocsToInvestigate': (basestring, False),\n 'ExtractDocId': (basestring, False),\n 'NestingLevel': (basestring, False),\n 'Password': (basestring, False),\n 'Port': (network_port, False),\n 'ServerName': (basestring, False),\n 'Username': (basestring, False),\n }\n\n\nclass S3Settings(AWSProperty):\n props = {\n 'BucketFolder': (basestring, False),\n 'BucketName': (basestring, False),\n 'CompressionType': (basestring, False),\n 'CsvDelimiter': (basestring, False),\n 'CsvRowDelimiter': (basestring, False),\n 'ExternalTableDefinition': (basestring, False),\n 'ServiceAccessRoleArn': (basestring, False),\n }\n\n\nclass KafkaSettings(AWSProperty):\n props = {\n 'Broker': (basestring, False),\n 'Topic': (basestring, False),\n }\n\n\nclass Endpoint(AWSObject):\n resource_type = \"AWS::DMS::Endpoint\"\n\n props = {\n 'CertificateArn': (basestring, False),\n 'DatabaseName': (basestring, False),\n 'DynamoDbSettings': (DynamoDbSettings, False),\n 'ElasticsearchSettings': (ElasticsearchSettings, False),\n 'EndpointIdentifier': (basestring, False),\n 'EndpointType': (basestring, True),\n 'EngineName': (basestring, 
True),\n 'ExtraConnectionAttributes': (basestring, False),\n 'KafkaSettings': (KafkaSettings, False),\n 'KinesisSettings': (KinesisSettings, False),\n 'KmsKeyId': (basestring, False),\n 'MongoDbSettings': (MongoDbSettings, False),\n 'Password': (basestring, False),\n 'Port': (network_port, False),\n 'S3Settings': (S3Settings, False),\n 'ServerName': (basestring, False),\n 'SslMode': (basestring, False),\n 'Tags': (Tags, False),\n 'Username': (basestring, False),\n }\n\n\nclass EventSubscription(AWSObject):\n resource_type = \"AWS::DMS::EventSubscription\"\n\n props = {\n 'Enabled': (boolean, False),\n 'EventCategories': ([basestring], False),\n 'SnsTopicArn': (basestring, True),\n 'SourceIds': ([basestring], False),\n 'SourceType': (basestring, False),\n 'SubscriptionName': (basestring, False),\n 'Tags': (Tags, False),\n }\n\n\nclass ReplicationInstance(AWSObject):\n resource_type = \"AWS::DMS::ReplicationInstance\"\n\n props = {\n 'AllocatedStorage': (integer, False),\n 'AllowMajorVersionUpgrade': (boolean, False),\n 'AutoMinorVersionUpgrade': (boolean, False),\n 'AvailabilityZone': (basestring, False),\n 'EngineVersion': (basestring, False),\n 'KmsKeyId': (basestring, False),\n 'MultiAZ': (boolean, False),\n 'PreferredMaintenanceWindow': (basestring, False),\n 'PubliclyAccessible': (boolean, False),\n 'ReplicationInstanceClass': (basestring, True),\n 'ReplicationInstanceIdentifier': (basestring, False),\n 'ReplicationSubnetGroupIdentifier': (basestring, False),\n 'Tags': (Tags, False),\n 'VpcSecurityGroupIds': ([basestring], False),\n }\n\n\nclass ReplicationSubnetGroup(AWSObject):\n resource_type = \"AWS::DMS::ReplicationSubnetGroup\"\n\n props = {\n 'ReplicationSubnetGroupDescription': (basestring, True),\n 'ReplicationSubnetGroupIdentifier': (basestring, False),\n 'SubnetIds': ([basestring], True),\n 'Tags': (Tags, False),\n }\n\n\nclass ReplicationTask(AWSObject):\n resource_type = \"AWS::DMS::ReplicationTask\"\n\n props = {\n 'CdcStartPosition': (basestring, False),\n 'CdcStartTime': (positive_integer, False),\n 'CdcStopPosition': (basestring, False),\n 'MigrationType': (basestring, True),\n 'ReplicationInstanceArn': (basestring, True),\n 'ReplicationTaskIdentifier': (basestring, False),\n 'ReplicationTaskSettings': (basestring, False),\n 'SourceEndpointArn': (basestring, True),\n 'TableMappings': (basestring, True),\n 'Tags': (Tags, False),\n 'TargetEndpointArn': (basestring, True),\n }\n", "path": "troposphere/dms.py"}], "after_files": [{"content": "# Copyright (c) 2012-2019, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . 
import AWSObject, AWSProperty, Tags\nfrom .validators import boolean, integer, network_port, positive_integer\n\n\nCDC = \"cdc\"\nFULL_LOAD = \"full-load\"\nFULL_LOAD_AND_CDC = \"full-load-and-cdc\"\n\n\nclass Certificate(AWSObject):\n resource_type = \"AWS::DMS::Certificate\"\n\n props = {\n 'CertificateIdentifier': (basestring, False),\n 'CertificatePem': (basestring, False),\n 'CertificateWallet': (basestring, False),\n }\n\n\nclass DynamoDbSettings(AWSProperty):\n props = {\n 'ServiceAccessRoleArn': (basestring, False),\n }\n\n\nclass ElasticsearchSettings(AWSProperty):\n props = {\n 'EndpointUri': (basestring, False),\n 'ErrorRetryDuration': (integer, False),\n 'FullLoadErrorPercentage': (integer, False),\n 'ServiceAccessRoleArn': (basestring, False),\n }\n\n\nclass KinesisSettings(AWSProperty):\n props = {\n 'MessageFormat': (basestring, False),\n 'ServiceAccessRoleArn': (basestring, False),\n 'StreamArn': (basestring, False),\n }\n\n\nclass MongoDbSettings(AWSProperty):\n props = {\n 'AuthMechanism': (basestring, False),\n 'AuthSource': (basestring, False),\n 'AuthType': (basestring, False),\n 'DatabaseName': (basestring, False),\n 'DocsToInvestigate': (basestring, False),\n 'ExtractDocId': (basestring, False),\n 'NestingLevel': (basestring, False),\n 'Password': (basestring, False),\n 'Port': (network_port, False),\n 'ServerName': (basestring, False),\n 'Username': (basestring, False),\n }\n\n\nclass S3Settings(AWSProperty):\n props = {\n 'BucketFolder': (basestring, False),\n 'BucketName': (basestring, False),\n 'CompressionType': (basestring, False),\n 'CsvDelimiter': (basestring, False),\n 'CsvRowDelimiter': (basestring, False),\n 'ExternalTableDefinition': (basestring, False),\n 'ServiceAccessRoleArn': (basestring, False),\n }\n\n\nclass KafkaSettings(AWSProperty):\n props = {\n 'Broker': (basestring, False),\n 'Topic': (basestring, False),\n }\n\n\nclass NeptuneSettings(AWSProperty):\n props = {\n 'ErrorRetryDuration': (integer, False),\n 'IamAuthEnabled': (boolean, False),\n 'MaxFileSize': (integer, False),\n 'MaxRetryCount': (integer, False),\n 'S3BucketFolder': (basestring, False),\n 'S3BucketName': (basestring, False),\n 'ServiceAccessRoleArn': (basestring, False),\n }\n\n\nclass Endpoint(AWSObject):\n resource_type = \"AWS::DMS::Endpoint\"\n\n props = {\n 'CertificateArn': (basestring, False),\n 'DatabaseName': (basestring, False),\n 'DynamoDbSettings': (DynamoDbSettings, False),\n 'ElasticsearchSettings': (ElasticsearchSettings, False),\n 'EndpointIdentifier': (basestring, False),\n 'EndpointType': (basestring, True),\n 'EngineName': (basestring, True),\n 'ExtraConnectionAttributes': (basestring, False),\n 'KafkaSettings': (KafkaSettings, False),\n 'KinesisSettings': (KinesisSettings, False),\n 'KmsKeyId': (basestring, False),\n 'MongoDbSettings': (MongoDbSettings, False),\n 'NeptuneSettings': (NeptuneSettings, False),\n 'Password': (basestring, False),\n 'Port': (network_port, False),\n 'S3Settings': (S3Settings, False),\n 'ServerName': (basestring, False),\n 'SslMode': (basestring, False),\n 'Tags': (Tags, False),\n 'Username': (basestring, False),\n }\n\n\nclass EventSubscription(AWSObject):\n resource_type = \"AWS::DMS::EventSubscription\"\n\n props = {\n 'Enabled': (boolean, False),\n 'EventCategories': ([basestring], False),\n 'SnsTopicArn': (basestring, True),\n 'SourceIds': ([basestring], False),\n 'SourceType': (basestring, False),\n 'SubscriptionName': (basestring, False),\n 'Tags': (Tags, False),\n }\n\n\nclass ReplicationInstance(AWSObject):\n resource_type = 
\"AWS::DMS::ReplicationInstance\"\n\n props = {\n 'AllocatedStorage': (integer, False),\n 'AllowMajorVersionUpgrade': (boolean, False),\n 'AutoMinorVersionUpgrade': (boolean, False),\n 'AvailabilityZone': (basestring, False),\n 'EngineVersion': (basestring, False),\n 'KmsKeyId': (basestring, False),\n 'MultiAZ': (boolean, False),\n 'PreferredMaintenanceWindow': (basestring, False),\n 'PubliclyAccessible': (boolean, False),\n 'ReplicationInstanceClass': (basestring, True),\n 'ReplicationInstanceIdentifier': (basestring, False),\n 'ReplicationSubnetGroupIdentifier': (basestring, False),\n 'Tags': (Tags, False),\n 'VpcSecurityGroupIds': ([basestring], False),\n }\n\n\nclass ReplicationSubnetGroup(AWSObject):\n resource_type = \"AWS::DMS::ReplicationSubnetGroup\"\n\n props = {\n 'ReplicationSubnetGroupDescription': (basestring, True),\n 'ReplicationSubnetGroupIdentifier': (basestring, False),\n 'SubnetIds': ([basestring], True),\n 'Tags': (Tags, False),\n }\n\n\nclass ReplicationTask(AWSObject):\n resource_type = \"AWS::DMS::ReplicationTask\"\n\n props = {\n 'CdcStartPosition': (basestring, False),\n 'CdcStartTime': (positive_integer, False),\n 'CdcStopPosition': (basestring, False),\n 'MigrationType': (basestring, True),\n 'ReplicationInstanceArn': (basestring, True),\n 'ReplicationTaskIdentifier': (basestring, False),\n 'ReplicationTaskSettings': (basestring, False),\n 'SourceEndpointArn': (basestring, True),\n 'TableMappings': (basestring, True),\n 'Tags': (Tags, False),\n 'TargetEndpointArn': (basestring, True),\n 'TaskData': (basestring, True),\n }\n", "path": "troposphere/dms.py"}]}
| 2,047 | 337 |
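For context on the troposphere patch above, the new `NeptuneSettings` property class plugs into `Endpoint` the same way the existing settings classes do. A hedged usage sketch against the patched `troposphere.dms` module; the resource title, ARN, and bucket values are invented for illustration:

```python
from troposphere import Template
from troposphere.dms import Endpoint, NeptuneSettings

template = Template()
template.add_resource(Endpoint(
    "NeptuneTargetEndpoint",                 # assumed logical ID
    EndpointType="target",
    EngineName="neptune",
    NeptuneSettings=NeptuneSettings(
        S3BucketName="example-dms-staging",  # assumed bucket
        S3BucketFolder="neptune-load",
        ServiceAccessRoleArn="arn:aws:iam::123456789012:role/dms-neptune-access",
        IamAuthEnabled=True,
        MaxRetryCount=5,
    ),
))
print(template.to_json())
```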
gh_patches_debug_13580
|
rasdani/github-patches
|
git_diff
|
zulip__zulip-9015
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Import wildcard mentions from Slack into zulip
When a user does a wildcard mention (i.e. `@channel`, `@here`, etc.), we should translate those to a zulip wildcard mention. I'd probably map them all to `@all` for now, but we should write the code in a way where changing the mapping is easy
Import wildcard mentions from Slack into zulip
When a user does a wildcard mention (i.e. `@channel`, `@here`, etc.), we should translate those to a zulip wildcard mention. I'd probably map them all to `@all` for now, but we should write the code in a way where changing the mapping is easy
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zerver/lib/slack_message_conversion.py`
Content:
```
1 import re
2 from typing import Any, Dict, Tuple, List
3
4 # stubs
5 ZerverFieldsT = Dict[str, Any]
6 AddedUsersT = Dict[str, int]
7
8 # Slack link can be in the format <http://www.foo.com|www.foo.com> and <http://foo.com/>
9 LINK_REGEX = r"""
10 (<) # match '>'
11 (http:\/\/www\.|https:\/\/www\.|http:\/\/|https:\/\/|ftp:\/\/)? # protocol and www
12 ([a-z0-9]+([\-\.]{1}[a-z0-9]+)*)(\.) # domain name
13 ([a-z]{2,63}(:[0-9]{1,5})?) # domain
14 (\/[^>]*)? # path
15 (\|)?(?:\|([^>]+))? # char after pipe (for slack links)
16 (>)
17 """
18
19 SLACK_MAILTO_REGEX = r"""
20 <((mailto:)? # match `<mailto:`
21 ([\w\.-]+@[\w\.-]+(\.[\w]+)+)) # match email
22 (\|)? # match pipe
23 ([\w\.-]+@[\w\.-]+(\.[\w]+)+)?> # match email
24 """
25
26 SLACK_USERMENTION_REGEX = r"""
27 (<@) # Start with '<@'
28 ([a-zA-Z0-9]+) # Here we have the Slack id
29 (\|)? # We not always have a Vertical line in mention
30 ([a-zA-Z0-9]+)? # If Vertical line is present, this is short name
31 (>) # ends with '>'
32 """
33 # Slack doesn't have mid-word message-formatting like Zulip.
34 # Hence, ~stri~ke doesn't format the word in slack, but ~~stri~~ke
35 # formats the word in Zulip
36 SLACK_STRIKETHROUGH_REGEX = r"""
37 (^|[ -(]|[+-/]|\*|\_|[:-?]|\{|\[|\||\^) # Start after specified characters
38 (\~) # followed by an asterisk
39 ([ -)+-}—]*)([ -}]+) # any character except asterisk
40 (\~) # followed by an asterisk
41 ($|[ -']|[+-/]|[:-?]|\*|\_|\}|\)|\]|\||\^) # ends with specified characters
42 """
43 SLACK_ITALIC_REGEX = r"""
44 (^|[ -(]|[+-/]|[:-?]|\{|\[|\||\^|~)
45 (\_)
46 ([ -^`~—]*)([ -^`-~]+) # any character
47 (\_)
48 ($|[ -']|[+-/]|[:-?]|\}|\)|\]|\||\^|~)
49 """
50 SLACK_BOLD_REGEX = r"""
51 (^|[ -(]|[+-/]|[:-?]|\{|\[|\||\^|~)
52 (\*)
53 ([ -)+-~—]*)([ -)+-~]+) # any character
54 (\*)
55 ($|[ -']|[+-/]|[:-?]|\}|\)|\]|\||\^|~)
56 """
57
58 def get_user_full_name(user: ZerverFieldsT) -> str:
59 if user['deleted'] is False:
60 if user['real_name'] == '':
61 return user['name']
62 else:
63 return user['real_name']
64 else:
65 return user['name']
66
67 # Markdown mapping
68 def convert_to_zulip_markdown(text: str, users: List[ZerverFieldsT],
69 added_users: AddedUsersT) -> Tuple[str, List[int], bool]:
70 mentioned_users_id = []
71 text = convert_markdown_syntax(text, SLACK_BOLD_REGEX, "**")
72 text = convert_markdown_syntax(text, SLACK_STRIKETHROUGH_REGEX, "~~")
73 text = convert_markdown_syntax(text, SLACK_ITALIC_REGEX, "*")
74
75 # Map Slack's mention all: '<!everyone>' to '@**all** '
76 # No regex for this as it can be present anywhere in the sentence
77 text = text.replace('<!everyone>', '@**all**')
78
79 tokens = text.split(' ')
80 for iterator in range(len(tokens)):
81
82 # Check user mentions and change mention format from
83 # '<@slack_id|short_name>' to '@**full_name**'
84 if (re.findall(SLACK_USERMENTION_REGEX, tokens[iterator], re.VERBOSE)):
85 tokens[iterator], user_id = get_user_mentions(tokens[iterator],
86 users, added_users)
87 if user_id is not None:
88 mentioned_users_id.append(user_id)
89
90 text = ' '.join(tokens)
91
92 # Check and convert link format
93 text, has_link = convert_link_format(text)
94 # convert `<mailto:[email protected]>` to `mailto:[email protected]`
95 text, has_mailto_link = convert_mailto_format(text)
96
97 if has_link is True or has_mailto_link is True:
98 message_has_link = True
99 else:
100 message_has_link = False
101
102 return text, mentioned_users_id, message_has_link
103
104 def get_user_mentions(token: str, users: List[ZerverFieldsT],
105 added_users: AddedUsersT) -> Tuple[str, int]:
106 slack_usermention_match = re.search(SLACK_USERMENTION_REGEX, token, re.VERBOSE)
107 short_name = slack_usermention_match.group(4)
108 slack_id = slack_usermention_match.group(2)
109 for user in users:
110 if (user['id'] == slack_id and user['name'] == short_name and short_name) or \
111 (user['id'] == slack_id and short_name is None):
112 full_name = get_user_full_name(user)
113 user_id = added_users[slack_id]
114 mention = "@**" + full_name + "**"
115 token = re.sub(SLACK_USERMENTION_REGEX, mention, token, flags=re.VERBOSE)
116 return token, user_id
117 return token, None
118
119 # Map italic, bold and strikethrough markdown
120 def convert_markdown_syntax(text: str, regex: str, zulip_keyword: str) -> str:
121 """
122 Returns:
123 1. For strikethrough formatting: This maps Slack's '~strike~' to Zulip's '~~strike~~'
124 2. For bold formatting: This maps Slack's '*bold*' to Zulip's '**bold**'
125 3. For italic formatting: This maps Slack's '_italic_' to Zulip's '*italic*'
126 """
127 for match in re.finditer(regex, text, re.VERBOSE):
128 converted_token = (match.group(1) + zulip_keyword + match.group(3)
129 + match.group(4) + zulip_keyword + match.group(6))
130 text = text.replace(match.group(0), converted_token)
131 return text
132
133 def convert_link_format(text: str) -> Tuple[str, bool]:
134 """
135 1. Converts '<https://foo.com>' to 'https://foo.com'
136 2. Converts '<https://foo.com|foo>' to 'https://foo.com|foo'
137 """
138 has_link = False
139 for match in re.finditer(LINK_REGEX, text, re.VERBOSE):
140 converted_text = match.group(0).replace('>', '').replace('<', '')
141 has_link = True
142 text = text.replace(match.group(0), converted_text)
143 return text, has_link
144
145 def convert_mailto_format(text: str) -> Tuple[str, bool]:
146 """
147 1. Converts '<mailto:[email protected]>' to 'mailto:[email protected]'
148 2. Converts '<mailto:[email protected]|[email protected]>' to 'mailto:[email protected]'
149 """
150 has_link = False
151 for match in re.finditer(SLACK_MAILTO_REGEX, text, re.VERBOSE):
152 has_link = True
153 text = text.replace(match.group(0), match.group(1))
154 return text, has_link
155
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/zerver/lib/slack_message_conversion.py b/zerver/lib/slack_message_conversion.py
--- a/zerver/lib/slack_message_conversion.py
+++ b/zerver/lib/slack_message_conversion.py
@@ -73,8 +73,12 @@
text = convert_markdown_syntax(text, SLACK_ITALIC_REGEX, "*")
# Map Slack's mention all: '<!everyone>' to '@**all** '
+ # Map Slack's mention all: '<!channel>' to '@**all** '
+ # Map Slack's mention all: '<!here>' to '@**all** '
# No regex for this as it can be present anywhere in the sentence
text = text.replace('<!everyone>', '@**all**')
+ text = text.replace('<!channel>', '@**all**')
+ text = text.replace('<!here>', '@**all**')
tokens = text.split(' ')
for iterator in range(len(tokens)):
|
{"golden_diff": "diff --git a/zerver/lib/slack_message_conversion.py b/zerver/lib/slack_message_conversion.py\n--- a/zerver/lib/slack_message_conversion.py\n+++ b/zerver/lib/slack_message_conversion.py\n@@ -73,8 +73,12 @@\n text = convert_markdown_syntax(text, SLACK_ITALIC_REGEX, \"*\")\n \n # Map Slack's mention all: '<!everyone>' to '@**all** '\n+ # Map Slack's mention all: '<!channel>' to '@**all** '\n+ # Map Slack's mention all: '<!here>' to '@**all** '\n # No regex for this as it can be present anywhere in the sentence\n text = text.replace('<!everyone>', '@**all**')\n+ text = text.replace('<!channel>', '@**all**')\n+ text = text.replace('<!here>', '@**all**')\n \n tokens = text.split(' ')\n for iterator in range(len(tokens)):\n", "issue": "Import wildcard mentions from Slack into zulip\nWhen a user does a wildcard mention (i.e. `@channel`, `@here`, etc.), we should translate those to a zulip wildcard mention. I'd probably map them all to `@all` for now, but we should write the code in a way where changing the mapping is easy\nImport wildcard mentions from Slack into zulip\nWhen a user does a wildcard mention (i.e. `@channel`, `@here`, etc.), we should translate those to a zulip wildcard mention. I'd probably map them all to `@all` for now, but we should write the code in a way where changing the mapping is easy\n", "before_files": [{"content": "import re\nfrom typing import Any, Dict, Tuple, List\n\n# stubs\nZerverFieldsT = Dict[str, Any]\nAddedUsersT = Dict[str, int]\n\n# Slack link can be in the format <http://www.foo.com|www.foo.com> and <http://foo.com/>\nLINK_REGEX = r\"\"\"\n (<) # match '>'\n (http:\\/\\/www\\.|https:\\/\\/www\\.|http:\\/\\/|https:\\/\\/|ftp:\\/\\/)? # protocol and www\n ([a-z0-9]+([\\-\\.]{1}[a-z0-9]+)*)(\\.) # domain name\n ([a-z]{2,63}(:[0-9]{1,5})?) # domain\n (\\/[^>]*)? # path\n (\\|)?(?:\\|([^>]+))? # char after pipe (for slack links)\n (>)\n \"\"\"\n\nSLACK_MAILTO_REGEX = r\"\"\"\n <((mailto:)? # match `<mailto:`\n ([\\w\\.-]+@[\\w\\.-]+(\\.[\\w]+)+)) # match email\n (\\|)? # match pipe\n ([\\w\\.-]+@[\\w\\.-]+(\\.[\\w]+)+)?> # match email\n \"\"\"\n\nSLACK_USERMENTION_REGEX = r\"\"\"\n (<@) # Start with '<@'\n ([a-zA-Z0-9]+) # Here we have the Slack id\n (\\|)? # We not always have a Vertical line in mention\n ([a-zA-Z0-9]+)? 
# If Vertical line is present, this is short name\n (>) # ends with '>'\n \"\"\"\n# Slack doesn't have mid-word message-formatting like Zulip.\n# Hence, ~stri~ke doesn't format the word in slack, but ~~stri~~ke\n# formats the word in Zulip\nSLACK_STRIKETHROUGH_REGEX = r\"\"\"\n (^|[ -(]|[+-/]|\\*|\\_|[:-?]|\\{|\\[|\\||\\^) # Start after specified characters\n (\\~) # followed by an asterisk\n ([ -)+-}\u2014]*)([ -}]+) # any character except asterisk\n (\\~) # followed by an asterisk\n ($|[ -']|[+-/]|[:-?]|\\*|\\_|\\}|\\)|\\]|\\||\\^) # ends with specified characters\n \"\"\"\nSLACK_ITALIC_REGEX = r\"\"\"\n (^|[ -(]|[+-/]|[:-?]|\\{|\\[|\\||\\^|~)\n (\\_)\n ([ -^`~\u2014]*)([ -^`-~]+) # any character\n (\\_)\n ($|[ -']|[+-/]|[:-?]|\\}|\\)|\\]|\\||\\^|~)\n \"\"\"\nSLACK_BOLD_REGEX = r\"\"\"\n (^|[ -(]|[+-/]|[:-?]|\\{|\\[|\\||\\^|~)\n (\\*)\n ([ -)+-~\u2014]*)([ -)+-~]+) # any character\n (\\*)\n ($|[ -']|[+-/]|[:-?]|\\}|\\)|\\]|\\||\\^|~)\n \"\"\"\n\ndef get_user_full_name(user: ZerverFieldsT) -> str:\n if user['deleted'] is False:\n if user['real_name'] == '':\n return user['name']\n else:\n return user['real_name']\n else:\n return user['name']\n\n# Markdown mapping\ndef convert_to_zulip_markdown(text: str, users: List[ZerverFieldsT],\n added_users: AddedUsersT) -> Tuple[str, List[int], bool]:\n mentioned_users_id = []\n text = convert_markdown_syntax(text, SLACK_BOLD_REGEX, \"**\")\n text = convert_markdown_syntax(text, SLACK_STRIKETHROUGH_REGEX, \"~~\")\n text = convert_markdown_syntax(text, SLACK_ITALIC_REGEX, \"*\")\n\n # Map Slack's mention all: '<!everyone>' to '@**all** '\n # No regex for this as it can be present anywhere in the sentence\n text = text.replace('<!everyone>', '@**all**')\n\n tokens = text.split(' ')\n for iterator in range(len(tokens)):\n\n # Check user mentions and change mention format from\n # '<@slack_id|short_name>' to '@**full_name**'\n if (re.findall(SLACK_USERMENTION_REGEX, tokens[iterator], re.VERBOSE)):\n tokens[iterator], user_id = get_user_mentions(tokens[iterator],\n users, added_users)\n if user_id is not None:\n mentioned_users_id.append(user_id)\n\n text = ' '.join(tokens)\n\n # Check and convert link format\n text, has_link = convert_link_format(text)\n # convert `<mailto:[email protected]>` to `mailto:[email protected]`\n text, has_mailto_link = convert_mailto_format(text)\n\n if has_link is True or has_mailto_link is True:\n message_has_link = True\n else:\n message_has_link = False\n\n return text, mentioned_users_id, message_has_link\n\ndef get_user_mentions(token: str, users: List[ZerverFieldsT],\n added_users: AddedUsersT) -> Tuple[str, int]:\n slack_usermention_match = re.search(SLACK_USERMENTION_REGEX, token, re.VERBOSE)\n short_name = slack_usermention_match.group(4)\n slack_id = slack_usermention_match.group(2)\n for user in users:\n if (user['id'] == slack_id and user['name'] == short_name and short_name) or \\\n (user['id'] == slack_id and short_name is None):\n full_name = get_user_full_name(user)\n user_id = added_users[slack_id]\n mention = \"@**\" + full_name + \"**\"\n token = re.sub(SLACK_USERMENTION_REGEX, mention, token, flags=re.VERBOSE)\n return token, user_id\n return token, None\n\n# Map italic, bold and strikethrough markdown\ndef convert_markdown_syntax(text: str, regex: str, zulip_keyword: str) -> str:\n \"\"\"\n Returns:\n 1. For strikethrough formatting: This maps Slack's '~strike~' to Zulip's '~~strike~~'\n 2. For bold formatting: This maps Slack's '*bold*' to Zulip's '**bold**'\n 3. 
For italic formatting: This maps Slack's '_italic_' to Zulip's '*italic*'\n \"\"\"\n for match in re.finditer(regex, text, re.VERBOSE):\n converted_token = (match.group(1) + zulip_keyword + match.group(3)\n + match.group(4) + zulip_keyword + match.group(6))\n text = text.replace(match.group(0), converted_token)\n return text\n\ndef convert_link_format(text: str) -> Tuple[str, bool]:\n \"\"\"\n 1. Converts '<https://foo.com>' to 'https://foo.com'\n 2. Converts '<https://foo.com|foo>' to 'https://foo.com|foo'\n \"\"\"\n has_link = False\n for match in re.finditer(LINK_REGEX, text, re.VERBOSE):\n converted_text = match.group(0).replace('>', '').replace('<', '')\n has_link = True\n text = text.replace(match.group(0), converted_text)\n return text, has_link\n\ndef convert_mailto_format(text: str) -> Tuple[str, bool]:\n \"\"\"\n 1. Converts '<mailto:[email protected]>' to 'mailto:[email protected]'\n 2. Converts '<mailto:[email protected]|[email protected]>' to 'mailto:[email protected]'\n \"\"\"\n has_link = False\n for match in re.finditer(SLACK_MAILTO_REGEX, text, re.VERBOSE):\n has_link = True\n text = text.replace(match.group(0), match.group(1))\n return text, has_link\n", "path": "zerver/lib/slack_message_conversion.py"}], "after_files": [{"content": "import re\nfrom typing import Any, Dict, Tuple, List\n\n# stubs\nZerverFieldsT = Dict[str, Any]\nAddedUsersT = Dict[str, int]\n\n# Slack link can be in the format <http://www.foo.com|www.foo.com> and <http://foo.com/>\nLINK_REGEX = r\"\"\"\n (<) # match '>'\n (http:\\/\\/www\\.|https:\\/\\/www\\.|http:\\/\\/|https:\\/\\/|ftp:\\/\\/)? # protocol and www\n ([a-z0-9]+([\\-\\.]{1}[a-z0-9]+)*)(\\.) # domain name\n ([a-z]{2,63}(:[0-9]{1,5})?) # domain\n (\\/[^>]*)? # path\n (\\|)?(?:\\|([^>]+))? # char after pipe (for slack links)\n (>)\n \"\"\"\n\nSLACK_MAILTO_REGEX = r\"\"\"\n <((mailto:)? # match `<mailto:`\n ([\\w\\.-]+@[\\w\\.-]+(\\.[\\w]+)+)) # match email\n (\\|)? # match pipe\n ([\\w\\.-]+@[\\w\\.-]+(\\.[\\w]+)+)?> # match email\n \"\"\"\n\nSLACK_USERMENTION_REGEX = r\"\"\"\n (<@) # Start with '<@'\n ([a-zA-Z0-9]+) # Here we have the Slack id\n (\\|)? # We not always have a Vertical line in mention\n ([a-zA-Z0-9]+)? 
# If Vertical line is present, this is short name\n (>) # ends with '>'\n \"\"\"\n# Slack doesn't have mid-word message-formatting like Zulip.\n# Hence, ~stri~ke doesn't format the word in slack, but ~~stri~~ke\n# formats the word in Zulip\nSLACK_STRIKETHROUGH_REGEX = r\"\"\"\n (^|[ -(]|[+-/]|\\*|\\_|[:-?]|\\{|\\[|\\||\\^) # Start after specified characters\n (\\~) # followed by an asterisk\n ([ -)+-}\u2014]*)([ -}]+) # any character except asterisk\n (\\~) # followed by an asterisk\n ($|[ -']|[+-/]|[:-?]|\\*|\\_|\\}|\\)|\\]|\\||\\^) # ends with specified characters\n \"\"\"\nSLACK_ITALIC_REGEX = r\"\"\"\n (^|[ -(]|[+-/]|[:-?]|\\{|\\[|\\||\\^|~)\n (\\_)\n ([ -^`~\u2014]*)([ -^`-~]+) # any character\n (\\_)\n ($|[ -']|[+-/]|[:-?]|\\}|\\)|\\]|\\||\\^|~)\n \"\"\"\nSLACK_BOLD_REGEX = r\"\"\"\n (^|[ -(]|[+-/]|[:-?]|\\{|\\[|\\||\\^|~)\n (\\*)\n ([ -)+-~\u2014]*)([ -)+-~]+) # any character\n (\\*)\n ($|[ -']|[+-/]|[:-?]|\\}|\\)|\\]|\\||\\^|~)\n \"\"\"\n\ndef get_user_full_name(user: ZerverFieldsT) -> str:\n if user['deleted'] is False:\n if user['real_name'] == '':\n return user['name']\n else:\n return user['real_name']\n else:\n return user['name']\n\n# Markdown mapping\ndef convert_to_zulip_markdown(text: str, users: List[ZerverFieldsT],\n added_users: AddedUsersT) -> Tuple[str, List[int], bool]:\n mentioned_users_id = []\n text = convert_markdown_syntax(text, SLACK_BOLD_REGEX, \"**\")\n text = convert_markdown_syntax(text, SLACK_STRIKETHROUGH_REGEX, \"~~\")\n text = convert_markdown_syntax(text, SLACK_ITALIC_REGEX, \"*\")\n\n # Map Slack's mention all: '<!everyone>' to '@**all** '\n # Map Slack's mention all: '<!channel>' to '@**all** '\n # Map Slack's mention all: '<!here>' to '@**all** '\n # No regex for this as it can be present anywhere in the sentence\n text = text.replace('<!everyone>', '@**all**')\n text = text.replace('<!channel>', '@**all**')\n text = text.replace('<!here>', '@**all**')\n\n tokens = text.split(' ')\n for iterator in range(len(tokens)):\n\n # Check user mentions and change mention format from\n # '<@slack_id|short_name>' to '@**full_name**'\n if (re.findall(SLACK_USERMENTION_REGEX, tokens[iterator], re.VERBOSE)):\n tokens[iterator], user_id = get_user_mentions(tokens[iterator],\n users, added_users)\n if user_id is not None:\n mentioned_users_id.append(user_id)\n\n text = ' '.join(tokens)\n\n # Check and convert link format\n text, has_link = convert_link_format(text)\n # convert `<mailto:[email protected]>` to `mailto:[email protected]`\n text, has_mailto_link = convert_mailto_format(text)\n\n if has_link is True or has_mailto_link is True:\n message_has_link = True\n else:\n message_has_link = False\n\n return text, mentioned_users_id, message_has_link\n\ndef get_user_mentions(token: str, users: List[ZerverFieldsT],\n added_users: AddedUsersT) -> Tuple[str, int]:\n slack_usermention_match = re.search(SLACK_USERMENTION_REGEX, token, re.VERBOSE)\n short_name = slack_usermention_match.group(4)\n slack_id = slack_usermention_match.group(2)\n for user in users:\n if (user['id'] == slack_id and user['name'] == short_name and short_name) or \\\n (user['id'] == slack_id and short_name is None):\n full_name = get_user_full_name(user)\n user_id = added_users[slack_id]\n mention = \"@**\" + full_name + \"**\"\n token = re.sub(SLACK_USERMENTION_REGEX, mention, token, flags=re.VERBOSE)\n return token, user_id\n return token, None\n\n# Map italic, bold and strikethrough markdown\ndef convert_markdown_syntax(text: str, regex: str, zulip_keyword: str) -> str:\n \"\"\"\n Returns:\n 
1. For strikethrough formatting: This maps Slack's '~strike~' to Zulip's '~~strike~~'\n 2. For bold formatting: This maps Slack's '*bold*' to Zulip's '**bold**'\n 3. For italic formatting: This maps Slack's '_italic_' to Zulip's '*italic*'\n \"\"\"\n for match in re.finditer(regex, text, re.VERBOSE):\n converted_token = (match.group(1) + zulip_keyword + match.group(3)\n + match.group(4) + zulip_keyword + match.group(6))\n text = text.replace(match.group(0), converted_token)\n return text\n\ndef convert_link_format(text: str) -> Tuple[str, bool]:\n \"\"\"\n 1. Converts '<https://foo.com>' to 'https://foo.com'\n 2. Converts '<https://foo.com|foo>' to 'https://foo.com|foo'\n \"\"\"\n has_link = False\n for match in re.finditer(LINK_REGEX, text, re.VERBOSE):\n converted_text = match.group(0).replace('>', '').replace('<', '')\n has_link = True\n text = text.replace(match.group(0), converted_text)\n return text, has_link\n\ndef convert_mailto_format(text: str) -> Tuple[str, bool]:\n \"\"\"\n 1. Converts '<mailto:[email protected]>' to 'mailto:[email protected]'\n 2. Converts '<mailto:[email protected]|[email protected]>' to 'mailto:[email protected]'\n \"\"\"\n has_link = False\n for match in re.finditer(SLACK_MAILTO_REGEX, text, re.VERBOSE):\n has_link = True\n text = text.replace(match.group(0), match.group(1))\n return text, has_link\n", "path": "zerver/lib/slack_message_conversion.py"}]}
| 2,519 | 208 |
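The zulip issue above asks for the Slack-to-Zulip wildcard mapping to be easy to change; the reference patch uses three chained `str.replace` calls. A hedged alternative sketch that centralizes the mapping in one dict (illustrative only, not the code shown in the diff above):

```python
# Slack wildcard mentions and the Zulip mention they should become.
SLACK_WILDCARD_TO_ZULIP = {
    "<!everyone>": "@**all**",
    "<!channel>": "@**all**",
    "<!here>": "@**all**",
}


def convert_wildcard_mentions(text: str) -> str:
    for slack_token, zulip_mention in SLACK_WILDCARD_TO_ZULIP.items():
        text = text.replace(slack_token, zulip_mention)
    return text


assert convert_wildcard_mentions("ping <!here> and <!channel>") == "ping @**all** and @**all**"
```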
gh_patches_debug_2668
|
rasdani/github-patches
|
git_diff
|
facebookresearch__ParlAI-1821
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Obselete download link for CLEVR Dataset
Apparently, the current link to CLEVR in the source code is "https://s3-us-west-1.amazonaws.com/clevr/CLEVR_v1.0.zip" that returns the message "All access to this object has been disabled"
When I try to execute the following line of code
`!python ~/ParlAI/examples/display_data.py -t clevr`
I obtain
```
[creating task(s): clevr]
[building data: /root/ParlAI/data/CLEVR]
[ downloading: https://s3-us-west-1.amazonaws.com/clevr/CLEVR_v1.0.zip to /root/ParlAI/data/CLEVR/CLEVR_v1.0.zip ]
Downloading CLEVR_v1.0.zip: 0.00B [00:00, ?B/s]
unpacking CLEVR_v1.0.zip
Traceback (most recent call last):
File "/root/ParlAI/parlai/core/agents.py", line 819, in _create_task_agents
task_agents = my_module.create_agent(opt)
AttributeError: module 'parlai.tasks.clevr.agents' has no attribute 'create_agent'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/root/ParlAI/examples/display_data.py", line 22, in <module>
display_data(opt)
File "/root/ParlAI/parlai/scripts/display_data.py", line 42, in display_data
world = create_task(opt, agent)
File "/root/ParlAI/parlai/core/worlds.py", line 1151, in create_task
world = create_task_world(opt, user_agents, default_world=default_world)
File "/root/ParlAI/parlai/core/worlds.py", line 1108, in create_task_world
opt, user_agents, default_world=default_world
File "/root/ParlAI/parlai/core/worlds.py", line 1068, in _get_task_world
task_agents = _create_task_agents(opt)
File "/root/ParlAI/parlai/core/agents.py", line 822, in _create_task_agents
return create_task_agent_from_taskname(opt)
File "/root/ParlAI/parlai/core/agents.py", line 776, in create_task_agent_from_taskname
task_agents = teacher_class(opt)
File "/root/ParlAI/parlai/tasks/clevr/agents.py", line 45, in __init__
data_path, self.images_path = _path(opt)
File "/root/ParlAI/parlai/tasks/clevr/agents.py", line 15, in _path
build(opt)
File "/root/ParlAI/parlai/tasks/clevr/build.py", line 28, in build
build_data.untar(dpath, fname)
File "/root/ParlAI/parlai/core/build_data.py", line 180, in untar
shutil.unpack_archive(fullpath, path)
File "/usr/lib/python3.6/shutil.py", line 983, in unpack_archive
func(filename, extract_dir, **kwargs)
File "/usr/lib/python3.6/shutil.py", line 883, in _unpack_zipfile
raise ReadError("%s is not a zip file" % filename)
shutil.ReadError: /root/ParlAI/data/CLEVR/CLEVR_v1.0.zip is not a zip file
```
I found the following working link on CLEVR webpage (https://cs.stanford.edu/people/jcjohns/clevr/):
https://dl.fbaipublicfiles.com/clevr/CLEVR_v1.0.zip
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parlai/tasks/clevr/build.py`
Content:
```
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 # This source code is licensed under the MIT license found in the
5 # LICENSE file in the root directory of this source tree.
6 # Download and build the data if it does not exist.
7
8 import parlai.core.build_data as build_data
9 import os
10
11
12 def build(opt):
13 dpath = os.path.join(opt['datapath'], 'CLEVR')
14 version = 'v1.0'
15
16 if not build_data.built(dpath, version_string=version):
17 print('[building data: ' + dpath + ']')
18 # An older version exists, so remove these outdated files.
19 if build_data.built(dpath):
20 build_data.remove_dir(dpath)
21 build_data.make_dir(dpath)
22
23 # Download the data.
24 fname = 'CLEVR_v1.0.zip'
25 url = 'https://s3-us-west-1.amazonaws.com/clevr/'
26
27 build_data.download(url + fname, dpath, fname)
28 build_data.untar(dpath, fname)
29
30 # Mark the data as built.
31 build_data.mark_done(dpath, version_string=version)
32
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/parlai/tasks/clevr/build.py b/parlai/tasks/clevr/build.py
--- a/parlai/tasks/clevr/build.py
+++ b/parlai/tasks/clevr/build.py
@@ -22,7 +22,7 @@
# Download the data.
fname = 'CLEVR_v1.0.zip'
- url = 'https://s3-us-west-1.amazonaws.com/clevr/'
+ url = 'https://dl.fbaipublicfiles.com/clevr/'
build_data.download(url + fname, dpath, fname)
build_data.untar(dpath, fname)
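
As a quick sanity check on the replacement host (this sketch is not part of the patch and assumes the `requests` package is available):

```python
# Sketch: confirm the new mirror actually serves a zip archive before rebuilding
import requests

url = "https://dl.fbaipublicfiles.com/clevr/CLEVR_v1.0.zip"
resp = requests.head(url, allow_redirects=True)
print(resp.status_code, resp.headers.get("Content-Type"), resp.headers.get("Content-Length"))
# A 200 response with a binary/zip content type is expected; the old S3 bucket instead
# returns an "access disabled" error page, which is what untar later rejected as "not a zip file".
```

If an earlier run left a truncated `CLEVR_v1.0.zip` under `data/CLEVR`, deleting that directory before re-running lets the build fetch the archive cleanly from the new URL.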
|
{"golden_diff": "diff --git a/parlai/tasks/clevr/build.py b/parlai/tasks/clevr/build.py\n--- a/parlai/tasks/clevr/build.py\n+++ b/parlai/tasks/clevr/build.py\n@@ -22,7 +22,7 @@\n \n # Download the data.\n fname = 'CLEVR_v1.0.zip'\n- url = 'https://s3-us-west-1.amazonaws.com/clevr/'\n+ url = 'https://dl.fbaipublicfiles.com/clevr/'\n \n build_data.download(url + fname, dpath, fname)\n build_data.untar(dpath, fname)\n", "issue": "Obselete download link for CLEVR Dataset\nApparently, the current link to CLEVR in the source code is \"https://s3-us-west-1.amazonaws.com/clevr/CLEVR_v1.0.zip\" that returns the message \"All access to this object has been disabled\"\r\n\r\nWhen I try to execute the following line of code\r\n\r\n`!python ~/ParlAI/examples/display_data.py -t clevr`\r\n\r\nI obtain\r\n\r\n```\r\n[creating task(s): clevr]\r\n[building data: /root/ParlAI/data/CLEVR]\r\n[ downloading: https://s3-us-west-1.amazonaws.com/clevr/CLEVR_v1.0.zip to /root/ParlAI/data/CLEVR/CLEVR_v1.0.zip ]\r\nDownloading CLEVR_v1.0.zip: 0.00B [00:00, ?B/s]\r\nunpacking CLEVR_v1.0.zip\r\nTraceback (most recent call last):\r\n File \"/root/ParlAI/parlai/core/agents.py\", line 819, in _create_task_agents\r\n task_agents = my_module.create_agent(opt)\r\nAttributeError: module 'parlai.tasks.clevr.agents' has no attribute 'create_agent'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/root/ParlAI/examples/display_data.py\", line 22, in <module>\r\n display_data(opt)\r\n File \"/root/ParlAI/parlai/scripts/display_data.py\", line 42, in display_data\r\n world = create_task(opt, agent)\r\n File \"/root/ParlAI/parlai/core/worlds.py\", line 1151, in create_task\r\n world = create_task_world(opt, user_agents, default_world=default_world)\r\n File \"/root/ParlAI/parlai/core/worlds.py\", line 1108, in create_task_world\r\n opt, user_agents, default_world=default_world\r\n File \"/root/ParlAI/parlai/core/worlds.py\", line 1068, in _get_task_world\r\n task_agents = _create_task_agents(opt)\r\n File \"/root/ParlAI/parlai/core/agents.py\", line 822, in _create_task_agents\r\n return create_task_agent_from_taskname(opt)\r\n File \"/root/ParlAI/parlai/core/agents.py\", line 776, in create_task_agent_from_taskname\r\n task_agents = teacher_class(opt)\r\n File \"/root/ParlAI/parlai/tasks/clevr/agents.py\", line 45, in __init__\r\n data_path, self.images_path = _path(opt)\r\n File \"/root/ParlAI/parlai/tasks/clevr/agents.py\", line 15, in _path\r\n build(opt)\r\n File \"/root/ParlAI/parlai/tasks/clevr/build.py\", line 28, in build\r\n build_data.untar(dpath, fname)\r\n File \"/root/ParlAI/parlai/core/build_data.py\", line 180, in untar\r\n shutil.unpack_archive(fullpath, path)\r\n File \"/usr/lib/python3.6/shutil.py\", line 983, in unpack_archive\r\n func(filename, extract_dir, **kwargs)\r\n File \"/usr/lib/python3.6/shutil.py\", line 883, in _unpack_zipfile\r\n raise ReadError(\"%s is not a zip file\" % filename)\r\nshutil.ReadError: /root/ParlAI/data/CLEVR/CLEVR_v1.0.zip is not a zip file\r\n```\r\n\r\nI found the following working link on CLEVR webpage (https://cs.stanford.edu/people/jcjohns/clevr/):\r\n\r\nhttps://dl.fbaipublicfiles.com/clevr/CLEVR_v1.0.zip\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. 
and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n# Download and build the data if it does not exist.\n\nimport parlai.core.build_data as build_data\nimport os\n\n\ndef build(opt):\n dpath = os.path.join(opt['datapath'], 'CLEVR')\n version = 'v1.0'\n\n if not build_data.built(dpath, version_string=version):\n print('[building data: ' + dpath + ']')\n # An older version exists, so remove these outdated files.\n if build_data.built(dpath):\n build_data.remove_dir(dpath)\n build_data.make_dir(dpath)\n\n # Download the data.\n fname = 'CLEVR_v1.0.zip'\n url = 'https://s3-us-west-1.amazonaws.com/clevr/'\n\n build_data.download(url + fname, dpath, fname)\n build_data.untar(dpath, fname)\n\n # Mark the data as built.\n build_data.mark_done(dpath, version_string=version)\n", "path": "parlai/tasks/clevr/build.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n# Download and build the data if it does not exist.\n\nimport parlai.core.build_data as build_data\nimport os\n\n\ndef build(opt):\n dpath = os.path.join(opt['datapath'], 'CLEVR')\n version = 'v1.0'\n\n if not build_data.built(dpath, version_string=version):\n print('[building data: ' + dpath + ']')\n # An older version exists, so remove these outdated files.\n if build_data.built(dpath):\n build_data.remove_dir(dpath)\n build_data.make_dir(dpath)\n\n # Download the data.\n fname = 'CLEVR_v1.0.zip'\n url = 'https://dl.fbaipublicfiles.com/clevr/'\n\n build_data.download(url + fname, dpath, fname)\n build_data.untar(dpath, fname)\n\n # Mark the data as built.\n build_data.mark_done(dpath, version_string=version)\n", "path": "parlai/tasks/clevr/build.py"}]}
| 1,429 | 145 |
gh_patches_debug_33339
|
rasdani/github-patches
|
git_diff
|
pyinstaller__pyinstaller-2111
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
New numpy hook for Intel MKL libraries
Hello - anaconda and winpython build numpy against Intel's MKL libraries. Building something that uses numpy will need to have those libraries.
Here is a winpython hook:
http://stackoverflow.com/a/35853001
and I adapted that for anaconda & python3:
https://github.com/maqifrnswa/scimpy/blob/master/pyinstaller-hooks/hook-numpy.py
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `PyInstaller/hooks/hook-numpy.core.py`
Content:
```
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2013-2016, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License with exception
5 # for distributing bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #-----------------------------------------------------------------------------
9 # On Windows, numpy depends on a set of dynamically-detemined DLLs, which means
10 # that PyInstaller's static analysis can't find them. See https://github.com/pyinstaller/pyinstaller/issues/1969
11 # for more information. The typical error message: ``Intel MKL FATAL ERROR:
12 # Cannot load mkl_intel_thread.dll.``
13 #
14 # So, include them manually.
15 import os
16 import os.path
17 from PyInstaller.utils.hooks import get_package_paths
18
19 pkg_base, pkg_dir = get_package_paths('numpy.core')
20 # Walk through all files in ``numpy.core``, looking for DLLs.
21 datas = []
22 for f in os.listdir(pkg_dir):
23 extension = os.path.splitext(f)[1]
24 if extension == '.dll':
25 # Produce the tuple ('/abs/path/to/libs/numpy/core/file.dll', '')
26 source = os.path.join(pkg_dir, f)
27 datas.append((source, ''))
28
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/PyInstaller/hooks/hook-numpy.core.py b/PyInstaller/hooks/hook-numpy.core.py
--- a/PyInstaller/hooks/hook-numpy.core.py
+++ b/PyInstaller/hooks/hook-numpy.core.py
@@ -6,22 +6,41 @@
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
-# On Windows, numpy depends on a set of dynamically-detemined DLLs, which means
-# that PyInstaller's static analysis can't find them. See https://github.com/pyinstaller/pyinstaller/issues/1969
-# for more information. The typical error message: ``Intel MKL FATAL ERROR:
-# Cannot load mkl_intel_thread.dll.``
+# If numpy is built with MKL support it depends on a set of libraries loaded
+# at runtime. Since PyInstaller's static analysis can't find them they must be
+# included manually.
#
-# So, include them manually.
+# See
+# https://github.com/pyinstaller/pyinstaller/issues/1881
+# https://github.com/pyinstaller/pyinstaller/issues/1969
+# for more information
import os
import os.path
+import re
from PyInstaller.utils.hooks import get_package_paths
+from PyInstaller import log as logging
+from PyInstaller import compat
+binaries = []
+
+# look for libraries in numpy package path
pkg_base, pkg_dir = get_package_paths('numpy.core')
-# Walk through all files in ``numpy.core``, looking for DLLs.
-datas = []
-for f in os.listdir(pkg_dir):
- extension = os.path.splitext(f)[1]
- if extension == '.dll':
- # Produce the tuple ('/abs/path/to/libs/numpy/core/file.dll', '')
- source = os.path.join(pkg_dir, f)
- datas.append((source, ''))
+re_anylib = re.compile(r'\w+\.(?:dll|so)', re.IGNORECASE)
+dlls_pkg = [f for f in os.listdir(pkg_dir) if re_anylib.match(f)]
+binaries += [(os.path.join(pkg_dir, f), '') for f in dlls_pkg]
+
+# look for MKL libraries in pythons lib directory
+# TODO: check numpy.__config__ if numpy is actually depending on MKL
+# TODO: determine which directories are searched by the os linker
+if compat.is_win:
+ lib_dir = os.path.join(compat.base_prefix, "Library", "bin")
+else:
+ lib_dir = os.path.join(compat.base_prefix, "lib")
+if os.path.isdir(lib_dir):
+ re_mkllib = re.compile(r'^(?:lib)?mkl\w+\.(?:dll|so)', re.IGNORECASE)
+ dlls_mkl = [f for f in os.listdir(lib_dir) if re_mkllib.match(f)]
+ if dlls_mkl:
+ logger = logging.getLogger(__name__)
+ logger.info("MKL libraries found when importing numpy. Adding MKL to binaries")
+ binaries += [(os.path.join(lib_dir, f), '') for f in dlls_mkl]
+
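
The first TODO above can already be checked by hand; a minimal sketch for telling whether a given numpy build actually links against MKL (and therefore needs these extra binaries bundled):

```python
# Sketch: check whether the installed numpy is an MKL build
import numpy as np

np.__config__.show()  # MKL builds list libraries such as 'mkl_rt' in the blas/lapack sections
```

After freezing in one-folder mode, the collected `mkl_*.dll` (or `libmkl_*.so`) files should appear next to the executable, which is a quick way to confirm the hook picked them up.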
|
{"golden_diff": "diff --git a/PyInstaller/hooks/hook-numpy.core.py b/PyInstaller/hooks/hook-numpy.core.py\n--- a/PyInstaller/hooks/hook-numpy.core.py\n+++ b/PyInstaller/hooks/hook-numpy.core.py\n@@ -6,22 +6,41 @@\n #\n # The full license is in the file COPYING.txt, distributed with this software.\n #-----------------------------------------------------------------------------\n-# On Windows, numpy depends on a set of dynamically-detemined DLLs, which means\n-# that PyInstaller's static analysis can't find them. See https://github.com/pyinstaller/pyinstaller/issues/1969\n-# for more information. The typical error message: ``Intel MKL FATAL ERROR:\n-# Cannot load mkl_intel_thread.dll.``\n+# If numpy is built with MKL support it depends on a set of libraries loaded\n+# at runtime. Since PyInstaller's static analysis can't find them they must be\n+# included manually.\n #\n-# So, include them manually.\n+# See\n+# https://github.com/pyinstaller/pyinstaller/issues/1881\n+# https://github.com/pyinstaller/pyinstaller/issues/1969\n+# for more information\n import os\n import os.path\n+import re\n from PyInstaller.utils.hooks import get_package_paths\n+from PyInstaller import log as logging \n+from PyInstaller import compat\n \n+binaries = []\n+\n+# look for libraries in numpy package path\n pkg_base, pkg_dir = get_package_paths('numpy.core')\n-# Walk through all files in ``numpy.core``, looking for DLLs.\n-datas = []\n-for f in os.listdir(pkg_dir):\n- extension = os.path.splitext(f)[1]\n- if extension == '.dll':\n- # Produce the tuple ('/abs/path/to/libs/numpy/core/file.dll', '')\n- source = os.path.join(pkg_dir, f)\n- datas.append((source, ''))\n+re_anylib = re.compile(r'\\w+\\.(?:dll|so)', re.IGNORECASE)\n+dlls_pkg = [f for f in os.listdir(pkg_dir) if re_anylib.match(f)]\n+binaries += [(os.path.join(pkg_dir, f), '') for f in dlls_pkg]\n+\n+# look for MKL libraries in pythons lib directory\n+# TODO: check numpy.__config__ if numpy is actually depending on MKL\n+# TODO: determine which directories are searched by the os linker\n+if compat.is_win:\n+ lib_dir = os.path.join(compat.base_prefix, \"Library\", \"bin\")\n+else:\n+ lib_dir = os.path.join(compat.base_prefix, \"lib\")\n+if os.path.isdir(lib_dir):\n+ re_mkllib = re.compile(r'^(?:lib)?mkl\\w+\\.(?:dll|so)', re.IGNORECASE)\n+ dlls_mkl = [f for f in os.listdir(lib_dir) if re_mkllib.match(f)]\n+ if dlls_mkl:\n+ logger = logging.getLogger(__name__)\n+ logger.info(\"MKL libraries found when importing numpy. Adding MKL to binaries\")\n+ binaries += [(os.path.join(lib_dir, f), '') for f in dlls_mkl]\n+\n", "issue": "New numpy hook for intel mkl libraries\nHello - anaconda and winpython build numpy against Intel's mkl libraries. 
Building someone that uses numpy will need to have those libraries.\n\nHere is a winpython hook:\nhttp://stackoverflow.com/a/35853001\n\nand I adapted that for anaconda & python3:\nhttps://github.com/maqifrnswa/scimpy/blob/master/pyinstaller-hooks/hook-numpy.py\n\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2013-2016, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n# On Windows, numpy depends on a set of dynamically-detemined DLLs, which means\n# that PyInstaller's static analysis can't find them. See https://github.com/pyinstaller/pyinstaller/issues/1969\n# for more information. The typical error message: ``Intel MKL FATAL ERROR:\n# Cannot load mkl_intel_thread.dll.``\n#\n# So, include them manually.\nimport os\nimport os.path\nfrom PyInstaller.utils.hooks import get_package_paths\n\npkg_base, pkg_dir = get_package_paths('numpy.core')\n# Walk through all files in ``numpy.core``, looking for DLLs.\ndatas = []\nfor f in os.listdir(pkg_dir):\n extension = os.path.splitext(f)[1]\n if extension == '.dll':\n # Produce the tuple ('/abs/path/to/libs/numpy/core/file.dll', '')\n source = os.path.join(pkg_dir, f)\n datas.append((source, ''))\n", "path": "PyInstaller/hooks/hook-numpy.core.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2013-2016, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n# If numpy is built with MKL support it depends on a set of libraries loaded\n# at runtime. Since PyInstaller's static analysis can't find them they must be\n# included manually.\n#\n# See\n# https://github.com/pyinstaller/pyinstaller/issues/1881\n# https://github.com/pyinstaller/pyinstaller/issues/1969\n# for more information\nimport os\nimport os.path\nimport re\nfrom PyInstaller.utils.hooks import get_package_paths\nfrom PyInstaller import log as logging \nfrom PyInstaller import compat\n\nbinaries = []\n\n# look for libraries in numpy package path\npkg_base, pkg_dir = get_package_paths('numpy.core')\nre_anylib = re.compile(r'\\w+\\.(?:dll|so)', re.IGNORECASE)\ndlls_pkg = [f for f in os.listdir(pkg_dir) if re_anylib.match(f)]\nbinaries += [(os.path.join(pkg_dir, f), '') for f in dlls_pkg]\n\n# look for MKL libraries in pythons lib directory\n# TODO: check numpy.__config__ if numpy is actually depending on MKL\n# TODO: determine which directories are searched by the os linker\nif compat.is_win:\n lib_dir = os.path.join(compat.base_prefix, \"Library\", \"bin\")\nelse:\n lib_dir = os.path.join(compat.base_prefix, \"lib\")\nif os.path.isdir(lib_dir):\n re_mkllib = re.compile(r'^(?:lib)?mkl\\w+\\.(?:dll|so)', re.IGNORECASE)\n dlls_mkl = [f for f in os.listdir(lib_dir) if re_mkllib.match(f)]\n if dlls_mkl:\n logger = logging.getLogger(__name__)\n logger.info(\"MKL libraries found when importing numpy. 
Adding MKL to binaries\")\n binaries += [(os.path.join(lib_dir, f), '') for f in dlls_mkl]\n\n", "path": "PyInstaller/hooks/hook-numpy.core.py"}]}
| 666 | 679 |
gh_patches_debug_40264
|
rasdani/github-patches
|
git_diff
|
tensorflow__addons-2381
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Ideally the number of bins in histogram equalization would be variable rather than limited to 256
https://github.com/tensorflow/addons/blob/d26e2ed5f68092aed57016a7005ce534b1be3dce/tensorflow_addons/image/color_ops.py#L36
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tensorflow_addons/image/color_ops.py`
Content:
```
1 # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """Color operations.
16 equalize: Equalizes image histogram
17 sharpness: Sharpen image
18 """
19
20 import tensorflow as tf
21
22 from tensorflow_addons.utils.types import TensorLike, Number
23 from tensorflow_addons.image.utils import to_4D_image, from_4D_image
24 from tensorflow_addons.image.compose_ops import blend
25
26 from typing import Optional
27 from functools import partial
28
29
30 def _scale_channel(image: TensorLike, channel: int) -> tf.Tensor:
31 """Scale the data in the channel to implement equalize."""
32 image_dtype = image.dtype
33 image = tf.cast(image[:, :, channel], tf.int32)
34
35 # Compute the histogram of the image channel.
36 histo = tf.histogram_fixed_width(image, [0, 255], nbins=256)
37
38 # For the purposes of computing the step, filter out the nonzeros.
39 nonzero_histo = tf.boolean_mask(histo, histo != 0)
40 step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255
41
42 # If step is zero, return the original image. Otherwise, build
43 # lut from the full histogram and step and then index from it.
44 if step == 0:
45 result = image
46 else:
47 lut_values = (tf.cumsum(histo, exclusive=True) + (step // 2)) // step
48 lut_values = tf.clip_by_value(lut_values, 0, 255)
49 result = tf.gather(lut_values, image)
50
51 return tf.cast(result, image_dtype)
52
53
54 def _equalize_image(image: TensorLike) -> tf.Tensor:
55 """Implements Equalize function from PIL using TF ops."""
56 image = tf.stack([_scale_channel(image, c) for c in range(image.shape[-1])], -1)
57 return image
58
59
60 @tf.function
61 def equalize(image: TensorLike, name: Optional[str] = None) -> tf.Tensor:
62 """Equalize image(s)
63
64 Args:
65 images: A tensor of shape
66 `(num_images, num_rows, num_columns, num_channels)` (NHWC), or
67 `(num_rows, num_columns, num_channels)` (HWC), or
68 `(num_rows, num_columns)` (HW). The rank must be statically known (the
69 shape is not `TensorShape(None)`).
70 name: The name of the op.
71 Returns:
72 Image(s) with the same type and shape as `images`, equalized.
73 """
74 with tf.name_scope(name or "equalize"):
75 image_dims = tf.rank(image)
76 image = to_4D_image(image)
77 fn = partial(_equalize_image)
78 image = tf.map_fn(fn, image)
79 return from_4D_image(image, image_dims)
80
81
82 def _sharpness_image(image: TensorLike, factor: Number) -> tf.Tensor:
83 """Implements Sharpness function from PIL using TF ops."""
84 orig_image = image
85 image_dtype = image.dtype
86 image_channels = image.shape[-1]
87 image = tf.cast(image, tf.float32)
88
89 # SMOOTH PIL Kernel.
90 kernel = (
91 tf.constant(
92 [[1, 1, 1], [1, 5, 1], [1, 1, 1]], dtype=tf.float32, shape=[3, 3, 1, 1]
93 )
94 / 13.0
95 )
96 kernel = tf.tile(kernel, [1, 1, image_channels, 1])
97
98 # Apply kernel channel-wise.
99 degenerate = tf.nn.depthwise_conv2d(
100 image, kernel, strides=[1, 1, 1, 1], padding="VALID", dilations=[1, 1]
101 )
102 degenerate = tf.cast(degenerate, image_dtype)
103
104 # For the borders of the resulting image, fill in the values of the original image.
105 mask = tf.ones_like(degenerate)
106 padded_mask = tf.pad(mask, [[0, 0], [1, 1], [1, 1], [0, 0]])
107 padded_degenerate = tf.pad(degenerate, [[0, 0], [1, 1], [1, 1], [0, 0]])
108 result = tf.where(tf.equal(padded_mask, 1), padded_degenerate, orig_image)
109
110 # Blend the final result.
111 blended = blend(result, orig_image, factor)
112 return tf.cast(blended, image_dtype)
113
114
115 @tf.function
116 def sharpness(
117 image: TensorLike, factor: Number, name: Optional[str] = None
118 ) -> tf.Tensor:
119 """Change sharpness of image(s).
120
121 Args:
122 image: A tensor of shape
123 `(num_images, num_rows, num_columns, num_channels)` (NHWC), or
124 `(num_rows, num_columns, num_channels)` (HWC)
125 factor: A floating point value or Tensor above 0.0.
126 name: The name of the op.
127 Returns:
128 Image(s) with the same type and shape as `images`, sharper.
129 """
130 with tf.name_scope(name or "sharpness"):
131 image_dims = tf.rank(image)
132 image = to_4D_image(image)
133 image = _sharpness_image(image, factor=factor)
134 return from_4D_image(image, image_dims)
135
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/tensorflow_addons/image/color_ops.py b/tensorflow_addons/image/color_ops.py
--- a/tensorflow_addons/image/color_ops.py
+++ b/tensorflow_addons/image/color_ops.py
@@ -27,17 +27,17 @@
from functools import partial
-def _scale_channel(image: TensorLike, channel: int) -> tf.Tensor:
+def _scale_channel(image: TensorLike, channel: int, bins: int = 256) -> tf.Tensor:
"""Scale the data in the channel to implement equalize."""
image_dtype = image.dtype
image = tf.cast(image[:, :, channel], tf.int32)
# Compute the histogram of the image channel.
- histo = tf.histogram_fixed_width(image, [0, 255], nbins=256)
+ histo = tf.histogram_fixed_width(image, [0, bins - 1], nbins=bins)
# For the purposes of computing the step, filter out the nonzeros.
nonzero_histo = tf.boolean_mask(histo, histo != 0)
- step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255
+ step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // (bins - 1)
# If step is zero, return the original image. Otherwise, build
# lut from the full histogram and step and then index from it.
@@ -45,20 +45,24 @@
result = image
else:
lut_values = (tf.cumsum(histo, exclusive=True) + (step // 2)) // step
- lut_values = tf.clip_by_value(lut_values, 0, 255)
+ lut_values = tf.clip_by_value(lut_values, 0, bins - 1)
result = tf.gather(lut_values, image)
return tf.cast(result, image_dtype)
-def _equalize_image(image: TensorLike) -> tf.Tensor:
+def _equalize_image(image: TensorLike, bins: int = 256) -> tf.Tensor:
"""Implements Equalize function from PIL using TF ops."""
- image = tf.stack([_scale_channel(image, c) for c in range(image.shape[-1])], -1)
+ image = tf.stack(
+ [_scale_channel(image, c, bins) for c in range(image.shape[-1])], -1
+ )
return image
@tf.function
-def equalize(image: TensorLike, name: Optional[str] = None) -> tf.Tensor:
+def equalize(
+ image: TensorLike, bins: int = 256, name: Optional[str] = None
+) -> tf.Tensor:
"""Equalize image(s)
Args:
@@ -67,6 +71,7 @@
`(num_rows, num_columns, num_channels)` (HWC), or
`(num_rows, num_columns)` (HW). The rank must be statically known (the
shape is not `TensorShape(None)`).
+ bins: The number of bins in the histogram.
name: The name of the op.
Returns:
Image(s) with the same type and shape as `images`, equalized.
@@ -75,7 +80,7 @@
image_dims = tf.rank(image)
image = to_4D_image(image)
fn = partial(_equalize_image)
- image = tf.map_fn(fn, image)
+ image = tf.map_fn(lambda x: fn(x, bins), image)
return from_4D_image(image, image_dims)
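
With the extended signature, callers can match the histogram size to the value range of their data; a minimal usage sketch (the default of 256 bins keeps the previous behaviour):

```python
# Usage sketch, assuming tensorflow and tensorflow_addons with the patched equalize()
import tensorflow as tf
import tensorflow_addons as tfa

# 8-bit image: the default 256 bins reproduces the old behaviour
img_u8 = tf.cast(tf.random.uniform([64, 64, 3], maxval=256, dtype=tf.int32), tf.uint8)
eq_u8 = tfa.image.equalize(img_u8)

# Lower bit-depth data (values in [0, 63]) can now use a matching histogram size
img_6bit = tf.random.uniform([64, 64, 1], maxval=64, dtype=tf.int32)
eq_6bit = tfa.image.equalize(img_6bit, bins=64)
```

Note that `bins` should cover the image's value range, since the lookup table built from the histogram has exactly `bins` entries.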
|
{"golden_diff": "diff --git a/tensorflow_addons/image/color_ops.py b/tensorflow_addons/image/color_ops.py\n--- a/tensorflow_addons/image/color_ops.py\n+++ b/tensorflow_addons/image/color_ops.py\n@@ -27,17 +27,17 @@\n from functools import partial\n \n \n-def _scale_channel(image: TensorLike, channel: int) -> tf.Tensor:\n+def _scale_channel(image: TensorLike, channel: int, bins: int = 256) -> tf.Tensor:\n \"\"\"Scale the data in the channel to implement equalize.\"\"\"\n image_dtype = image.dtype\n image = tf.cast(image[:, :, channel], tf.int32)\n \n # Compute the histogram of the image channel.\n- histo = tf.histogram_fixed_width(image, [0, 255], nbins=256)\n+ histo = tf.histogram_fixed_width(image, [0, bins - 1], nbins=bins)\n \n # For the purposes of computing the step, filter out the nonzeros.\n nonzero_histo = tf.boolean_mask(histo, histo != 0)\n- step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255\n+ step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // (bins - 1)\n \n # If step is zero, return the original image. Otherwise, build\n # lut from the full histogram and step and then index from it.\n@@ -45,20 +45,24 @@\n result = image\n else:\n lut_values = (tf.cumsum(histo, exclusive=True) + (step // 2)) // step\n- lut_values = tf.clip_by_value(lut_values, 0, 255)\n+ lut_values = tf.clip_by_value(lut_values, 0, bins - 1)\n result = tf.gather(lut_values, image)\n \n return tf.cast(result, image_dtype)\n \n \n-def _equalize_image(image: TensorLike) -> tf.Tensor:\n+def _equalize_image(image: TensorLike, bins: int = 256) -> tf.Tensor:\n \"\"\"Implements Equalize function from PIL using TF ops.\"\"\"\n- image = tf.stack([_scale_channel(image, c) for c in range(image.shape[-1])], -1)\n+ image = tf.stack(\n+ [_scale_channel(image, c, bins) for c in range(image.shape[-1])], -1\n+ )\n return image\n \n \n @tf.function\n-def equalize(image: TensorLike, name: Optional[str] = None) -> tf.Tensor:\n+def equalize(\n+ image: TensorLike, bins: int = 256, name: Optional[str] = None\n+) -> tf.Tensor:\n \"\"\"Equalize image(s)\n \n Args:\n@@ -67,6 +71,7 @@\n `(num_rows, num_columns, num_channels)` (HWC), or\n `(num_rows, num_columns)` (HW). The rank must be statically known (the\n shape is not `TensorShape(None)`).\n+ bins: The number of bins in the histogram.\n name: The name of the op.\n Returns:\n Image(s) with the same type and shape as `images`, equalized.\n@@ -75,7 +80,7 @@\n image_dims = tf.rank(image)\n image = to_4D_image(image)\n fn = partial(_equalize_image)\n- image = tf.map_fn(fn, image)\n+ image = tf.map_fn(lambda x: fn(x, bins), image)\n return from_4D_image(image, image_dims)\n", "issue": "Ideally the bins in histogram equalization is variable rather than limited to 256 \nhttps://github.com/tensorflow/addons/blob/d26e2ed5f68092aed57016a7005ce534b1be3dce/tensorflow_addons/image/color_ops.py#L36\n", "before_files": [{"content": "# Copyright 2020 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Color operations.\n equalize: Equalizes image histogram\n sharpness: Sharpen image\n\"\"\"\n\nimport tensorflow as tf\n\nfrom tensorflow_addons.utils.types import TensorLike, Number\nfrom tensorflow_addons.image.utils import to_4D_image, from_4D_image\nfrom tensorflow_addons.image.compose_ops import blend\n\nfrom typing import Optional\nfrom functools import partial\n\n\ndef _scale_channel(image: TensorLike, channel: int) -> tf.Tensor:\n \"\"\"Scale the data in the channel to implement equalize.\"\"\"\n image_dtype = image.dtype\n image = tf.cast(image[:, :, channel], tf.int32)\n\n # Compute the histogram of the image channel.\n histo = tf.histogram_fixed_width(image, [0, 255], nbins=256)\n\n # For the purposes of computing the step, filter out the nonzeros.\n nonzero_histo = tf.boolean_mask(histo, histo != 0)\n step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255\n\n # If step is zero, return the original image. Otherwise, build\n # lut from the full histogram and step and then index from it.\n if step == 0:\n result = image\n else:\n lut_values = (tf.cumsum(histo, exclusive=True) + (step // 2)) // step\n lut_values = tf.clip_by_value(lut_values, 0, 255)\n result = tf.gather(lut_values, image)\n\n return tf.cast(result, image_dtype)\n\n\ndef _equalize_image(image: TensorLike) -> tf.Tensor:\n \"\"\"Implements Equalize function from PIL using TF ops.\"\"\"\n image = tf.stack([_scale_channel(image, c) for c in range(image.shape[-1])], -1)\n return image\n\n\[email protected]\ndef equalize(image: TensorLike, name: Optional[str] = None) -> tf.Tensor:\n \"\"\"Equalize image(s)\n\n Args:\n images: A tensor of shape\n `(num_images, num_rows, num_columns, num_channels)` (NHWC), or\n `(num_rows, num_columns, num_channels)` (HWC), or\n `(num_rows, num_columns)` (HW). 
The rank must be statically known (the\n shape is not `TensorShape(None)`).\n name: The name of the op.\n Returns:\n Image(s) with the same type and shape as `images`, equalized.\n \"\"\"\n with tf.name_scope(name or \"equalize\"):\n image_dims = tf.rank(image)\n image = to_4D_image(image)\n fn = partial(_equalize_image)\n image = tf.map_fn(fn, image)\n return from_4D_image(image, image_dims)\n\n\ndef _sharpness_image(image: TensorLike, factor: Number) -> tf.Tensor:\n \"\"\"Implements Sharpness function from PIL using TF ops.\"\"\"\n orig_image = image\n image_dtype = image.dtype\n image_channels = image.shape[-1]\n image = tf.cast(image, tf.float32)\n\n # SMOOTH PIL Kernel.\n kernel = (\n tf.constant(\n [[1, 1, 1], [1, 5, 1], [1, 1, 1]], dtype=tf.float32, shape=[3, 3, 1, 1]\n )\n / 13.0\n )\n kernel = tf.tile(kernel, [1, 1, image_channels, 1])\n\n # Apply kernel channel-wise.\n degenerate = tf.nn.depthwise_conv2d(\n image, kernel, strides=[1, 1, 1, 1], padding=\"VALID\", dilations=[1, 1]\n )\n degenerate = tf.cast(degenerate, image_dtype)\n\n # For the borders of the resulting image, fill in the values of the original image.\n mask = tf.ones_like(degenerate)\n padded_mask = tf.pad(mask, [[0, 0], [1, 1], [1, 1], [0, 0]])\n padded_degenerate = tf.pad(degenerate, [[0, 0], [1, 1], [1, 1], [0, 0]])\n result = tf.where(tf.equal(padded_mask, 1), padded_degenerate, orig_image)\n\n # Blend the final result.\n blended = blend(result, orig_image, factor)\n return tf.cast(blended, image_dtype)\n\n\[email protected]\ndef sharpness(\n image: TensorLike, factor: Number, name: Optional[str] = None\n) -> tf.Tensor:\n \"\"\"Change sharpness of image(s).\n\n Args:\n image: A tensor of shape\n `(num_images, num_rows, num_columns, num_channels)` (NHWC), or\n `(num_rows, num_columns, num_channels)` (HWC)\n factor: A floating point value or Tensor above 0.0.\n name: The name of the op.\n Returns:\n Image(s) with the same type and shape as `images`, sharper.\n \"\"\"\n with tf.name_scope(name or \"sharpness\"):\n image_dims = tf.rank(image)\n image = to_4D_image(image)\n image = _sharpness_image(image, factor=factor)\n return from_4D_image(image, image_dims)\n", "path": "tensorflow_addons/image/color_ops.py"}], "after_files": [{"content": "# Copyright 2020 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Color operations.\n equalize: Equalizes image histogram\n sharpness: Sharpen image\n\"\"\"\n\nimport tensorflow as tf\n\nfrom tensorflow_addons.utils.types import TensorLike, Number\nfrom tensorflow_addons.image.utils import to_4D_image, from_4D_image\nfrom tensorflow_addons.image.compose_ops import blend\n\nfrom typing import Optional\nfrom functools import partial\n\n\ndef _scale_channel(image: TensorLike, channel: int, bins: int = 256) -> tf.Tensor:\n \"\"\"Scale the data in the channel to implement equalize.\"\"\"\n image_dtype = image.dtype\n image = tf.cast(image[:, :, channel], tf.int32)\n\n # Compute the histogram of the image channel.\n histo = tf.histogram_fixed_width(image, [0, bins - 1], nbins=bins)\n\n # For the purposes of computing the step, filter out the nonzeros.\n nonzero_histo = tf.boolean_mask(histo, histo != 0)\n step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // (bins - 1)\n\n # If step is zero, return the original image. Otherwise, build\n # lut from the full histogram and step and then index from it.\n if step == 0:\n result = image\n else:\n lut_values = (tf.cumsum(histo, exclusive=True) + (step // 2)) // step\n lut_values = tf.clip_by_value(lut_values, 0, bins - 1)\n result = tf.gather(lut_values, image)\n\n return tf.cast(result, image_dtype)\n\n\ndef _equalize_image(image: TensorLike, bins: int = 256) -> tf.Tensor:\n \"\"\"Implements Equalize function from PIL using TF ops.\"\"\"\n image = tf.stack(\n [_scale_channel(image, c, bins) for c in range(image.shape[-1])], -1\n )\n return image\n\n\[email protected]\ndef equalize(\n image: TensorLike, bins: int = 256, name: Optional[str] = None\n) -> tf.Tensor:\n \"\"\"Equalize image(s)\n\n Args:\n images: A tensor of shape\n `(num_images, num_rows, num_columns, num_channels)` (NHWC), or\n `(num_rows, num_columns, num_channels)` (HWC), or\n `(num_rows, num_columns)` (HW). 
The rank must be statically known (the\n shape is not `TensorShape(None)`).\n bins: The number of bins in the histogram.\n name: The name of the op.\n Returns:\n Image(s) with the same type and shape as `images`, equalized.\n \"\"\"\n with tf.name_scope(name or \"equalize\"):\n image_dims = tf.rank(image)\n image = to_4D_image(image)\n fn = partial(_equalize_image)\n image = tf.map_fn(lambda x: fn(x, bins), image)\n return from_4D_image(image, image_dims)\n\n\ndef _sharpness_image(image: TensorLike, factor: Number) -> tf.Tensor:\n \"\"\"Implements Sharpness function from PIL using TF ops.\"\"\"\n orig_image = image\n image_dtype = image.dtype\n image_channels = image.shape[-1]\n image = tf.cast(image, tf.float32)\n\n # SMOOTH PIL Kernel.\n kernel = (\n tf.constant(\n [[1, 1, 1], [1, 5, 1], [1, 1, 1]], dtype=tf.float32, shape=[3, 3, 1, 1]\n )\n / 13.0\n )\n kernel = tf.tile(kernel, [1, 1, image_channels, 1])\n\n # Apply kernel channel-wise.\n degenerate = tf.nn.depthwise_conv2d(\n image, kernel, strides=[1, 1, 1, 1], padding=\"VALID\", dilations=[1, 1]\n )\n degenerate = tf.cast(degenerate, image_dtype)\n\n # For the borders of the resulting image, fill in the values of the original image.\n mask = tf.ones_like(degenerate)\n padded_mask = tf.pad(mask, [[0, 0], [1, 1], [1, 1], [0, 0]])\n padded_degenerate = tf.pad(degenerate, [[0, 0], [1, 1], [1, 1], [0, 0]])\n result = tf.where(tf.equal(padded_mask, 1), padded_degenerate, orig_image)\n\n # Blend the final result.\n blended = blend(result, orig_image, factor)\n return tf.cast(blended, image_dtype)\n\n\[email protected]\ndef sharpness(\n image: TensorLike, factor: Number, name: Optional[str] = None\n) -> tf.Tensor:\n \"\"\"Change sharpness of image(s).\n\n Args:\n image: A tensor of shape\n `(num_images, num_rows, num_columns, num_channels)` (NHWC), or\n `(num_rows, num_columns, num_channels)` (HWC)\n factor: A floating point value or Tensor above 0.0.\n name: The name of the op.\n Returns:\n Image(s) with the same type and shape as `images`, sharper.\n \"\"\"\n with tf.name_scope(name or \"sharpness\"):\n image_dims = tf.rank(image)\n image = to_4D_image(image)\n image = _sharpness_image(image, factor=factor)\n return from_4D_image(image, image_dims)\n", "path": "tensorflow_addons/image/color_ops.py"}]}
| 1,935 | 801 |
gh_patches_debug_38724
|
rasdani/github-patches
|
git_diff
|
Qiskit__qiskit-3792
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Scheduler should be supported in execute pipeline
<!-- ⚠️ If you do not respect this template, your issue will be closed -->
<!-- ⚠️ Make sure to browse the opened and closed issues to confirm this idea does not exist. -->
### What is the expected enhancement?
It should be configurable to schedule your circuit into pulses during execute.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qiskit/execute.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # This code is part of Qiskit.
4 #
5 # (C) Copyright IBM 2017, 2019.
6 #
7 # This code is licensed under the Apache License, Version 2.0. You may
8 # obtain a copy of this license in the LICENSE.txt file in the root directory
9 # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
10 #
11 # Any modifications or derivative works of this code must retain this
12 # copyright notice, and modified files need to carry a notice indicating
13 # that they have been altered from the originals.
14
15 """
16 =============================================
17 Executing Experiments (:mod:`qiskit.execute`)
18 =============================================
19
20 .. currentmodule:: qiskit.execute
21
22 .. autofunction:: execute
23 """
24 from qiskit.compiler import transpile, assemble
25 from qiskit.qobj.utils import MeasLevel, MeasReturnType
26
27
28 def execute(experiments, backend,
29 basis_gates=None, coupling_map=None, # circuit transpile options
30 backend_properties=None, initial_layout=None,
31 seed_transpiler=None, optimization_level=None, pass_manager=None,
32 qobj_id=None, qobj_header=None, shots=1024, # common run options
33 memory=False, max_credits=10, seed_simulator=None,
34 default_qubit_los=None, default_meas_los=None, # schedule run options
35 schedule_los=None, meas_level=MeasLevel.CLASSIFIED,
36 meas_return=MeasReturnType.AVERAGE,
37 memory_slots=None, memory_slot_size=100, rep_time=None, parameter_binds=None,
38 **run_config):
39 """Execute a list of :class:`qiskit.circuit.QuantumCircuit` or
40 :class:`qiskit.pulse.Schedule` on a backend.
41
42 The execution is asynchronous, and a handle to a job instance is returned.
43
44 Args:
45 experiments (QuantumCircuit or list[QuantumCircuit] or Schedule or list[Schedule]):
46 Circuit(s) or pulse schedule(s) to execute
47
48 backend (BaseBackend):
49 Backend to execute circuits on.
50 Transpiler options are automatically grabbed from
51 backend.configuration() and backend.properties().
52 If any other option is explicitly set (e.g. coupling_map), it
53 will override the backend's.
54
55 basis_gates (list[str]):
56 List of basis gate names to unroll to.
57 e.g:
58 ['u1', 'u2', 'u3', 'cx']
59 If None, do not unroll.
60
61 coupling_map (CouplingMap or list):
62 Coupling map (perhaps custom) to target in mapping.
63 Multiple formats are supported:
64 a. CouplingMap instance
65
66 b. list
67 Must be given as an adjacency matrix, where each entry
68 specifies all two-qubit interactions supported by backend
69 e.g:
70 [[0, 1], [0, 3], [1, 2], [1, 5], [2, 5], [4, 1], [5, 3]]
71
72 backend_properties (BackendProperties):
73 Properties returned by a backend, including information on gate
74 errors, readout errors, qubit coherence times, etc. Find a backend
75 that provides this information with:
76 ``backend.properties()``
77
78 initial_layout (Layout or dict or list):
79 Initial position of virtual qubits on physical qubits.
80 If this layout makes the circuit compatible with the coupling_map
81 constraints, it will be used.
82 The final layout is not guaranteed to be the same, as the transpiler
83 may permute qubits through swaps or other means.
84
85 Multiple formats are supported:
86 a. Layout instance
87
88 b. dict
89 virtual to physical:
90 {qr[0]: 0,
91 qr[1]: 3,
92 qr[2]: 5}
93
94 physical to virtual:
95 {0: qr[0],
96 3: qr[1],
97 5: qr[2]}
98
99 c. list
100 virtual to physical:
101 [0, 3, 5] # virtual qubits are ordered (in addition to named)
102
103 physical to virtual:
104 [qr[0], None, None, qr[1], None, qr[2]]
105
106 seed_transpiler (int):
107 Sets random seed for the stochastic parts of the transpiler
108
109 optimization_level (int):
110 How much optimization to perform on the circuits.
111 Higher levels generate more optimized circuits,
112 at the expense of longer transpilation time.
113 0: No optimization
114 1: Light optimization
115 2: Heavy optimization
116 3: Highest optimization
117 If None, level 1 will be chosen as default.
118
119 pass_manager (PassManager):
120 The pass manager to use during transpilation. If this arg is present,
121 auto-selection of pass manager based on the transpile options will be
122 turned off and this pass manager will be used directly.
123
124 qobj_id (str):
125 String identifier to annotate the Qobj
126
127 qobj_header (QobjHeader or dict):
128 User input that will be inserted in Qobj header, and will also be
129 copied to the corresponding Result header. Headers do not affect the run.
130
131 shots (int):
132 Number of repetitions of each circuit, for sampling. Default: 1024
133
134 memory (bool):
135 If True, per-shot measurement bitstrings are returned as well
136 (provided the backend supports it). For OpenPulse jobs, only
137 measurement level 2 supports this option. Default: False
138
139 max_credits (int):
140 Maximum credits to spend on job. Default: 10
141
142 seed_simulator (int):
143 Random seed to control sampling, for when backend is a simulator
144
145 default_qubit_los (list):
146 List of default qubit LO frequencies in Hz
147
148 default_meas_los (list):
149 List of default meas LO frequencies in Hz
150
151 schedule_los (None or list[Union[Dict[PulseChannel, float], LoConfig]] or \
152 Union[Dict[PulseChannel, float], LoConfig]):
153 Experiment LO configurations
154
155 meas_level (int or MeasLevel):
156 Set the appropriate level of the measurement output for pulse experiments.
157
158 meas_return (str or MeasReturn):
159 Level of measurement data for the backend to return
160 For `meas_level` 0 and 1:
161 "single" returns information from every shot.
162 "avg" returns average measurement output (averaged over number of shots).
163
164 memory_slots (int):
165 Number of classical memory slots used in this job.
166
167 memory_slot_size (int):
168 Size of each memory slot if the output is Level 0.
169
170 rep_time (int): repetition time of the experiment in μs.
171 The delay between experiments will be rep_time.
172 Must be from the list provided by the device.
173
174 parameter_binds (list[dict]):
175 List of Parameter bindings over which the set of experiments will be
176 executed. Each list element (bind) should be of the form
177 {Parameter1: value1, Parameter2: value2, ...}. All binds will be
178 executed across all experiments, e.g. if parameter_binds is a
179 length-n list, and there are m experiments, a total of m x n
180 experiments will be run (one for each experiment/bind pair).
181
182 run_config (dict):
183 Extra arguments used to configure the run (e.g. for Aer configurable backends).
184 Refer to the backend documentation for details on these arguments.
185 Note: for now, these keyword arguments will both be copied to the
186 Qobj config, and passed to backend.run()
187
188 Returns:
189 BaseJob: returns job instance derived from BaseJob
190
191 Raises:
192 QiskitError: if the execution cannot be interpreted as either circuits or schedules
193
194 Example:
195 Construct a 5-qubit GHZ circuit and execute 4321 shots on a backend.
196
197 .. jupyter-execute::
198
199 from qiskit import QuantumCircuit, execute, BasicAer
200
201 backend = BasicAer.get_backend('qasm_simulator')
202
203 qc = QuantumCircuit(5, 5)
204 qc.h(0)
205 qc.cx(0, range(1, 5))
206 qc.measure_all()
207
208 job = execute(qc, backend, shots=4321)
209 """
210
211 # transpiling the circuits using given transpile options
212 experiments = transpile(experiments,
213 basis_gates=basis_gates,
214 coupling_map=coupling_map,
215 backend_properties=backend_properties,
216 initial_layout=initial_layout,
217 seed_transpiler=seed_transpiler,
218 optimization_level=optimization_level,
219 backend=backend,
220 pass_manager=pass_manager,
221 )
222
223 # assembling the circuits into a qobj to be run on the backend
224 qobj = assemble(experiments,
225 qobj_id=qobj_id,
226 qobj_header=qobj_header,
227 shots=shots,
228 memory=memory,
229 max_credits=max_credits,
230 seed_simulator=seed_simulator,
231 default_qubit_los=default_qubit_los,
232 default_meas_los=default_meas_los,
233 schedule_los=schedule_los,
234 meas_level=meas_level,
235 meas_return=meas_return,
236 memory_slots=memory_slots,
237 memory_slot_size=memory_slot_size,
238 rep_time=rep_time,
239 parameter_binds=parameter_binds,
240 backend=backend,
241 **run_config
242 )
243
244 # executing the circuits on the backend and returning the job
245 return backend.run(qobj, **run_config)
246
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/qiskit/execute.py b/qiskit/execute.py
--- a/qiskit/execute.py
+++ b/qiskit/execute.py
@@ -21,8 +21,10 @@
.. autofunction:: execute
"""
-from qiskit.compiler import transpile, assemble
+from qiskit.compiler import transpile, assemble, schedule
from qiskit.qobj.utils import MeasLevel, MeasReturnType
+from qiskit.pulse import Schedule
+from qiskit.exceptions import QiskitError
def execute(experiments, backend,
@@ -35,6 +37,7 @@
schedule_los=None, meas_level=MeasLevel.CLASSIFIED,
meas_return=MeasReturnType.AVERAGE,
memory_slots=None, memory_slot_size=100, rep_time=None, parameter_binds=None,
+ schedule_circuit=False, inst_map=None, meas_map=None, scheduling_method=None,
**run_config):
"""Execute a list of :class:`qiskit.circuit.QuantumCircuit` or
:class:`qiskit.pulse.Schedule` on a backend.
@@ -179,6 +182,21 @@
length-n list, and there are m experiments, a total of m x n
experiments will be run (one for each experiment/bind pair).
+ schedule_circuit (bool):
+ If ``True``, ``experiments`` will be converted to ``Schedule``s prior to
+ execution.
+
+ inst_map (InstructionScheduleMap):
+ Mapping of circuit operations to pulse schedules. If None, defaults to the
+ ``instruction_schedule_map`` of ``backend``.
+
+ meas_map (list(list(int))):
+ List of sets of qubits that must be measured together. If None, defaults to
+ the ``meas_map`` of ``backend``.
+
+ scheduling_method (str or list(str)):
+ Optionally specify a particular scheduling method.
+
run_config (dict):
Extra arguments used to configure the run (e.g. for Aer configurable backends).
Refer to the backend documentation for details on these arguments.
@@ -220,6 +238,16 @@
pass_manager=pass_manager,
)
+ if schedule_circuit:
+ if isinstance(experiments, Schedule) or isinstance(experiments[0], Schedule):
+ raise QiskitError("Must supply QuantumCircuit to schedule circuit.")
+ experiments = schedule(circuits=experiments,
+ backend=backend,
+ inst_map=inst_map,
+ meas_map=meas_map,
+ method=scheduling_method
+ )
+
# assembling the circuits into a qobj to be run on the backend
qobj = assemble(experiments,
qobj_id=qobj_id,
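
For reference, a minimal usage sketch of the new keyword arguments; the backend is a placeholder here and is assumed to be an OpenPulse-enabled device obtained elsewhere (e.g. from an IBMQ provider):

```python
# Sketch: lower circuits to pulse schedules inside execute()
from qiskit import QuantumCircuit, execute

qc = QuantumCircuit(2, 2)
qc.h(0)
qc.cx(0, 1)
qc.measure([0, 1], [0, 1])

# `backend` is a placeholder: any pulse-enabled backend; when inst_map/meas_map are
# left as None they default to the backend's instruction schedule map and meas_map.
job = execute(qc, backend, schedule_circuit=True,
              scheduling_method='as_late_as_possible', shots=1024)
```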
|
{"golden_diff": "diff --git a/qiskit/execute.py b/qiskit/execute.py\n--- a/qiskit/execute.py\n+++ b/qiskit/execute.py\n@@ -21,8 +21,10 @@\n \n .. autofunction:: execute\n \"\"\"\n-from qiskit.compiler import transpile, assemble\n+from qiskit.compiler import transpile, assemble, schedule\n from qiskit.qobj.utils import MeasLevel, MeasReturnType\n+from qiskit.pulse import Schedule\n+from qiskit.exceptions import QiskitError\n \n \n def execute(experiments, backend,\n@@ -35,6 +37,7 @@\n schedule_los=None, meas_level=MeasLevel.CLASSIFIED,\n meas_return=MeasReturnType.AVERAGE,\n memory_slots=None, memory_slot_size=100, rep_time=None, parameter_binds=None,\n+ schedule_circuit=False, inst_map=None, meas_map=None, scheduling_method=None,\n **run_config):\n \"\"\"Execute a list of :class:`qiskit.circuit.QuantumCircuit` or\n :class:`qiskit.pulse.Schedule` on a backend.\n@@ -179,6 +182,21 @@\n length-n list, and there are m experiments, a total of m x n\n experiments will be run (one for each experiment/bind pair).\n \n+ schedule_circuit (bool):\n+ If ``True``, ``experiments`` will be converted to ``Schedule``s prior to\n+ execution.\n+\n+ inst_map (InstructionScheduleMap):\n+ Mapping of circuit operations to pulse schedules. If None, defaults to the\n+ ``instruction_schedule_map`` of ``backend``.\n+\n+ meas_map (list(list(int))):\n+ List of sets of qubits that must be measured together. If None, defaults to\n+ the ``meas_map`` of ``backend``.\n+\n+ scheduling_method (str or list(str)):\n+ Optionally specify a particular scheduling method.\n+\n run_config (dict):\n Extra arguments used to configure the run (e.g. for Aer configurable backends).\n Refer to the backend documentation for details on these arguments.\n@@ -220,6 +238,16 @@\n pass_manager=pass_manager,\n )\n \n+ if schedule_circuit:\n+ if isinstance(experiments, Schedule) or isinstance(experiments[0], Schedule):\n+ raise QiskitError(\"Must supply QuantumCircuit to schedule circuit.\")\n+ experiments = schedule(circuits=experiments,\n+ backend=backend,\n+ inst_map=inst_map,\n+ meas_map=meas_map,\n+ method=scheduling_method\n+ )\n+\n # assembling the circuits into a qobj to be run on the backend\n qobj = assemble(experiments,\n qobj_id=qobj_id,\n", "issue": "Scheduler should be supported in execute pipeline\n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues to confirm this idea does not exist. -->\r\n\r\n### What is the expected enhancement?\r\nIt should be configurable to schedule your circuit into pulses during execute.\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"\n=============================================\nExecuting Experiments (:mod:`qiskit.execute`)\n=============================================\n\n.. currentmodule:: qiskit.execute\n\n.. 
autofunction:: execute\n\"\"\"\nfrom qiskit.compiler import transpile, assemble\nfrom qiskit.qobj.utils import MeasLevel, MeasReturnType\n\n\ndef execute(experiments, backend,\n basis_gates=None, coupling_map=None, # circuit transpile options\n backend_properties=None, initial_layout=None,\n seed_transpiler=None, optimization_level=None, pass_manager=None,\n qobj_id=None, qobj_header=None, shots=1024, # common run options\n memory=False, max_credits=10, seed_simulator=None,\n default_qubit_los=None, default_meas_los=None, # schedule run options\n schedule_los=None, meas_level=MeasLevel.CLASSIFIED,\n meas_return=MeasReturnType.AVERAGE,\n memory_slots=None, memory_slot_size=100, rep_time=None, parameter_binds=None,\n **run_config):\n \"\"\"Execute a list of :class:`qiskit.circuit.QuantumCircuit` or\n :class:`qiskit.pulse.Schedule` on a backend.\n\n The execution is asynchronous, and a handle to a job instance is returned.\n\n Args:\n experiments (QuantumCircuit or list[QuantumCircuit] or Schedule or list[Schedule]):\n Circuit(s) or pulse schedule(s) to execute\n\n backend (BaseBackend):\n Backend to execute circuits on.\n Transpiler options are automatically grabbed from\n backend.configuration() and backend.properties().\n If any other option is explicitly set (e.g. coupling_map), it\n will override the backend's.\n\n basis_gates (list[str]):\n List of basis gate names to unroll to.\n e.g:\n ['u1', 'u2', 'u3', 'cx']\n If None, do not unroll.\n\n coupling_map (CouplingMap or list):\n Coupling map (perhaps custom) to target in mapping.\n Multiple formats are supported:\n a. CouplingMap instance\n\n b. list\n Must be given as an adjacency matrix, where each entry\n specifies all two-qubit interactions supported by backend\n e.g:\n [[0, 1], [0, 3], [1, 2], [1, 5], [2, 5], [4, 1], [5, 3]]\n\n backend_properties (BackendProperties):\n Properties returned by a backend, including information on gate\n errors, readout errors, qubit coherence times, etc. Find a backend\n that provides this information with:\n ``backend.properties()``\n\n initial_layout (Layout or dict or list):\n Initial position of virtual qubits on physical qubits.\n If this layout makes the circuit compatible with the coupling_map\n constraints, it will be used.\n The final layout is not guaranteed to be the same, as the transpiler\n may permute qubits through swaps or other means.\n\n Multiple formats are supported:\n a. Layout instance\n\n b. dict\n virtual to physical:\n {qr[0]: 0,\n qr[1]: 3,\n qr[2]: 5}\n\n physical to virtual:\n {0: qr[0],\n 3: qr[1],\n 5: qr[2]}\n\n c. list\n virtual to physical:\n [0, 3, 5] # virtual qubits are ordered (in addition to named)\n\n physical to virtual:\n [qr[0], None, None, qr[1], None, qr[2]]\n\n seed_transpiler (int):\n Sets random seed for the stochastic parts of the transpiler\n\n optimization_level (int):\n How much optimization to perform on the circuits.\n Higher levels generate more optimized circuits,\n at the expense of longer transpilation time.\n 0: No optimization\n 1: Light optimization\n 2: Heavy optimization\n 3: Highest optimization\n If None, level 1 will be chosen as default.\n\n pass_manager (PassManager):\n The pass manager to use during transpilation. 
If this arg is present,\n auto-selection of pass manager based on the transpile options will be\n turned off and this pass manager will be used directly.\n\n qobj_id (str):\n String identifier to annotate the Qobj\n\n qobj_header (QobjHeader or dict):\n User input that will be inserted in Qobj header, and will also be\n copied to the corresponding Result header. Headers do not affect the run.\n\n shots (int):\n Number of repetitions of each circuit, for sampling. Default: 1024\n\n memory (bool):\n If True, per-shot measurement bitstrings are returned as well\n (provided the backend supports it). For OpenPulse jobs, only\n measurement level 2 supports this option. Default: False\n\n max_credits (int):\n Maximum credits to spend on job. Default: 10\n\n seed_simulator (int):\n Random seed to control sampling, for when backend is a simulator\n\n default_qubit_los (list):\n List of default qubit LO frequencies in Hz\n\n default_meas_los (list):\n List of default meas LO frequencies in Hz\n\n schedule_los (None or list[Union[Dict[PulseChannel, float], LoConfig]] or \\\n Union[Dict[PulseChannel, float], LoConfig]):\n Experiment LO configurations\n\n meas_level (int or MeasLevel):\n Set the appropriate level of the measurement output for pulse experiments.\n\n meas_return (str or MeasReturn):\n Level of measurement data for the backend to return\n For `meas_level` 0 and 1:\n \"single\" returns information from every shot.\n \"avg\" returns average measurement output (averaged over number of shots).\n\n memory_slots (int):\n Number of classical memory slots used in this job.\n\n memory_slot_size (int):\n Size of each memory slot if the output is Level 0.\n\n rep_time (int): repetition time of the experiment in \u03bcs.\n The delay between experiments will be rep_time.\n Must be from the list provided by the device.\n\n parameter_binds (list[dict]):\n List of Parameter bindings over which the set of experiments will be\n executed. Each list element (bind) should be of the form\n {Parameter1: value1, Parameter2: value2, ...}. All binds will be\n executed across all experiments, e.g. if parameter_binds is a\n length-n list, and there are m experiments, a total of m x n\n experiments will be run (one for each experiment/bind pair).\n\n run_config (dict):\n Extra arguments used to configure the run (e.g. for Aer configurable backends).\n Refer to the backend documentation for details on these arguments.\n Note: for now, these keyword arguments will both be copied to the\n Qobj config, and passed to backend.run()\n\n Returns:\n BaseJob: returns job instance derived from BaseJob\n\n Raises:\n QiskitError: if the execution cannot be interpreted as either circuits or schedules\n\n Example:\n Construct a 5-qubit GHZ circuit and execute 4321 shots on a backend.\n\n .. 
jupyter-execute::\n\n from qiskit import QuantumCircuit, execute, BasicAer\n\n backend = BasicAer.get_backend('qasm_simulator')\n\n qc = QuantumCircuit(5, 5)\n qc.h(0)\n qc.cx(0, range(1, 5))\n qc.measure_all()\n\n job = execute(qc, backend, shots=4321)\n \"\"\"\n\n # transpiling the circuits using given transpile options\n experiments = transpile(experiments,\n basis_gates=basis_gates,\n coupling_map=coupling_map,\n backend_properties=backend_properties,\n initial_layout=initial_layout,\n seed_transpiler=seed_transpiler,\n optimization_level=optimization_level,\n backend=backend,\n pass_manager=pass_manager,\n )\n\n # assembling the circuits into a qobj to be run on the backend\n qobj = assemble(experiments,\n qobj_id=qobj_id,\n qobj_header=qobj_header,\n shots=shots,\n memory=memory,\n max_credits=max_credits,\n seed_simulator=seed_simulator,\n default_qubit_los=default_qubit_los,\n default_meas_los=default_meas_los,\n schedule_los=schedule_los,\n meas_level=meas_level,\n meas_return=meas_return,\n memory_slots=memory_slots,\n memory_slot_size=memory_slot_size,\n rep_time=rep_time,\n parameter_binds=parameter_binds,\n backend=backend,\n **run_config\n )\n\n # executing the circuits on the backend and returning the job\n return backend.run(qobj, **run_config)\n", "path": "qiskit/execute.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"\n=============================================\nExecuting Experiments (:mod:`qiskit.execute`)\n=============================================\n\n.. currentmodule:: qiskit.execute\n\n.. autofunction:: execute\n\"\"\"\nfrom qiskit.compiler import transpile, assemble, schedule\nfrom qiskit.qobj.utils import MeasLevel, MeasReturnType\nfrom qiskit.pulse import Schedule\nfrom qiskit.exceptions import QiskitError\n\n\ndef execute(experiments, backend,\n basis_gates=None, coupling_map=None, # circuit transpile options\n backend_properties=None, initial_layout=None,\n seed_transpiler=None, optimization_level=None, pass_manager=None,\n qobj_id=None, qobj_header=None, shots=1024, # common run options\n memory=False, max_credits=10, seed_simulator=None,\n default_qubit_los=None, default_meas_los=None, # schedule run options\n schedule_los=None, meas_level=MeasLevel.CLASSIFIED,\n meas_return=MeasReturnType.AVERAGE,\n memory_slots=None, memory_slot_size=100, rep_time=None, parameter_binds=None,\n schedule_circuit=False, inst_map=None, meas_map=None, scheduling_method=None,\n **run_config):\n \"\"\"Execute a list of :class:`qiskit.circuit.QuantumCircuit` or\n :class:`qiskit.pulse.Schedule` on a backend.\n\n The execution is asynchronous, and a handle to a job instance is returned.\n\n Args:\n experiments (QuantumCircuit or list[QuantumCircuit] or Schedule or list[Schedule]):\n Circuit(s) or pulse schedule(s) to execute\n\n backend (BaseBackend):\n Backend to execute circuits on.\n Transpiler options are automatically grabbed from\n backend.configuration() and backend.properties().\n If any other option is explicitly set (e.g. 
coupling_map), it\n will override the backend's.\n\n basis_gates (list[str]):\n List of basis gate names to unroll to.\n e.g:\n ['u1', 'u2', 'u3', 'cx']\n If None, do not unroll.\n\n coupling_map (CouplingMap or list):\n Coupling map (perhaps custom) to target in mapping.\n Multiple formats are supported:\n a. CouplingMap instance\n\n b. list\n Must be given as an adjacency matrix, where each entry\n specifies all two-qubit interactions supported by backend\n e.g:\n [[0, 1], [0, 3], [1, 2], [1, 5], [2, 5], [4, 1], [5, 3]]\n\n backend_properties (BackendProperties):\n Properties returned by a backend, including information on gate\n errors, readout errors, qubit coherence times, etc. Find a backend\n that provides this information with:\n ``backend.properties()``\n\n initial_layout (Layout or dict or list):\n Initial position of virtual qubits on physical qubits.\n If this layout makes the circuit compatible with the coupling_map\n constraints, it will be used.\n The final layout is not guaranteed to be the same, as the transpiler\n may permute qubits through swaps or other means.\n\n Multiple formats are supported:\n a. Layout instance\n\n b. dict\n virtual to physical:\n {qr[0]: 0,\n qr[1]: 3,\n qr[2]: 5}\n\n physical to virtual:\n {0: qr[0],\n 3: qr[1],\n 5: qr[2]}\n\n c. list\n virtual to physical:\n [0, 3, 5] # virtual qubits are ordered (in addition to named)\n\n physical to virtual:\n [qr[0], None, None, qr[1], None, qr[2]]\n\n seed_transpiler (int):\n Sets random seed for the stochastic parts of the transpiler\n\n optimization_level (int):\n How much optimization to perform on the circuits.\n Higher levels generate more optimized circuits,\n at the expense of longer transpilation time.\n 0: No optimization\n 1: Light optimization\n 2: Heavy optimization\n 3: Highest optimization\n If None, level 1 will be chosen as default.\n\n pass_manager (PassManager):\n The pass manager to use during transpilation. If this arg is present,\n auto-selection of pass manager based on the transpile options will be\n turned off and this pass manager will be used directly.\n\n qobj_id (str):\n String identifier to annotate the Qobj\n\n qobj_header (QobjHeader or dict):\n User input that will be inserted in Qobj header, and will also be\n copied to the corresponding Result header. Headers do not affect the run.\n\n shots (int):\n Number of repetitions of each circuit, for sampling. Default: 1024\n\n memory (bool):\n If True, per-shot measurement bitstrings are returned as well\n (provided the backend supports it). For OpenPulse jobs, only\n measurement level 2 supports this option. Default: False\n\n max_credits (int):\n Maximum credits to spend on job. 
Default: 10\n\n seed_simulator (int):\n Random seed to control sampling, for when backend is a simulator\n\n default_qubit_los (list):\n List of default qubit LO frequencies in Hz\n\n default_meas_los (list):\n List of default meas LO frequencies in Hz\n\n schedule_los (None or list[Union[Dict[PulseChannel, float], LoConfig]] or \\\n Union[Dict[PulseChannel, float], LoConfig]):\n Experiment LO configurations\n\n meas_level (int or MeasLevel):\n Set the appropriate level of the measurement output for pulse experiments.\n\n meas_return (str or MeasReturn):\n Level of measurement data for the backend to return\n For `meas_level` 0 and 1:\n \"single\" returns information from every shot.\n \"avg\" returns average measurement output (averaged over number of shots).\n\n memory_slots (int):\n Number of classical memory slots used in this job.\n\n memory_slot_size (int):\n Size of each memory slot if the output is Level 0.\n\n rep_time (int): repetition time of the experiment in \u03bcs.\n The delay between experiments will be rep_time.\n Must be from the list provided by the device.\n\n parameter_binds (list[dict]):\n List of Parameter bindings over which the set of experiments will be\n executed. Each list element (bind) should be of the form\n {Parameter1: value1, Parameter2: value2, ...}. All binds will be\n executed across all experiments, e.g. if parameter_binds is a\n length-n list, and there are m experiments, a total of m x n\n experiments will be run (one for each experiment/bind pair).\n\n schedule_circuit (bool):\n If ``True``, ``experiments`` will be converted to ``Schedule``s prior to\n execution.\n\n inst_map (InstructionScheduleMap):\n Mapping of circuit operations to pulse schedules. If None, defaults to the\n ``instruction_schedule_map`` of ``backend``.\n\n meas_map (list(list(int))):\n List of sets of qubits that must be measured together. If None, defaults to\n the ``meas_map`` of ``backend``.\n\n scheduling_method (str or list(str)):\n Optionally specify a particular scheduling method.\n\n run_config (dict):\n Extra arguments used to configure the run (e.g. for Aer configurable backends).\n Refer to the backend documentation for details on these arguments.\n Note: for now, these keyword arguments will both be copied to the\n Qobj config, and passed to backend.run()\n\n Returns:\n BaseJob: returns job instance derived from BaseJob\n\n Raises:\n QiskitError: if the execution cannot be interpreted as either circuits or schedules\n\n Example:\n Construct a 5-qubit GHZ circuit and execute 4321 shots on a backend.\n\n .. 
jupyter-execute::\n\n from qiskit import QuantumCircuit, execute, BasicAer\n\n backend = BasicAer.get_backend('qasm_simulator')\n\n qc = QuantumCircuit(5, 5)\n qc.h(0)\n qc.cx(0, range(1, 5))\n qc.measure_all()\n\n job = execute(qc, backend, shots=4321)\n \"\"\"\n\n # transpiling the circuits using given transpile options\n experiments = transpile(experiments,\n basis_gates=basis_gates,\n coupling_map=coupling_map,\n backend_properties=backend_properties,\n initial_layout=initial_layout,\n seed_transpiler=seed_transpiler,\n optimization_level=optimization_level,\n backend=backend,\n pass_manager=pass_manager,\n )\n\n if schedule_circuit:\n if isinstance(experiments, Schedule) or isinstance(experiments[0], Schedule):\n raise QiskitError(\"Must supply QuantumCircuit to schedule circuit.\")\n experiments = schedule(circuits=experiments,\n backend=backend,\n inst_map=inst_map,\n meas_map=meas_map,\n method=scheduling_method\n )\n\n # assembling the circuits into a qobj to be run on the backend\n qobj = assemble(experiments,\n qobj_id=qobj_id,\n qobj_header=qobj_header,\n shots=shots,\n memory=memory,\n max_credits=max_credits,\n seed_simulator=seed_simulator,\n default_qubit_los=default_qubit_los,\n default_meas_los=default_meas_los,\n schedule_los=schedule_los,\n meas_level=meas_level,\n meas_return=meas_return,\n memory_slots=memory_slots,\n memory_slot_size=memory_slot_size,\n rep_time=rep_time,\n parameter_binds=parameter_binds,\n backend=backend,\n **run_config\n )\n\n # executing the circuits on the backend and returning the job\n return backend.run(qobj, **run_config)\n", "path": "qiskit/execute.py"}]}
| 3,064 | 612 |
gh_patches_debug_13726
|
rasdani/github-patches
|
git_diff
|
ycm-core__ycmd-551
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ycmd should return valid JSON instead of empty HTML for 2 requests
/load_extra_conf_file and /ignore_extra_conf_file requests currently return an empty body
in case of success, which is not valid JSON. Instead, ycmd should return a valid JSON body, for example just "true".
--- END ISSUE ---
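To make the failure concrete: an empty body cannot be parsed by any JSON decoder, whereas a body consisting of just `true` can. A standalone illustration (plain Python, not code from the ycmd repository):

```python
import json

# An empty response body is not a JSON document, so a client expecting
# JSON from /load_extra_conf_file or /ignore_extra_conf_file fails to parse it.
try:
    json.loads("")
except json.JSONDecodeError as exc:
    print("empty body is invalid JSON:", exc)  # Expecting value: line 1 column 1 (char 0)

# The literal `true` is the smallest valid JSON success response.
print(json.loads("true"))  # True
```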
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ycmd/handlers.py`
Content:
```
1 # Copyright (C) 2013 Google Inc.
2 #
3 # This file is part of ycmd.
4 #
5 # ycmd is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # ycmd is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with ycmd. If not, see <http://www.gnu.org/licenses/>.
17
18 from __future__ import absolute_import
19 from __future__ import unicode_literals
20 from __future__ import print_function
21 from __future__ import division
22 from future import standard_library
23 standard_library.install_aliases()
24 from builtins import * # noqa
25
26 import atexit
27 import bottle
28 import json
29 import logging
30 import traceback
31 from bottle import request
32
33 import ycm_core
34 from ycmd import extra_conf_store, hmac_plugin, server_state, user_options_store
35 from ycmd.responses import BuildExceptionResponse, BuildCompletionResponse
36 from ycmd.request_wrap import RequestWrap
37 from ycmd.bottle_utils import SetResponseHeader
38 from ycmd.completers.completer_utils import FilterAndSortCandidatesWrap
39
40
41 # num bytes for the request body buffer; request.json only works if the request
42 # size is less than this
43 bottle.Request.MEMFILE_MAX = 1000 * 1024
44
45 _server_state = None
46 _hmac_secret = bytes()
47 _logger = logging.getLogger( __name__ )
48 app = bottle.Bottle()
49
50
51 @app.post( '/event_notification' )
52 def EventNotification():
53 _logger.info( 'Received event notification' )
54 request_data = RequestWrap( request.json )
55 event_name = request_data[ 'event_name' ]
56 _logger.debug( 'Event name: %s', event_name )
57
58 event_handler = 'On' + event_name
59 getattr( _server_state.GetGeneralCompleter(), event_handler )( request_data )
60
61 filetypes = request_data[ 'filetypes' ]
62 response_data = None
63 if _server_state.FiletypeCompletionUsable( filetypes ):
64 response_data = getattr( _server_state.GetFiletypeCompleter( filetypes ),
65 event_handler )( request_data )
66
67 if response_data:
68 return _JsonResponse( response_data )
69 return _JsonResponse( {} )
70
71
72 @app.post( '/run_completer_command' )
73 def RunCompleterCommand():
74 _logger.info( 'Received command request' )
75 request_data = RequestWrap( request.json )
76 completer = _GetCompleterForRequestData( request_data )
77
78 return _JsonResponse( completer.OnUserCommand(
79 request_data[ 'command_arguments' ],
80 request_data ) )
81
82
83 @app.post( '/completions' )
84 def GetCompletions():
85 _logger.info( 'Received completion request' )
86 request_data = RequestWrap( request.json )
87 ( do_filetype_completion, forced_filetype_completion ) = (
88 _server_state.ShouldUseFiletypeCompleter( request_data ) )
89 _logger.debug( 'Using filetype completion: %s', do_filetype_completion )
90
91 errors = None
92 completions = None
93
94 if do_filetype_completion:
95 try:
96 completions = ( _server_state.GetFiletypeCompleter(
97 request_data[ 'filetypes' ] )
98 .ComputeCandidates( request_data ) )
99
100 except Exception as exception:
101 if forced_filetype_completion:
102 # user explicitly asked for semantic completion, so just pass the error
103 # back
104 raise
105 else:
106 # store the error to be returned with results from the identifier
107 # completer
108 stack = traceback.format_exc()
109 _logger.error( 'Exception from semantic completer (using general): ' +
110 "".join( stack ) )
111 errors = [ BuildExceptionResponse( exception, stack ) ]
112
113 if not completions and not forced_filetype_completion:
114 completions = ( _server_state.GetGeneralCompleter()
115 .ComputeCandidates( request_data ) )
116
117 return _JsonResponse(
118 BuildCompletionResponse( completions if completions else [],
119 request_data.CompletionStartColumn(),
120 errors = errors ) )
121
122
123 @app.post( '/filter_and_sort_candidates' )
124 def FilterAndSortCandidates():
125 _logger.info( 'Received filter & sort request' )
126 # Not using RequestWrap because no need and the requests coming in aren't like
127 # the usual requests we handle.
128 request_data = request.json
129
130 return _JsonResponse( FilterAndSortCandidatesWrap(
131 request_data[ 'candidates'],
132 request_data[ 'sort_property' ],
133 request_data[ 'query' ] ) )
134
135
136 @app.get( '/healthy' )
137 def GetHealthy():
138 _logger.info( 'Received health request' )
139 if request.query.include_subservers:
140 cs_completer = _server_state.GetFiletypeCompleter( ['cs'] )
141 return _JsonResponse( cs_completer.ServerIsHealthy() )
142 return _JsonResponse( True )
143
144
145 @app.get( '/ready' )
146 def GetReady():
147 _logger.info( 'Received ready request' )
148 if request.query.subserver:
149 filetype = request.query.subserver
150 return _JsonResponse( _IsSubserverReady( filetype ) )
151 if request.query.include_subservers:
152 return _JsonResponse( _IsSubserverReady( 'cs' ) )
153 return _JsonResponse( True )
154
155
156 def _IsSubserverReady( filetype ):
157 completer = _server_state.GetFiletypeCompleter( [filetype] )
158 return completer.ServerIsReady()
159
160
161 @app.post( '/semantic_completion_available' )
162 def FiletypeCompletionAvailable():
163 _logger.info( 'Received filetype completion available request' )
164 return _JsonResponse( _server_state.FiletypeCompletionAvailable(
165 RequestWrap( request.json )[ 'filetypes' ] ) )
166
167
168 @app.post( '/defined_subcommands' )
169 def DefinedSubcommands():
170 _logger.info( 'Received defined subcommands request' )
171 completer = _GetCompleterForRequestData( RequestWrap( request.json ) )
172
173 return _JsonResponse( completer.DefinedSubcommands() )
174
175
176 @app.post( '/detailed_diagnostic' )
177 def GetDetailedDiagnostic():
178 _logger.info( 'Received detailed diagnostic request' )
179 request_data = RequestWrap( request.json )
180 completer = _GetCompleterForRequestData( request_data )
181
182 return _JsonResponse( completer.GetDetailedDiagnostic( request_data ) )
183
184
185 @app.post( '/load_extra_conf_file' )
186 def LoadExtraConfFile():
187 _logger.info( 'Received extra conf load request' )
188 request_data = RequestWrap( request.json, validate = False )
189 extra_conf_store.Load( request_data[ 'filepath' ], force = True )
190
191
192 @app.post( '/ignore_extra_conf_file' )
193 def IgnoreExtraConfFile():
194 _logger.info( 'Received extra conf ignore request' )
195 request_data = RequestWrap( request.json, validate = False )
196 extra_conf_store.Disable( request_data[ 'filepath' ] )
197
198
199 @app.post( '/debug_info' )
200 def DebugInfo():
201 _logger.info( 'Received debug info request' )
202
203 output = []
204 has_clang_support = ycm_core.HasClangSupport()
205 output.append( 'Server has Clang support compiled in: {0}'.format(
206 has_clang_support ) )
207
208 if has_clang_support:
209 output.append( 'Clang version: ' + ycm_core.ClangVersion() )
210
211 request_data = RequestWrap( request.json )
212 try:
213 output.append(
214 _GetCompleterForRequestData( request_data ).DebugInfo( request_data ) )
215 except Exception:
216 _logger.debug( 'Exception in debug info request: '
217 + traceback.format_exc() )
218
219 return _JsonResponse( '\n'.join( output ) )
220
221
222 # The type of the param is Bottle.HTTPError
223 def ErrorHandler( httperror ):
224 body = _JsonResponse( BuildExceptionResponse( httperror.exception,
225 httperror.traceback ) )
226 hmac_plugin.SetHmacHeader( body, _hmac_secret )
227 return body
228
229 # For every error Bottle encounters it will use this as the default handler
230 app.default_error_handler = ErrorHandler
231
232
233 def _JsonResponse( data ):
234 SetResponseHeader( 'Content-Type', 'application/json' )
235 return json.dumps( data, default = _UniversalSerialize )
236
237
238 def _UniversalSerialize( obj ):
239 try:
240 serialized = obj.__dict__.copy()
241 serialized[ 'TYPE' ] = type( obj ).__name__
242 return serialized
243 except AttributeError:
244 return str( obj )
245
246
247 def _GetCompleterForRequestData( request_data ):
248 completer_target = request_data.get( 'completer_target', None )
249
250 if completer_target == 'identifier':
251 return _server_state.GetGeneralCompleter().GetIdentifierCompleter()
252 elif completer_target == 'filetype_default' or not completer_target:
253 return _server_state.GetFiletypeCompleter( request_data[ 'filetypes' ] )
254 else:
255 return _server_state.GetFiletypeCompleter( [ completer_target ] )
256
257
258 @atexit.register
259 def ServerShutdown():
260 _logger.info( 'Server shutting down' )
261 if _server_state:
262 _server_state.Shutdown()
263 extra_conf_store.Shutdown()
264
265
266 def SetHmacSecret( hmac_secret ):
267 global _hmac_secret
268 _hmac_secret = hmac_secret
269
270
271 def UpdateUserOptions( options ):
272 global _server_state
273
274 if not options:
275 return
276
277 # This should never be passed in, but let's try to remove it just in case.
278 options.pop( 'hmac_secret', None )
279 user_options_store.SetAll( options )
280 _server_state = server_state.ServerState( options )
281
282
283 def SetServerStateToDefaults():
284 global _server_state, _logger
285 _logger = logging.getLogger( __name__ )
286 user_options_store.LoadDefaults()
287 _server_state = server_state.ServerState( user_options_store.GetAll() )
288 extra_conf_store.Reset()
289
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ycmd/handlers.py b/ycmd/handlers.py
--- a/ycmd/handlers.py
+++ b/ycmd/handlers.py
@@ -188,6 +188,8 @@
request_data = RequestWrap( request.json, validate = False )
extra_conf_store.Load( request_data[ 'filepath' ], force = True )
+ return _JsonResponse( True )
+
@app.post( '/ignore_extra_conf_file' )
def IgnoreExtraConfFile():
@@ -195,6 +197,8 @@
request_data = RequestWrap( request.json, validate = False )
extra_conf_store.Disable( request_data[ 'filepath' ] )
+ return _JsonResponse( True )
+
@app.post( '/debug_info' )
def DebugInfo():
|
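A quick way to sanity-check the patched behaviour is to hit both endpoints and parse the responses. The sketch below is illustrative only: the port is arbitrary, the file path is a placeholder, and ycmd's HMAC request signing (see `hmac_plugin` in the handlers module) is omitted for brevity, so it is not a drop-in test.

```python
import requests

BASE = "http://localhost:8080"  # assumed local ycmd instance; port is arbitrary

for endpoint in ("/load_extra_conf_file", "/ignore_extra_conf_file"):
    # Placeholder filepath; real clients must also attach the HMAC header.
    resp = requests.post(BASE + endpoint,
                         json={"filepath": "/path/to/.ycm_extra_conf.py"})
    # With the patch applied the body is the JSON literal `true`,
    # so .json() succeeds instead of failing on an empty body.
    assert resp.json() is True
```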
{"golden_diff": "diff --git a/ycmd/handlers.py b/ycmd/handlers.py\n--- a/ycmd/handlers.py\n+++ b/ycmd/handlers.py\n@@ -188,6 +188,8 @@\n request_data = RequestWrap( request.json, validate = False )\n extra_conf_store.Load( request_data[ 'filepath' ], force = True )\n \n+ return _JsonResponse( True )\n+\n \n @app.post( '/ignore_extra_conf_file' )\n def IgnoreExtraConfFile():\n@@ -195,6 +197,8 @@\n request_data = RequestWrap( request.json, validate = False )\n extra_conf_store.Disable( request_data[ 'filepath' ] )\n \n+ return _JsonResponse( True )\n+\n \n @app.post( '/debug_info' )\n def DebugInfo():\n", "issue": "ycmd should return valid JSON instead of empty HTML for 2 requests\n/load_extra_conf_file and /ignore_extra_conf_file requests currently return an empty body\nin case of success, which is not valid JSON. Instead, ycmd should return valid JSON body, for example just \"true\".\n\n", "before_files": [{"content": "# Copyright (C) 2013 Google Inc.\n#\n# This file is part of ycmd.\n#\n# ycmd is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# ycmd is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with ycmd. If not, see <http://www.gnu.org/licenses/>.\n\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import division\nfrom future import standard_library\nstandard_library.install_aliases()\nfrom builtins import * # noqa\n\nimport atexit\nimport bottle\nimport json\nimport logging\nimport traceback\nfrom bottle import request\n\nimport ycm_core\nfrom ycmd import extra_conf_store, hmac_plugin, server_state, user_options_store\nfrom ycmd.responses import BuildExceptionResponse, BuildCompletionResponse\nfrom ycmd.request_wrap import RequestWrap\nfrom ycmd.bottle_utils import SetResponseHeader\nfrom ycmd.completers.completer_utils import FilterAndSortCandidatesWrap\n\n\n# num bytes for the request body buffer; request.json only works if the request\n# size is less than this\nbottle.Request.MEMFILE_MAX = 1000 * 1024\n\n_server_state = None\n_hmac_secret = bytes()\n_logger = logging.getLogger( __name__ )\napp = bottle.Bottle()\n\n\[email protected]( '/event_notification' )\ndef EventNotification():\n _logger.info( 'Received event notification' )\n request_data = RequestWrap( request.json )\n event_name = request_data[ 'event_name' ]\n _logger.debug( 'Event name: %s', event_name )\n\n event_handler = 'On' + event_name\n getattr( _server_state.GetGeneralCompleter(), event_handler )( request_data )\n\n filetypes = request_data[ 'filetypes' ]\n response_data = None\n if _server_state.FiletypeCompletionUsable( filetypes ):\n response_data = getattr( _server_state.GetFiletypeCompleter( filetypes ),\n event_handler )( request_data )\n\n if response_data:\n return _JsonResponse( response_data )\n return _JsonResponse( {} )\n\n\[email protected]( '/run_completer_command' )\ndef RunCompleterCommand():\n _logger.info( 'Received command request' )\n request_data = RequestWrap( request.json )\n completer = _GetCompleterForRequestData( request_data )\n\n return _JsonResponse( 
completer.OnUserCommand(\n request_data[ 'command_arguments' ],\n request_data ) )\n\n\[email protected]( '/completions' )\ndef GetCompletions():\n _logger.info( 'Received completion request' )\n request_data = RequestWrap( request.json )\n ( do_filetype_completion, forced_filetype_completion ) = (\n _server_state.ShouldUseFiletypeCompleter( request_data ) )\n _logger.debug( 'Using filetype completion: %s', do_filetype_completion )\n\n errors = None\n completions = None\n\n if do_filetype_completion:\n try:\n completions = ( _server_state.GetFiletypeCompleter(\n request_data[ 'filetypes' ] )\n .ComputeCandidates( request_data ) )\n\n except Exception as exception:\n if forced_filetype_completion:\n # user explicitly asked for semantic completion, so just pass the error\n # back\n raise\n else:\n # store the error to be returned with results from the identifier\n # completer\n stack = traceback.format_exc()\n _logger.error( 'Exception from semantic completer (using general): ' +\n \"\".join( stack ) )\n errors = [ BuildExceptionResponse( exception, stack ) ]\n\n if not completions and not forced_filetype_completion:\n completions = ( _server_state.GetGeneralCompleter()\n .ComputeCandidates( request_data ) )\n\n return _JsonResponse(\n BuildCompletionResponse( completions if completions else [],\n request_data.CompletionStartColumn(),\n errors = errors ) )\n\n\[email protected]( '/filter_and_sort_candidates' )\ndef FilterAndSortCandidates():\n _logger.info( 'Received filter & sort request' )\n # Not using RequestWrap because no need and the requests coming in aren't like\n # the usual requests we handle.\n request_data = request.json\n\n return _JsonResponse( FilterAndSortCandidatesWrap(\n request_data[ 'candidates'],\n request_data[ 'sort_property' ],\n request_data[ 'query' ] ) )\n\n\[email protected]( '/healthy' )\ndef GetHealthy():\n _logger.info( 'Received health request' )\n if request.query.include_subservers:\n cs_completer = _server_state.GetFiletypeCompleter( ['cs'] )\n return _JsonResponse( cs_completer.ServerIsHealthy() )\n return _JsonResponse( True )\n\n\[email protected]( '/ready' )\ndef GetReady():\n _logger.info( 'Received ready request' )\n if request.query.subserver:\n filetype = request.query.subserver\n return _JsonResponse( _IsSubserverReady( filetype ) )\n if request.query.include_subservers:\n return _JsonResponse( _IsSubserverReady( 'cs' ) )\n return _JsonResponse( True )\n\n\ndef _IsSubserverReady( filetype ):\n completer = _server_state.GetFiletypeCompleter( [filetype] )\n return completer.ServerIsReady()\n\n\[email protected]( '/semantic_completion_available' )\ndef FiletypeCompletionAvailable():\n _logger.info( 'Received filetype completion available request' )\n return _JsonResponse( _server_state.FiletypeCompletionAvailable(\n RequestWrap( request.json )[ 'filetypes' ] ) )\n\n\[email protected]( '/defined_subcommands' )\ndef DefinedSubcommands():\n _logger.info( 'Received defined subcommands request' )\n completer = _GetCompleterForRequestData( RequestWrap( request.json ) )\n\n return _JsonResponse( completer.DefinedSubcommands() )\n\n\[email protected]( '/detailed_diagnostic' )\ndef GetDetailedDiagnostic():\n _logger.info( 'Received detailed diagnostic request' )\n request_data = RequestWrap( request.json )\n completer = _GetCompleterForRequestData( request_data )\n\n return _JsonResponse( completer.GetDetailedDiagnostic( request_data ) )\n\n\[email protected]( '/load_extra_conf_file' )\ndef LoadExtraConfFile():\n _logger.info( 'Received extra conf load 
request' )\n request_data = RequestWrap( request.json, validate = False )\n extra_conf_store.Load( request_data[ 'filepath' ], force = True )\n\n\[email protected]( '/ignore_extra_conf_file' )\ndef IgnoreExtraConfFile():\n _logger.info( 'Received extra conf ignore request' )\n request_data = RequestWrap( request.json, validate = False )\n extra_conf_store.Disable( request_data[ 'filepath' ] )\n\n\[email protected]( '/debug_info' )\ndef DebugInfo():\n _logger.info( 'Received debug info request' )\n\n output = []\n has_clang_support = ycm_core.HasClangSupport()\n output.append( 'Server has Clang support compiled in: {0}'.format(\n has_clang_support ) )\n\n if has_clang_support:\n output.append( 'Clang version: ' + ycm_core.ClangVersion() )\n\n request_data = RequestWrap( request.json )\n try:\n output.append(\n _GetCompleterForRequestData( request_data ).DebugInfo( request_data ) )\n except Exception:\n _logger.debug( 'Exception in debug info request: '\n + traceback.format_exc() )\n\n return _JsonResponse( '\\n'.join( output ) )\n\n\n# The type of the param is Bottle.HTTPError\ndef ErrorHandler( httperror ):\n body = _JsonResponse( BuildExceptionResponse( httperror.exception,\n httperror.traceback ) )\n hmac_plugin.SetHmacHeader( body, _hmac_secret )\n return body\n\n# For every error Bottle encounters it will use this as the default handler\napp.default_error_handler = ErrorHandler\n\n\ndef _JsonResponse( data ):\n SetResponseHeader( 'Content-Type', 'application/json' )\n return json.dumps( data, default = _UniversalSerialize )\n\n\ndef _UniversalSerialize( obj ):\n try:\n serialized = obj.__dict__.copy()\n serialized[ 'TYPE' ] = type( obj ).__name__\n return serialized\n except AttributeError:\n return str( obj )\n\n\ndef _GetCompleterForRequestData( request_data ):\n completer_target = request_data.get( 'completer_target', None )\n\n if completer_target == 'identifier':\n return _server_state.GetGeneralCompleter().GetIdentifierCompleter()\n elif completer_target == 'filetype_default' or not completer_target:\n return _server_state.GetFiletypeCompleter( request_data[ 'filetypes' ] )\n else:\n return _server_state.GetFiletypeCompleter( [ completer_target ] )\n\n\[email protected]\ndef ServerShutdown():\n _logger.info( 'Server shutting down' )\n if _server_state:\n _server_state.Shutdown()\n extra_conf_store.Shutdown()\n\n\ndef SetHmacSecret( hmac_secret ):\n global _hmac_secret\n _hmac_secret = hmac_secret\n\n\ndef UpdateUserOptions( options ):\n global _server_state\n\n if not options:\n return\n\n # This should never be passed in, but let's try to remove it just in case.\n options.pop( 'hmac_secret', None )\n user_options_store.SetAll( options )\n _server_state = server_state.ServerState( options )\n\n\ndef SetServerStateToDefaults():\n global _server_state, _logger\n _logger = logging.getLogger( __name__ )\n user_options_store.LoadDefaults()\n _server_state = server_state.ServerState( user_options_store.GetAll() )\n extra_conf_store.Reset()\n", "path": "ycmd/handlers.py"}], "after_files": [{"content": "# Copyright (C) 2013 Google Inc.\n#\n# This file is part of ycmd.\n#\n# ycmd is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# ycmd is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR 
PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with ycmd. If not, see <http://www.gnu.org/licenses/>.\n\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import division\nfrom future import standard_library\nstandard_library.install_aliases()\nfrom builtins import * # noqa\n\nimport atexit\nimport bottle\nimport json\nimport logging\nimport traceback\nfrom bottle import request\n\nimport ycm_core\nfrom ycmd import extra_conf_store, hmac_plugin, server_state, user_options_store\nfrom ycmd.responses import BuildExceptionResponse, BuildCompletionResponse\nfrom ycmd.request_wrap import RequestWrap\nfrom ycmd.bottle_utils import SetResponseHeader\nfrom ycmd.completers.completer_utils import FilterAndSortCandidatesWrap\n\n\n# num bytes for the request body buffer; request.json only works if the request\n# size is less than this\nbottle.Request.MEMFILE_MAX = 1000 * 1024\n\n_server_state = None\n_hmac_secret = bytes()\n_logger = logging.getLogger( __name__ )\napp = bottle.Bottle()\n\n\[email protected]( '/event_notification' )\ndef EventNotification():\n _logger.info( 'Received event notification' )\n request_data = RequestWrap( request.json )\n event_name = request_data[ 'event_name' ]\n _logger.debug( 'Event name: %s', event_name )\n\n event_handler = 'On' + event_name\n getattr( _server_state.GetGeneralCompleter(), event_handler )( request_data )\n\n filetypes = request_data[ 'filetypes' ]\n response_data = None\n if _server_state.FiletypeCompletionUsable( filetypes ):\n response_data = getattr( _server_state.GetFiletypeCompleter( filetypes ),\n event_handler )( request_data )\n\n if response_data:\n return _JsonResponse( response_data )\n return _JsonResponse( {} )\n\n\[email protected]( '/run_completer_command' )\ndef RunCompleterCommand():\n _logger.info( 'Received command request' )\n request_data = RequestWrap( request.json )\n completer = _GetCompleterForRequestData( request_data )\n\n return _JsonResponse( completer.OnUserCommand(\n request_data[ 'command_arguments' ],\n request_data ) )\n\n\[email protected]( '/completions' )\ndef GetCompletions():\n _logger.info( 'Received completion request' )\n request_data = RequestWrap( request.json )\n ( do_filetype_completion, forced_filetype_completion ) = (\n _server_state.ShouldUseFiletypeCompleter( request_data ) )\n _logger.debug( 'Using filetype completion: %s', do_filetype_completion )\n\n errors = None\n completions = None\n\n if do_filetype_completion:\n try:\n completions = ( _server_state.GetFiletypeCompleter(\n request_data[ 'filetypes' ] )\n .ComputeCandidates( request_data ) )\n\n except Exception as exception:\n if forced_filetype_completion:\n # user explicitly asked for semantic completion, so just pass the error\n # back\n raise\n else:\n # store the error to be returned with results from the identifier\n # completer\n stack = traceback.format_exc()\n _logger.error( 'Exception from semantic completer (using general): ' +\n \"\".join( stack ) )\n errors = [ BuildExceptionResponse( exception, stack ) ]\n\n if not completions and not forced_filetype_completion:\n completions = ( _server_state.GetGeneralCompleter()\n .ComputeCandidates( request_data ) )\n\n return _JsonResponse(\n BuildCompletionResponse( completions if completions else [],\n request_data.CompletionStartColumn(),\n errors = errors ) )\n\n\[email protected]( 
'/filter_and_sort_candidates' )\ndef FilterAndSortCandidates():\n _logger.info( 'Received filter & sort request' )\n # Not using RequestWrap because no need and the requests coming in aren't like\n # the usual requests we handle.\n request_data = request.json\n\n return _JsonResponse( FilterAndSortCandidatesWrap(\n request_data[ 'candidates'],\n request_data[ 'sort_property' ],\n request_data[ 'query' ] ) )\n\n\[email protected]( '/healthy' )\ndef GetHealthy():\n _logger.info( 'Received health request' )\n if request.query.include_subservers:\n cs_completer = _server_state.GetFiletypeCompleter( ['cs'] )\n return _JsonResponse( cs_completer.ServerIsHealthy() )\n return _JsonResponse( True )\n\n\[email protected]( '/ready' )\ndef GetReady():\n _logger.info( 'Received ready request' )\n if request.query.subserver:\n filetype = request.query.subserver\n return _JsonResponse( _IsSubserverReady( filetype ) )\n if request.query.include_subservers:\n return _JsonResponse( _IsSubserverReady( 'cs' ) )\n return _JsonResponse( True )\n\n\ndef _IsSubserverReady( filetype ):\n completer = _server_state.GetFiletypeCompleter( [filetype] )\n return completer.ServerIsReady()\n\n\[email protected]( '/semantic_completion_available' )\ndef FiletypeCompletionAvailable():\n _logger.info( 'Received filetype completion available request' )\n return _JsonResponse( _server_state.FiletypeCompletionAvailable(\n RequestWrap( request.json )[ 'filetypes' ] ) )\n\n\[email protected]( '/defined_subcommands' )\ndef DefinedSubcommands():\n _logger.info( 'Received defined subcommands request' )\n completer = _GetCompleterForRequestData( RequestWrap( request.json ) )\n\n return _JsonResponse( completer.DefinedSubcommands() )\n\n\[email protected]( '/detailed_diagnostic' )\ndef GetDetailedDiagnostic():\n _logger.info( 'Received detailed diagnostic request' )\n request_data = RequestWrap( request.json )\n completer = _GetCompleterForRequestData( request_data )\n\n return _JsonResponse( completer.GetDetailedDiagnostic( request_data ) )\n\n\[email protected]( '/load_extra_conf_file' )\ndef LoadExtraConfFile():\n _logger.info( 'Received extra conf load request' )\n request_data = RequestWrap( request.json, validate = False )\n extra_conf_store.Load( request_data[ 'filepath' ], force = True )\n\n return _JsonResponse( True )\n\n\[email protected]( '/ignore_extra_conf_file' )\ndef IgnoreExtraConfFile():\n _logger.info( 'Received extra conf ignore request' )\n request_data = RequestWrap( request.json, validate = False )\n extra_conf_store.Disable( request_data[ 'filepath' ] )\n\n return _JsonResponse( True )\n\n\[email protected]( '/debug_info' )\ndef DebugInfo():\n _logger.info( 'Received debug info request' )\n\n output = []\n has_clang_support = ycm_core.HasClangSupport()\n output.append( 'Server has Clang support compiled in: {0}'.format(\n has_clang_support ) )\n\n if has_clang_support:\n output.append( 'Clang version: ' + ycm_core.ClangVersion() )\n\n request_data = RequestWrap( request.json )\n try:\n output.append(\n _GetCompleterForRequestData( request_data ).DebugInfo( request_data ) )\n except Exception:\n _logger.debug( 'Exception in debug info request: '\n + traceback.format_exc() )\n\n return _JsonResponse( '\\n'.join( output ) )\n\n\n# The type of the param is Bottle.HTTPError\ndef ErrorHandler( httperror ):\n body = _JsonResponse( BuildExceptionResponse( httperror.exception,\n httperror.traceback ) )\n hmac_plugin.SetHmacHeader( body, _hmac_secret )\n return body\n\n# For every error Bottle encounters it will use this 
as the default handler\napp.default_error_handler = ErrorHandler\n\n\ndef _JsonResponse( data ):\n SetResponseHeader( 'Content-Type', 'application/json' )\n return json.dumps( data, default = _UniversalSerialize )\n\n\ndef _UniversalSerialize( obj ):\n try:\n serialized = obj.__dict__.copy()\n serialized[ 'TYPE' ] = type( obj ).__name__\n return serialized\n except AttributeError:\n return str( obj )\n\n\ndef _GetCompleterForRequestData( request_data ):\n completer_target = request_data.get( 'completer_target', None )\n\n if completer_target == 'identifier':\n return _server_state.GetGeneralCompleter().GetIdentifierCompleter()\n elif completer_target == 'filetype_default' or not completer_target:\n return _server_state.GetFiletypeCompleter( request_data[ 'filetypes' ] )\n else:\n return _server_state.GetFiletypeCompleter( [ completer_target ] )\n\n\[email protected]\ndef ServerShutdown():\n _logger.info( 'Server shutting down' )\n if _server_state:\n _server_state.Shutdown()\n extra_conf_store.Shutdown()\n\n\ndef SetHmacSecret( hmac_secret ):\n global _hmac_secret\n _hmac_secret = hmac_secret\n\n\ndef UpdateUserOptions( options ):\n global _server_state\n\n if not options:\n return\n\n # This should never be passed in, but let's try to remove it just in case.\n options.pop( 'hmac_secret', None )\n user_options_store.SetAll( options )\n _server_state = server_state.ServerState( options )\n\n\ndef SetServerStateToDefaults():\n global _server_state, _logger\n _logger = logging.getLogger( __name__ )\n user_options_store.LoadDefaults()\n _server_state = server_state.ServerState( user_options_store.GetAll() )\n extra_conf_store.Reset()\n", "path": "ycmd/handlers.py"}]}
| 3,317 | 182 |
gh_patches_debug_5100
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-contrib-1748
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
httpx tests failing on httpx==0.23.1
Can we leave https://github.com/open-telemetry/opentelemetry-python-contrib/issues/1459 open until we actually fix the instrumentation to work with that version?
_Originally posted by @aabmass in https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1460#pullrequestreview-1186403709_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 # DO NOT EDIT. THIS FILE WAS AUTOGENERATED FROM INSTRUMENTATION PACKAGES.
16 # RUN `python scripts/generate_instrumentation_bootstrap.py` TO REGENERATE.
17
18 libraries = {
19 "aio_pika": {
20 "library": "aio_pika >= 7.2.0, < 10.0.0",
21 "instrumentation": "opentelemetry-instrumentation-aio-pika==0.40b0.dev",
22 },
23 "aiohttp": {
24 "library": "aiohttp ~= 3.0",
25 "instrumentation": "opentelemetry-instrumentation-aiohttp-client==0.40b0.dev",
26 },
27 "aiopg": {
28 "library": "aiopg >= 0.13.0, < 2.0.0",
29 "instrumentation": "opentelemetry-instrumentation-aiopg==0.40b0.dev",
30 },
31 "asgiref": {
32 "library": "asgiref ~= 3.0",
33 "instrumentation": "opentelemetry-instrumentation-asgi==0.40b0.dev",
34 },
35 "asyncpg": {
36 "library": "asyncpg >= 0.12.0",
37 "instrumentation": "opentelemetry-instrumentation-asyncpg==0.40b0.dev",
38 },
39 "boto": {
40 "library": "boto~=2.0",
41 "instrumentation": "opentelemetry-instrumentation-boto==0.40b0.dev",
42 },
43 "boto3": {
44 "library": "boto3 ~= 1.0",
45 "instrumentation": "opentelemetry-instrumentation-boto3sqs==0.40b0.dev",
46 },
47 "botocore": {
48 "library": "botocore ~= 1.0",
49 "instrumentation": "opentelemetry-instrumentation-botocore==0.40b0.dev",
50 },
51 "celery": {
52 "library": "celery >= 4.0, < 6.0",
53 "instrumentation": "opentelemetry-instrumentation-celery==0.40b0.dev",
54 },
55 "confluent-kafka": {
56 "library": "confluent-kafka >= 1.8.2, < 2.0.0",
57 "instrumentation": "opentelemetry-instrumentation-confluent-kafka==0.40b0.dev",
58 },
59 "django": {
60 "library": "django >= 1.10",
61 "instrumentation": "opentelemetry-instrumentation-django==0.40b0.dev",
62 },
63 "elasticsearch": {
64 "library": "elasticsearch >= 2.0",
65 "instrumentation": "opentelemetry-instrumentation-elasticsearch==0.40b0.dev",
66 },
67 "falcon": {
68 "library": "falcon >= 1.4.1, < 4.0.0",
69 "instrumentation": "opentelemetry-instrumentation-falcon==0.40b0.dev",
70 },
71 "fastapi": {
72 "library": "fastapi ~= 0.58",
73 "instrumentation": "opentelemetry-instrumentation-fastapi==0.40b0.dev",
74 },
75 "flask": {
76 "library": "flask >= 1.0, < 3.0",
77 "instrumentation": "opentelemetry-instrumentation-flask==0.40b0.dev",
78 },
79 "grpcio": {
80 "library": "grpcio ~= 1.27",
81 "instrumentation": "opentelemetry-instrumentation-grpc==0.40b0.dev",
82 },
83 "httpx": {
84 "library": "httpx >= 0.18.0, <= 0.23.0",
85 "instrumentation": "opentelemetry-instrumentation-httpx==0.40b0.dev",
86 },
87 "jinja2": {
88 "library": "jinja2 >= 2.7, < 4.0",
89 "instrumentation": "opentelemetry-instrumentation-jinja2==0.40b0.dev",
90 },
91 "kafka-python": {
92 "library": "kafka-python >= 2.0",
93 "instrumentation": "opentelemetry-instrumentation-kafka-python==0.40b0.dev",
94 },
95 "mysql-connector-python": {
96 "library": "mysql-connector-python ~= 8.0",
97 "instrumentation": "opentelemetry-instrumentation-mysql==0.40b0.dev",
98 },
99 "pika": {
100 "library": "pika >= 0.12.0",
101 "instrumentation": "opentelemetry-instrumentation-pika==0.40b0.dev",
102 },
103 "psycopg2": {
104 "library": "psycopg2 >= 2.7.3.1",
105 "instrumentation": "opentelemetry-instrumentation-psycopg2==0.40b0.dev",
106 },
107 "pymemcache": {
108 "library": "pymemcache >= 1.3.5, < 5",
109 "instrumentation": "opentelemetry-instrumentation-pymemcache==0.40b0.dev",
110 },
111 "pymongo": {
112 "library": "pymongo >= 3.1, < 5.0",
113 "instrumentation": "opentelemetry-instrumentation-pymongo==0.40b0.dev",
114 },
115 "PyMySQL": {
116 "library": "PyMySQL < 2",
117 "instrumentation": "opentelemetry-instrumentation-pymysql==0.40b0.dev",
118 },
119 "pyramid": {
120 "library": "pyramid >= 1.7",
121 "instrumentation": "opentelemetry-instrumentation-pyramid==0.40b0.dev",
122 },
123 "redis": {
124 "library": "redis >= 2.6",
125 "instrumentation": "opentelemetry-instrumentation-redis==0.40b0.dev",
126 },
127 "remoulade": {
128 "library": "remoulade >= 0.50",
129 "instrumentation": "opentelemetry-instrumentation-remoulade==0.40b0.dev",
130 },
131 "requests": {
132 "library": "requests ~= 2.0",
133 "instrumentation": "opentelemetry-instrumentation-requests==0.40b0.dev",
134 },
135 "scikit-learn": {
136 "library": "scikit-learn ~= 0.24.0",
137 "instrumentation": "opentelemetry-instrumentation-sklearn==0.40b0.dev",
138 },
139 "sqlalchemy": {
140 "library": "sqlalchemy",
141 "instrumentation": "opentelemetry-instrumentation-sqlalchemy==0.40b0.dev",
142 },
143 "starlette": {
144 "library": "starlette ~= 0.13.0",
145 "instrumentation": "opentelemetry-instrumentation-starlette==0.40b0.dev",
146 },
147 "psutil": {
148 "library": "psutil >= 5",
149 "instrumentation": "opentelemetry-instrumentation-system-metrics==0.40b0.dev",
150 },
151 "tornado": {
152 "library": "tornado >= 5.1.1",
153 "instrumentation": "opentelemetry-instrumentation-tornado==0.40b0.dev",
154 },
155 "tortoise-orm": {
156 "library": "tortoise-orm >= 0.17.0",
157 "instrumentation": "opentelemetry-instrumentation-tortoiseorm==0.40b0.dev",
158 },
159 "pydantic": {
160 "library": "pydantic >= 1.10.2",
161 "instrumentation": "opentelemetry-instrumentation-tortoiseorm==0.40b0.dev",
162 },
163 "urllib3": {
164 "library": "urllib3 >= 1.0.0, < 2.0.0",
165 "instrumentation": "opentelemetry-instrumentation-urllib3==0.40b0.dev",
166 },
167 }
168 default_instrumentations = [
169 "opentelemetry-instrumentation-aws-lambda==0.40b0.dev",
170 "opentelemetry-instrumentation-dbapi==0.40b0.dev",
171 "opentelemetry-instrumentation-logging==0.40b0.dev",
172 "opentelemetry-instrumentation-sqlite3==0.40b0.dev",
173 "opentelemetry-instrumentation-urllib==0.40b0.dev",
174 "opentelemetry-instrumentation-wsgi==0.40b0.dev",
175 ]
176
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py
--- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py
+++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py
@@ -81,7 +81,7 @@
"instrumentation": "opentelemetry-instrumentation-grpc==0.40b0.dev",
},
"httpx": {
- "library": "httpx >= 0.18.0, <= 0.23.0",
+ "library": "httpx >= 0.18.0",
"instrumentation": "opentelemetry-instrumentation-httpx==0.40b0.dev",
},
"jinja2": {
|
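The patch only widens the declared httpx range in the bootstrap table; since the file header marks it as autogenerated from the instrumentation packages, the range presumably also needs updating where it originates. The effect of dropping the upper bound can be sanity-checked with the `packaging` library; this snippet is illustrative and not part of the repository:

```python
from packaging.specifiers import SpecifierSet

old_pin = SpecifierSet(">=0.18.0,<=0.23.0")
new_pin = SpecifierSet(">=0.18.0")

print("0.23.1" in old_pin)  # False -- the capped range rejects httpx 0.23.1
print("0.23.1" in new_pin)  # True  -- removing the cap accepts it and later releases
```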
{"golden_diff": "diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py\n--- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py\n+++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py\n@@ -81,7 +81,7 @@\n \"instrumentation\": \"opentelemetry-instrumentation-grpc==0.40b0.dev\",\n },\n \"httpx\": {\n- \"library\": \"httpx >= 0.18.0, <= 0.23.0\",\n+ \"library\": \"httpx >= 0.18.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-httpx==0.40b0.dev\",\n },\n \"jinja2\": {\n", "issue": "httpx tests failing on httpx==0.23.1\n Can we leave https://github.com/open-telemetry/opentelemetry-python-contrib/issues/1459 open until we actually fix the instrumentation to work with that version?\r\n\r\n_Originally posted by @aabmass in https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1460#pullrequestreview-1186403709_\r\n \n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# DO NOT EDIT. THIS FILE WAS AUTOGENERATED FROM INSTRUMENTATION PACKAGES.\n# RUN `python scripts/generate_instrumentation_bootstrap.py` TO REGENERATE.\n\nlibraries = {\n \"aio_pika\": {\n \"library\": \"aio_pika >= 7.2.0, < 10.0.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-aio-pika==0.40b0.dev\",\n },\n \"aiohttp\": {\n \"library\": \"aiohttp ~= 3.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-aiohttp-client==0.40b0.dev\",\n },\n \"aiopg\": {\n \"library\": \"aiopg >= 0.13.0, < 2.0.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-aiopg==0.40b0.dev\",\n },\n \"asgiref\": {\n \"library\": \"asgiref ~= 3.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-asgi==0.40b0.dev\",\n },\n \"asyncpg\": {\n \"library\": \"asyncpg >= 0.12.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-asyncpg==0.40b0.dev\",\n },\n \"boto\": {\n \"library\": \"boto~=2.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-boto==0.40b0.dev\",\n },\n \"boto3\": {\n \"library\": \"boto3 ~= 1.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-boto3sqs==0.40b0.dev\",\n },\n \"botocore\": {\n \"library\": \"botocore ~= 1.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-botocore==0.40b0.dev\",\n },\n \"celery\": {\n \"library\": \"celery >= 4.0, < 6.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-celery==0.40b0.dev\",\n },\n \"confluent-kafka\": {\n \"library\": \"confluent-kafka >= 1.8.2, < 2.0.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-confluent-kafka==0.40b0.dev\",\n },\n \"django\": {\n \"library\": \"django >= 1.10\",\n \"instrumentation\": \"opentelemetry-instrumentation-django==0.40b0.dev\",\n },\n \"elasticsearch\": {\n \"library\": \"elasticsearch >= 2.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-elasticsearch==0.40b0.dev\",\n },\n \"falcon\": {\n \"library\": 
\"falcon >= 1.4.1, < 4.0.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-falcon==0.40b0.dev\",\n },\n \"fastapi\": {\n \"library\": \"fastapi ~= 0.58\",\n \"instrumentation\": \"opentelemetry-instrumentation-fastapi==0.40b0.dev\",\n },\n \"flask\": {\n \"library\": \"flask >= 1.0, < 3.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-flask==0.40b0.dev\",\n },\n \"grpcio\": {\n \"library\": \"grpcio ~= 1.27\",\n \"instrumentation\": \"opentelemetry-instrumentation-grpc==0.40b0.dev\",\n },\n \"httpx\": {\n \"library\": \"httpx >= 0.18.0, <= 0.23.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-httpx==0.40b0.dev\",\n },\n \"jinja2\": {\n \"library\": \"jinja2 >= 2.7, < 4.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-jinja2==0.40b0.dev\",\n },\n \"kafka-python\": {\n \"library\": \"kafka-python >= 2.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-kafka-python==0.40b0.dev\",\n },\n \"mysql-connector-python\": {\n \"library\": \"mysql-connector-python ~= 8.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-mysql==0.40b0.dev\",\n },\n \"pika\": {\n \"library\": \"pika >= 0.12.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-pika==0.40b0.dev\",\n },\n \"psycopg2\": {\n \"library\": \"psycopg2 >= 2.7.3.1\",\n \"instrumentation\": \"opentelemetry-instrumentation-psycopg2==0.40b0.dev\",\n },\n \"pymemcache\": {\n \"library\": \"pymemcache >= 1.3.5, < 5\",\n \"instrumentation\": \"opentelemetry-instrumentation-pymemcache==0.40b0.dev\",\n },\n \"pymongo\": {\n \"library\": \"pymongo >= 3.1, < 5.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-pymongo==0.40b0.dev\",\n },\n \"PyMySQL\": {\n \"library\": \"PyMySQL < 2\",\n \"instrumentation\": \"opentelemetry-instrumentation-pymysql==0.40b0.dev\",\n },\n \"pyramid\": {\n \"library\": \"pyramid >= 1.7\",\n \"instrumentation\": \"opentelemetry-instrumentation-pyramid==0.40b0.dev\",\n },\n \"redis\": {\n \"library\": \"redis >= 2.6\",\n \"instrumentation\": \"opentelemetry-instrumentation-redis==0.40b0.dev\",\n },\n \"remoulade\": {\n \"library\": \"remoulade >= 0.50\",\n \"instrumentation\": \"opentelemetry-instrumentation-remoulade==0.40b0.dev\",\n },\n \"requests\": {\n \"library\": \"requests ~= 2.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-requests==0.40b0.dev\",\n },\n \"scikit-learn\": {\n \"library\": \"scikit-learn ~= 0.24.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-sklearn==0.40b0.dev\",\n },\n \"sqlalchemy\": {\n \"library\": \"sqlalchemy\",\n \"instrumentation\": \"opentelemetry-instrumentation-sqlalchemy==0.40b0.dev\",\n },\n \"starlette\": {\n \"library\": \"starlette ~= 0.13.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-starlette==0.40b0.dev\",\n },\n \"psutil\": {\n \"library\": \"psutil >= 5\",\n \"instrumentation\": \"opentelemetry-instrumentation-system-metrics==0.40b0.dev\",\n },\n \"tornado\": {\n \"library\": \"tornado >= 5.1.1\",\n \"instrumentation\": \"opentelemetry-instrumentation-tornado==0.40b0.dev\",\n },\n \"tortoise-orm\": {\n \"library\": \"tortoise-orm >= 0.17.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-tortoiseorm==0.40b0.dev\",\n },\n \"pydantic\": {\n \"library\": \"pydantic >= 1.10.2\",\n \"instrumentation\": \"opentelemetry-instrumentation-tortoiseorm==0.40b0.dev\",\n },\n \"urllib3\": {\n \"library\": \"urllib3 >= 1.0.0, < 2.0.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-urllib3==0.40b0.dev\",\n },\n}\ndefault_instrumentations = [\n 
\"opentelemetry-instrumentation-aws-lambda==0.40b0.dev\",\n \"opentelemetry-instrumentation-dbapi==0.40b0.dev\",\n \"opentelemetry-instrumentation-logging==0.40b0.dev\",\n \"opentelemetry-instrumentation-sqlite3==0.40b0.dev\",\n \"opentelemetry-instrumentation-urllib==0.40b0.dev\",\n \"opentelemetry-instrumentation-wsgi==0.40b0.dev\",\n]\n", "path": "opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# DO NOT EDIT. THIS FILE WAS AUTOGENERATED FROM INSTRUMENTATION PACKAGES.\n# RUN `python scripts/generate_instrumentation_bootstrap.py` TO REGENERATE.\n\nlibraries = {\n \"aio_pika\": {\n \"library\": \"aio_pika >= 7.2.0, < 10.0.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-aio-pika==0.40b0.dev\",\n },\n \"aiohttp\": {\n \"library\": \"aiohttp ~= 3.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-aiohttp-client==0.40b0.dev\",\n },\n \"aiopg\": {\n \"library\": \"aiopg >= 0.13.0, < 2.0.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-aiopg==0.40b0.dev\",\n },\n \"asgiref\": {\n \"library\": \"asgiref ~= 3.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-asgi==0.40b0.dev\",\n },\n \"asyncpg\": {\n \"library\": \"asyncpg >= 0.12.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-asyncpg==0.40b0.dev\",\n },\n \"boto\": {\n \"library\": \"boto~=2.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-boto==0.40b0.dev\",\n },\n \"boto3\": {\n \"library\": \"boto3 ~= 1.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-boto3sqs==0.40b0.dev\",\n },\n \"botocore\": {\n \"library\": \"botocore ~= 1.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-botocore==0.40b0.dev\",\n },\n \"celery\": {\n \"library\": \"celery >= 4.0, < 6.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-celery==0.40b0.dev\",\n },\n \"confluent-kafka\": {\n \"library\": \"confluent-kafka >= 1.8.2, < 2.0.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-confluent-kafka==0.40b0.dev\",\n },\n \"django\": {\n \"library\": \"django >= 1.10\",\n \"instrumentation\": \"opentelemetry-instrumentation-django==0.40b0.dev\",\n },\n \"elasticsearch\": {\n \"library\": \"elasticsearch >= 2.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-elasticsearch==0.40b0.dev\",\n },\n \"falcon\": {\n \"library\": \"falcon >= 1.4.1, < 4.0.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-falcon==0.40b0.dev\",\n },\n \"fastapi\": {\n \"library\": \"fastapi ~= 0.58\",\n \"instrumentation\": \"opentelemetry-instrumentation-fastapi==0.40b0.dev\",\n },\n \"flask\": {\n \"library\": \"flask >= 1.0, < 3.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-flask==0.40b0.dev\",\n },\n \"grpcio\": {\n \"library\": \"grpcio ~= 1.27\",\n \"instrumentation\": \"opentelemetry-instrumentation-grpc==0.40b0.dev\",\n },\n \"httpx\": {\n \"library\": \"httpx >= 0.18.0\",\n \"instrumentation\": 
\"opentelemetry-instrumentation-httpx==0.40b0.dev\",\n },\n \"jinja2\": {\n \"library\": \"jinja2 >= 2.7, < 4.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-jinja2==0.40b0.dev\",\n },\n \"kafka-python\": {\n \"library\": \"kafka-python >= 2.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-kafka-python==0.40b0.dev\",\n },\n \"mysql-connector-python\": {\n \"library\": \"mysql-connector-python ~= 8.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-mysql==0.40b0.dev\",\n },\n \"pika\": {\n \"library\": \"pika >= 0.12.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-pika==0.40b0.dev\",\n },\n \"psycopg2\": {\n \"library\": \"psycopg2 >= 2.7.3.1\",\n \"instrumentation\": \"opentelemetry-instrumentation-psycopg2==0.40b0.dev\",\n },\n \"pymemcache\": {\n \"library\": \"pymemcache >= 1.3.5, < 5\",\n \"instrumentation\": \"opentelemetry-instrumentation-pymemcache==0.40b0.dev\",\n },\n \"pymongo\": {\n \"library\": \"pymongo >= 3.1, < 5.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-pymongo==0.40b0.dev\",\n },\n \"PyMySQL\": {\n \"library\": \"PyMySQL < 2\",\n \"instrumentation\": \"opentelemetry-instrumentation-pymysql==0.40b0.dev\",\n },\n \"pyramid\": {\n \"library\": \"pyramid >= 1.7\",\n \"instrumentation\": \"opentelemetry-instrumentation-pyramid==0.40b0.dev\",\n },\n \"redis\": {\n \"library\": \"redis >= 2.6\",\n \"instrumentation\": \"opentelemetry-instrumentation-redis==0.40b0.dev\",\n },\n \"remoulade\": {\n \"library\": \"remoulade >= 0.50\",\n \"instrumentation\": \"opentelemetry-instrumentation-remoulade==0.40b0.dev\",\n },\n \"requests\": {\n \"library\": \"requests ~= 2.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-requests==0.40b0.dev\",\n },\n \"scikit-learn\": {\n \"library\": \"scikit-learn ~= 0.24.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-sklearn==0.40b0.dev\",\n },\n \"sqlalchemy\": {\n \"library\": \"sqlalchemy\",\n \"instrumentation\": \"opentelemetry-instrumentation-sqlalchemy==0.40b0.dev\",\n },\n \"starlette\": {\n \"library\": \"starlette ~= 0.13.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-starlette==0.40b0.dev\",\n },\n \"psutil\": {\n \"library\": \"psutil >= 5\",\n \"instrumentation\": \"opentelemetry-instrumentation-system-metrics==0.40b0.dev\",\n },\n \"tornado\": {\n \"library\": \"tornado >= 5.1.1\",\n \"instrumentation\": \"opentelemetry-instrumentation-tornado==0.40b0.dev\",\n },\n \"tortoise-orm\": {\n \"library\": \"tortoise-orm >= 0.17.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-tortoiseorm==0.40b0.dev\",\n },\n \"pydantic\": {\n \"library\": \"pydantic >= 1.10.2\",\n \"instrumentation\": \"opentelemetry-instrumentation-tortoiseorm==0.40b0.dev\",\n },\n \"urllib3\": {\n \"library\": \"urllib3 >= 1.0.0, < 2.0.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-urllib3==0.40b0.dev\",\n },\n}\ndefault_instrumentations = [\n \"opentelemetry-instrumentation-aws-lambda==0.40b0.dev\",\n \"opentelemetry-instrumentation-dbapi==0.40b0.dev\",\n \"opentelemetry-instrumentation-logging==0.40b0.dev\",\n \"opentelemetry-instrumentation-sqlite3==0.40b0.dev\",\n \"opentelemetry-instrumentation-urllib==0.40b0.dev\",\n \"opentelemetry-instrumentation-wsgi==0.40b0.dev\",\n]\n", "path": "opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py"}]}
| 2,903 | 194 |
gh_patches_debug_24865
|
rasdani/github-patches
|
git_diff
|
googleapis__google-cloud-python-10134
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Language: typo ("spech") in Natural Language samples
#### Environment details
N/A
#### Steps to reproduce
N/A
#### Code example
Comments say "Parts of spech" instead of "Parts of speech"
https://github.com/googleapis/google-cloud-python/blob/master/language/samples/v1/language_syntax_gcs.py#L67
https://github.com/googleapis/google-cloud-python/blob/master/language/samples/v1/language_syntax_text.py#L66
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `language/samples/v1/language_syntax_gcs.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright 2019 Google LLC
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # https://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 # DO NOT EDIT! This is a generated sample ("Request", "language_syntax_gcs")
18
19 # To install the latest published package dependency, execute the following:
20 # pip install google-cloud-language
21
22 # sample-metadata
23 # title: Analyzing Syntax (GCS)
24 # description: Analyzing Syntax in text file stored in Cloud Storage
25 # usage: python3 samples/v1/language_syntax_gcs.py [--gcs_content_uri "gs://cloud-samples-data/language/syntax-sentence.txt"]
26
27 # [START language_syntax_gcs]
28 from google.cloud import language_v1
29 from google.cloud.language_v1 import enums
30
31
32 def sample_analyze_syntax(gcs_content_uri):
33 """
34 Analyzing Syntax in text file stored in Cloud Storage
35
36 Args:
37 gcs_content_uri Google Cloud Storage URI where the file content is located.
38 e.g. gs://[Your Bucket]/[Path to File]
39 """
40
41 client = language_v1.LanguageServiceClient()
42
43 # gcs_content_uri = 'gs://cloud-samples-data/language/syntax-sentence.txt'
44
45 # Available types: PLAIN_TEXT, HTML
46 type_ = enums.Document.Type.PLAIN_TEXT
47
48 # Optional. If not specified, the language is automatically detected.
49 # For list of supported languages:
50 # https://cloud.google.com/natural-language/docs/languages
51 language = "en"
52 document = {"gcs_content_uri": gcs_content_uri, "type": type_, "language": language}
53
54 # Available values: NONE, UTF8, UTF16, UTF32
55 encoding_type = enums.EncodingType.UTF8
56
57 response = client.analyze_syntax(document, encoding_type=encoding_type)
58 # Loop through tokens returned from the API
59 for token in response.tokens:
60 # Get the text content of this token. Usually a word or punctuation.
61 text = token.text
62 print(u"Token text: {}".format(text.content))
63 print(
64 u"Location of this token in overall document: {}".format(text.begin_offset)
65 )
66 # Get the part of speech information for this token.
67 # Parts of spech are as defined in:
68 # http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf
69 part_of_speech = token.part_of_speech
70 # Get the tag, e.g. NOUN, ADJ for Adjective, et al.
71 print(
72 u"Part of Speech tag: {}".format(
73 enums.PartOfSpeech.Tag(part_of_speech.tag).name
74 )
75 )
76 # Get the voice, e.g. ACTIVE or PASSIVE
77 print(u"Voice: {}".format(enums.PartOfSpeech.Voice(part_of_speech.voice).name))
78 # Get the tense, e.g. PAST, FUTURE, PRESENT, et al.
79 print(u"Tense: {}".format(enums.PartOfSpeech.Tense(part_of_speech.tense).name))
80 # See API reference for additional Part of Speech information available
81 # Get the lemma of the token. Wikipedia lemma description
82 # https://en.wikipedia.org/wiki/Lemma_(morphology)
83 print(u"Lemma: {}".format(token.lemma))
84 # Get the dependency tree parse information for this token.
85 # For more information on dependency labels:
86 # http://www.aclweb.org/anthology/P13-2017
87 dependency_edge = token.dependency_edge
88 print(u"Head token index: {}".format(dependency_edge.head_token_index))
89 print(
90 u"Label: {}".format(enums.DependencyEdge.Label(dependency_edge.label).name)
91 )
92
93 # Get the language of the text, which will be the same as
94 # the language specified in the request or, if not specified,
95 # the automatically-detected language.
96 print(u"Language of the text: {}".format(response.language))
97
98
99 # [END language_syntax_gcs]
100
101
102 def main():
103 import argparse
104
105 parser = argparse.ArgumentParser()
106 parser.add_argument(
107 "--gcs_content_uri",
108 type=str,
109 default="gs://cloud-samples-data/language/syntax-sentence.txt",
110 )
111 args = parser.parse_args()
112
113 sample_analyze_syntax(args.gcs_content_uri)
114
115
116 if __name__ == "__main__":
117 main()
118
```
Path: `language/samples/v1/language_syntax_text.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright 2019 Google LLC
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # https://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 # DO NOT EDIT! This is a generated sample ("Request", "language_syntax_text")
18
19 # To install the latest published package dependency, execute the following:
20 # pip install google-cloud-language
21
22 # sample-metadata
23 # title: Analyzing Syntax
24 # description: Analyzing Syntax in a String
25 # usage: python3 samples/v1/language_syntax_text.py [--text_content "This is a short sentence."]
26
27 # [START language_syntax_text]
28 from google.cloud import language_v1
29 from google.cloud.language_v1 import enums
30
31
32 def sample_analyze_syntax(text_content):
33 """
34 Analyzing Syntax in a String
35
36 Args:
37 text_content The text content to analyze
38 """
39
40 client = language_v1.LanguageServiceClient()
41
42 # text_content = 'This is a short sentence.'
43
44 # Available types: PLAIN_TEXT, HTML
45 type_ = enums.Document.Type.PLAIN_TEXT
46
47 # Optional. If not specified, the language is automatically detected.
48 # For list of supported languages:
49 # https://cloud.google.com/natural-language/docs/languages
50 language = "en"
51 document = {"content": text_content, "type": type_, "language": language}
52
53 # Available values: NONE, UTF8, UTF16, UTF32
54 encoding_type = enums.EncodingType.UTF8
55
56 response = client.analyze_syntax(document, encoding_type=encoding_type)
57 # Loop through tokens returned from the API
58 for token in response.tokens:
59 # Get the text content of this token. Usually a word or punctuation.
60 text = token.text
61 print(u"Token text: {}".format(text.content))
62 print(
63 u"Location of this token in overall document: {}".format(text.begin_offset)
64 )
65 # Get the part of speech information for this token.
66 # Parts of spech are as defined in:
67 # http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf
68 part_of_speech = token.part_of_speech
69 # Get the tag, e.g. NOUN, ADJ for Adjective, et al.
70 print(
71 u"Part of Speech tag: {}".format(
72 enums.PartOfSpeech.Tag(part_of_speech.tag).name
73 )
74 )
75 # Get the voice, e.g. ACTIVE or PASSIVE
76 print(u"Voice: {}".format(enums.PartOfSpeech.Voice(part_of_speech.voice).name))
77 # Get the tense, e.g. PAST, FUTURE, PRESENT, et al.
78 print(u"Tense: {}".format(enums.PartOfSpeech.Tense(part_of_speech.tense).name))
79 # See API reference for additional Part of Speech information available
80 # Get the lemma of the token. Wikipedia lemma description
81 # https://en.wikipedia.org/wiki/Lemma_(morphology)
82 print(u"Lemma: {}".format(token.lemma))
83 # Get the dependency tree parse information for this token.
84 # For more information on dependency labels:
85 # http://www.aclweb.org/anthology/P13-2017
86 dependency_edge = token.dependency_edge
87 print(u"Head token index: {}".format(dependency_edge.head_token_index))
88 print(
89 u"Label: {}".format(enums.DependencyEdge.Label(dependency_edge.label).name)
90 )
91
92 # Get the language of the text, which will be the same as
93 # the language specified in the request or, if not specified,
94 # the automatically-detected language.
95 print(u"Language of the text: {}".format(response.language))
96
97
98 # [END language_syntax_text]
99
100
101 def main():
102 import argparse
103
104 parser = argparse.ArgumentParser()
105 parser.add_argument("--text_content", type=str, default="This is a short sentence.")
106 args = parser.parse_args()
107
108 sample_analyze_syntax(args.text_content)
109
110
111 if __name__ == "__main__":
112 main()
113
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/language/samples/v1/language_syntax_gcs.py b/language/samples/v1/language_syntax_gcs.py
--- a/language/samples/v1/language_syntax_gcs.py
+++ b/language/samples/v1/language_syntax_gcs.py
@@ -64,7 +64,7 @@
u"Location of this token in overall document: {}".format(text.begin_offset)
)
# Get the part of speech information for this token.
- # Parts of spech are as defined in:
+ # Parts of speech are as defined in:
# http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf
part_of_speech = token.part_of_speech
# Get the tag, e.g. NOUN, ADJ for Adjective, et al.
diff --git a/language/samples/v1/language_syntax_text.py b/language/samples/v1/language_syntax_text.py
--- a/language/samples/v1/language_syntax_text.py
+++ b/language/samples/v1/language_syntax_text.py
@@ -63,7 +63,7 @@
u"Location of this token in overall document: {}".format(text.begin_offset)
)
# Get the part of speech information for this token.
- # Parts of spech are as defined in:
+ # Parts of speech are as defined in:
# http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf
part_of_speech = token.part_of_speech
# Get the tag, e.g. NOUN, ADJ for Adjective, et al.
|
{"golden_diff": "diff --git a/language/samples/v1/language_syntax_gcs.py b/language/samples/v1/language_syntax_gcs.py\n--- a/language/samples/v1/language_syntax_gcs.py\n+++ b/language/samples/v1/language_syntax_gcs.py\n@@ -64,7 +64,7 @@\n u\"Location of this token in overall document: {}\".format(text.begin_offset)\n )\n # Get the part of speech information for this token.\n- # Parts of spech are as defined in:\n+ # Parts of speech are as defined in:\n # http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf\n part_of_speech = token.part_of_speech\n # Get the tag, e.g. NOUN, ADJ for Adjective, et al.\ndiff --git a/language/samples/v1/language_syntax_text.py b/language/samples/v1/language_syntax_text.py\n--- a/language/samples/v1/language_syntax_text.py\n+++ b/language/samples/v1/language_syntax_text.py\n@@ -63,7 +63,7 @@\n u\"Location of this token in overall document: {}\".format(text.begin_offset)\n )\n # Get the part of speech information for this token.\n- # Parts of spech are as defined in:\n+ # Parts of speech are as defined in:\n # http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf\n part_of_speech = token.part_of_speech\n # Get the tag, e.g. NOUN, ADJ for Adjective, et al.\n", "issue": "Language: typo (\"spech\") in Natural Language samples\n#### Environment details\r\n\r\nN/A\r\n\r\n#### Steps to reproduce\r\n\r\nN/A\r\n\r\n#### Code example\r\n\r\nComments say \"Parts of spech\" instead of \"Parts of speech\"\r\nhttps://github.com/googleapis/google-cloud-python/blob/master/language/samples/v1/language_syntax_gcs.py#L67\r\nhttps://github.com/googleapis/google-cloud-python/blob/master/language/samples/v1/language_syntax_text.py#L66\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# DO NOT EDIT! This is a generated sample (\"Request\", \"language_syntax_gcs\")\n\n# To install the latest published package dependency, execute the following:\n# pip install google-cloud-language\n\n# sample-metadata\n# title: Analyzing Syntax (GCS)\n# description: Analyzing Syntax in text file stored in Cloud Storage\n# usage: python3 samples/v1/language_syntax_gcs.py [--gcs_content_uri \"gs://cloud-samples-data/language/syntax-sentence.txt\"]\n\n# [START language_syntax_gcs]\nfrom google.cloud import language_v1\nfrom google.cloud.language_v1 import enums\n\n\ndef sample_analyze_syntax(gcs_content_uri):\n \"\"\"\n Analyzing Syntax in text file stored in Cloud Storage\n\n Args:\n gcs_content_uri Google Cloud Storage URI where the file content is located.\n e.g. gs://[Your Bucket]/[Path to File]\n \"\"\"\n\n client = language_v1.LanguageServiceClient()\n\n # gcs_content_uri = 'gs://cloud-samples-data/language/syntax-sentence.txt'\n\n # Available types: PLAIN_TEXT, HTML\n type_ = enums.Document.Type.PLAIN_TEXT\n\n # Optional. 
If not specified, the language is automatically detected.\n # For list of supported languages:\n # https://cloud.google.com/natural-language/docs/languages\n language = \"en\"\n document = {\"gcs_content_uri\": gcs_content_uri, \"type\": type_, \"language\": language}\n\n # Available values: NONE, UTF8, UTF16, UTF32\n encoding_type = enums.EncodingType.UTF8\n\n response = client.analyze_syntax(document, encoding_type=encoding_type)\n # Loop through tokens returned from the API\n for token in response.tokens:\n # Get the text content of this token. Usually a word or punctuation.\n text = token.text\n print(u\"Token text: {}\".format(text.content))\n print(\n u\"Location of this token in overall document: {}\".format(text.begin_offset)\n )\n # Get the part of speech information for this token.\n # Parts of spech are as defined in:\n # http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf\n part_of_speech = token.part_of_speech\n # Get the tag, e.g. NOUN, ADJ for Adjective, et al.\n print(\n u\"Part of Speech tag: {}\".format(\n enums.PartOfSpeech.Tag(part_of_speech.tag).name\n )\n )\n # Get the voice, e.g. ACTIVE or PASSIVE\n print(u\"Voice: {}\".format(enums.PartOfSpeech.Voice(part_of_speech.voice).name))\n # Get the tense, e.g. PAST, FUTURE, PRESENT, et al.\n print(u\"Tense: {}\".format(enums.PartOfSpeech.Tense(part_of_speech.tense).name))\n # See API reference for additional Part of Speech information available\n # Get the lemma of the token. Wikipedia lemma description\n # https://en.wikipedia.org/wiki/Lemma_(morphology)\n print(u\"Lemma: {}\".format(token.lemma))\n # Get the dependency tree parse information for this token.\n # For more information on dependency labels:\n # http://www.aclweb.org/anthology/P13-2017\n dependency_edge = token.dependency_edge\n print(u\"Head token index: {}\".format(dependency_edge.head_token_index))\n print(\n u\"Label: {}\".format(enums.DependencyEdge.Label(dependency_edge.label).name)\n )\n\n # Get the language of the text, which will be the same as\n # the language specified in the request or, if not specified,\n # the automatically-detected language.\n print(u\"Language of the text: {}\".format(response.language))\n\n\n# [END language_syntax_gcs]\n\n\ndef main():\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--gcs_content_uri\",\n type=str,\n default=\"gs://cloud-samples-data/language/syntax-sentence.txt\",\n )\n args = parser.parse_args()\n\n sample_analyze_syntax(args.gcs_content_uri)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "language/samples/v1/language_syntax_gcs.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# DO NOT EDIT! 
This is a generated sample (\"Request\", \"language_syntax_text\")\n\n# To install the latest published package dependency, execute the following:\n# pip install google-cloud-language\n\n# sample-metadata\n# title: Analyzing Syntax\n# description: Analyzing Syntax in a String\n# usage: python3 samples/v1/language_syntax_text.py [--text_content \"This is a short sentence.\"]\n\n# [START language_syntax_text]\nfrom google.cloud import language_v1\nfrom google.cloud.language_v1 import enums\n\n\ndef sample_analyze_syntax(text_content):\n \"\"\"\n Analyzing Syntax in a String\n\n Args:\n text_content The text content to analyze\n \"\"\"\n\n client = language_v1.LanguageServiceClient()\n\n # text_content = 'This is a short sentence.'\n\n # Available types: PLAIN_TEXT, HTML\n type_ = enums.Document.Type.PLAIN_TEXT\n\n # Optional. If not specified, the language is automatically detected.\n # For list of supported languages:\n # https://cloud.google.com/natural-language/docs/languages\n language = \"en\"\n document = {\"content\": text_content, \"type\": type_, \"language\": language}\n\n # Available values: NONE, UTF8, UTF16, UTF32\n encoding_type = enums.EncodingType.UTF8\n\n response = client.analyze_syntax(document, encoding_type=encoding_type)\n # Loop through tokens returned from the API\n for token in response.tokens:\n # Get the text content of this token. Usually a word or punctuation.\n text = token.text\n print(u\"Token text: {}\".format(text.content))\n print(\n u\"Location of this token in overall document: {}\".format(text.begin_offset)\n )\n # Get the part of speech information for this token.\n # Parts of spech are as defined in:\n # http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf\n part_of_speech = token.part_of_speech\n # Get the tag, e.g. NOUN, ADJ for Adjective, et al.\n print(\n u\"Part of Speech tag: {}\".format(\n enums.PartOfSpeech.Tag(part_of_speech.tag).name\n )\n )\n # Get the voice, e.g. ACTIVE or PASSIVE\n print(u\"Voice: {}\".format(enums.PartOfSpeech.Voice(part_of_speech.voice).name))\n # Get the tense, e.g. PAST, FUTURE, PRESENT, et al.\n print(u\"Tense: {}\".format(enums.PartOfSpeech.Tense(part_of_speech.tense).name))\n # See API reference for additional Part of Speech information available\n # Get the lemma of the token. 
Wikipedia lemma description\n # https://en.wikipedia.org/wiki/Lemma_(morphology)\n print(u\"Lemma: {}\".format(token.lemma))\n # Get the dependency tree parse information for this token.\n # For more information on dependency labels:\n # http://www.aclweb.org/anthology/P13-2017\n dependency_edge = token.dependency_edge\n print(u\"Head token index: {}\".format(dependency_edge.head_token_index))\n print(\n u\"Label: {}\".format(enums.DependencyEdge.Label(dependency_edge.label).name)\n )\n\n # Get the language of the text, which will be the same as\n # the language specified in the request or, if not specified,\n # the automatically-detected language.\n print(u\"Language of the text: {}\".format(response.language))\n\n\n# [END language_syntax_text]\n\n\ndef main():\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--text_content\", type=str, default=\"This is a short sentence.\")\n args = parser.parse_args()\n\n sample_analyze_syntax(args.text_content)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "language/samples/v1/language_syntax_text.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# DO NOT EDIT! This is a generated sample (\"Request\", \"language_syntax_gcs\")\n\n# To install the latest published package dependency, execute the following:\n# pip install google-cloud-language\n\n# sample-metadata\n# title: Analyzing Syntax (GCS)\n# description: Analyzing Syntax in text file stored in Cloud Storage\n# usage: python3 samples/v1/language_syntax_gcs.py [--gcs_content_uri \"gs://cloud-samples-data/language/syntax-sentence.txt\"]\n\n# [START language_syntax_gcs]\nfrom google.cloud import language_v1\nfrom google.cloud.language_v1 import enums\n\n\ndef sample_analyze_syntax(gcs_content_uri):\n \"\"\"\n Analyzing Syntax in text file stored in Cloud Storage\n\n Args:\n gcs_content_uri Google Cloud Storage URI where the file content is located.\n e.g. gs://[Your Bucket]/[Path to File]\n \"\"\"\n\n client = language_v1.LanguageServiceClient()\n\n # gcs_content_uri = 'gs://cloud-samples-data/language/syntax-sentence.txt'\n\n # Available types: PLAIN_TEXT, HTML\n type_ = enums.Document.Type.PLAIN_TEXT\n\n # Optional. If not specified, the language is automatically detected.\n # For list of supported languages:\n # https://cloud.google.com/natural-language/docs/languages\n language = \"en\"\n document = {\"gcs_content_uri\": gcs_content_uri, \"type\": type_, \"language\": language}\n\n # Available values: NONE, UTF8, UTF16, UTF32\n encoding_type = enums.EncodingType.UTF8\n\n response = client.analyze_syntax(document, encoding_type=encoding_type)\n # Loop through tokens returned from the API\n for token in response.tokens:\n # Get the text content of this token. 
Usually a word or punctuation.\n text = token.text\n print(u\"Token text: {}\".format(text.content))\n print(\n u\"Location of this token in overall document: {}\".format(text.begin_offset)\n )\n # Get the part of speech information for this token.\n # Parts of speech are as defined in:\n # http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf\n part_of_speech = token.part_of_speech\n # Get the tag, e.g. NOUN, ADJ for Adjective, et al.\n print(\n u\"Part of Speech tag: {}\".format(\n enums.PartOfSpeech.Tag(part_of_speech.tag).name\n )\n )\n # Get the voice, e.g. ACTIVE or PASSIVE\n print(u\"Voice: {}\".format(enums.PartOfSpeech.Voice(part_of_speech.voice).name))\n # Get the tense, e.g. PAST, FUTURE, PRESENT, et al.\n print(u\"Tense: {}\".format(enums.PartOfSpeech.Tense(part_of_speech.tense).name))\n # See API reference for additional Part of Speech information available\n # Get the lemma of the token. Wikipedia lemma description\n # https://en.wikipedia.org/wiki/Lemma_(morphology)\n print(u\"Lemma: {}\".format(token.lemma))\n # Get the dependency tree parse information for this token.\n # For more information on dependency labels:\n # http://www.aclweb.org/anthology/P13-2017\n dependency_edge = token.dependency_edge\n print(u\"Head token index: {}\".format(dependency_edge.head_token_index))\n print(\n u\"Label: {}\".format(enums.DependencyEdge.Label(dependency_edge.label).name)\n )\n\n # Get the language of the text, which will be the same as\n # the language specified in the request or, if not specified,\n # the automatically-detected language.\n print(u\"Language of the text: {}\".format(response.language))\n\n\n# [END language_syntax_gcs]\n\n\ndef main():\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--gcs_content_uri\",\n type=str,\n default=\"gs://cloud-samples-data/language/syntax-sentence.txt\",\n )\n args = parser.parse_args()\n\n sample_analyze_syntax(args.gcs_content_uri)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "language/samples/v1/language_syntax_gcs.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# DO NOT EDIT! This is a generated sample (\"Request\", \"language_syntax_text\")\n\n# To install the latest published package dependency, execute the following:\n# pip install google-cloud-language\n\n# sample-metadata\n# title: Analyzing Syntax\n# description: Analyzing Syntax in a String\n# usage: python3 samples/v1/language_syntax_text.py [--text_content \"This is a short sentence.\"]\n\n# [START language_syntax_text]\nfrom google.cloud import language_v1\nfrom google.cloud.language_v1 import enums\n\n\ndef sample_analyze_syntax(text_content):\n \"\"\"\n Analyzing Syntax in a String\n\n Args:\n text_content The text content to analyze\n \"\"\"\n\n client = language_v1.LanguageServiceClient()\n\n # text_content = 'This is a short sentence.'\n\n # Available types: PLAIN_TEXT, HTML\n type_ = enums.Document.Type.PLAIN_TEXT\n\n # Optional. 
If not specified, the language is automatically detected.\n # For list of supported languages:\n # https://cloud.google.com/natural-language/docs/languages\n language = \"en\"\n document = {\"content\": text_content, \"type\": type_, \"language\": language}\n\n # Available values: NONE, UTF8, UTF16, UTF32\n encoding_type = enums.EncodingType.UTF8\n\n response = client.analyze_syntax(document, encoding_type=encoding_type)\n # Loop through tokens returned from the API\n for token in response.tokens:\n # Get the text content of this token. Usually a word or punctuation.\n text = token.text\n print(u\"Token text: {}\".format(text.content))\n print(\n u\"Location of this token in overall document: {}\".format(text.begin_offset)\n )\n # Get the part of speech information for this token.\n # Parts of speech are as defined in:\n # http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf\n part_of_speech = token.part_of_speech\n # Get the tag, e.g. NOUN, ADJ for Adjective, et al.\n print(\n u\"Part of Speech tag: {}\".format(\n enums.PartOfSpeech.Tag(part_of_speech.tag).name\n )\n )\n # Get the voice, e.g. ACTIVE or PASSIVE\n print(u\"Voice: {}\".format(enums.PartOfSpeech.Voice(part_of_speech.voice).name))\n # Get the tense, e.g. PAST, FUTURE, PRESENT, et al.\n print(u\"Tense: {}\".format(enums.PartOfSpeech.Tense(part_of_speech.tense).name))\n # See API reference for additional Part of Speech information available\n # Get the lemma of the token. Wikipedia lemma description\n # https://en.wikipedia.org/wiki/Lemma_(morphology)\n print(u\"Lemma: {}\".format(token.lemma))\n # Get the dependency tree parse information for this token.\n # For more information on dependency labels:\n # http://www.aclweb.org/anthology/P13-2017\n dependency_edge = token.dependency_edge\n print(u\"Head token index: {}\".format(dependency_edge.head_token_index))\n print(\n u\"Label: {}\".format(enums.DependencyEdge.Label(dependency_edge.label).name)\n )\n\n # Get the language of the text, which will be the same as\n # the language specified in the request or, if not specified,\n # the automatically-detected language.\n print(u\"Language of the text: {}\".format(response.language))\n\n\n# [END language_syntax_text]\n\n\ndef main():\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--text_content\", type=str, default=\"This is a short sentence.\")\n args = parser.parse_args()\n\n sample_analyze_syntax(args.text_content)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "language/samples/v1/language_syntax_text.py"}]}
| 2,903 | 352 |
gh_patches_debug_10108
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-4074
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support pathlib.Path in FEED_URI
Make things work the same when the value assigned to the `FEED_URI` setting is a string containing a path or an instance of `pathlib.Path`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/extensions/feedexport.py`
Content:
```
1 """
2 Feed Exports extension
3
4 See documentation in docs/topics/feed-exports.rst
5 """
6
7 import os
8 import sys
9 import logging
10 import posixpath
11 from tempfile import NamedTemporaryFile
12 from datetime import datetime
13 import six
14 from six.moves.urllib.parse import urlparse, unquote
15 from ftplib import FTP
16
17 from zope.interface import Interface, implementer
18 from twisted.internet import defer, threads
19 from w3lib.url import file_uri_to_path
20
21 from scrapy import signals
22 from scrapy.utils.ftp import ftp_makedirs_cwd
23 from scrapy.exceptions import NotConfigured
24 from scrapy.utils.misc import create_instance, load_object
25 from scrapy.utils.log import failure_to_exc_info
26 from scrapy.utils.python import without_none_values
27 from scrapy.utils.boto import is_botocore
28
29 logger = logging.getLogger(__name__)
30
31
32 class IFeedStorage(Interface):
33 """Interface that all Feed Storages must implement"""
34
35 def __init__(uri):
36 """Initialize the storage with the parameters given in the URI"""
37
38 def open(spider):
39 """Open the storage for the given spider. It must return a file-like
40 object that will be used for the exporters"""
41
42 def store(file):
43 """Store the given file stream"""
44
45
46 @implementer(IFeedStorage)
47 class BlockingFeedStorage(object):
48
49 def open(self, spider):
50 path = spider.crawler.settings['FEED_TEMPDIR']
51 if path and not os.path.isdir(path):
52 raise OSError('Not a Directory: ' + str(path))
53
54 return NamedTemporaryFile(prefix='feed-', dir=path)
55
56 def store(self, file):
57 return threads.deferToThread(self._store_in_thread, file)
58
59 def _store_in_thread(self, file):
60 raise NotImplementedError
61
62
63 @implementer(IFeedStorage)
64 class StdoutFeedStorage(object):
65
66 def __init__(self, uri, _stdout=None):
67 if not _stdout:
68 _stdout = sys.stdout if six.PY2 else sys.stdout.buffer
69 self._stdout = _stdout
70
71 def open(self, spider):
72 return self._stdout
73
74 def store(self, file):
75 pass
76
77
78 @implementer(IFeedStorage)
79 class FileFeedStorage(object):
80
81 def __init__(self, uri):
82 self.path = file_uri_to_path(uri)
83
84 def open(self, spider):
85 dirname = os.path.dirname(self.path)
86 if dirname and not os.path.exists(dirname):
87 os.makedirs(dirname)
88 return open(self.path, 'ab')
89
90 def store(self, file):
91 file.close()
92
93
94 class S3FeedStorage(BlockingFeedStorage):
95
96 def __init__(self, uri, access_key=None, secret_key=None, acl=None):
97 # BEGIN Backward compatibility for initialising without keys (and
98 # without using from_crawler)
99 no_defaults = access_key is None and secret_key is None
100 if no_defaults:
101 from scrapy.utils.project import get_project_settings
102 settings = get_project_settings()
103 if 'AWS_ACCESS_KEY_ID' in settings or 'AWS_SECRET_ACCESS_KEY' in settings:
104 import warnings
105 from scrapy.exceptions import ScrapyDeprecationWarning
106 warnings.warn(
107 "Initialising `scrapy.extensions.feedexport.S3FeedStorage` "
108 "without AWS keys is deprecated. Please supply credentials or "
109 "use the `from_crawler()` constructor.",
110 category=ScrapyDeprecationWarning,
111 stacklevel=2
112 )
113 access_key = settings['AWS_ACCESS_KEY_ID']
114 secret_key = settings['AWS_SECRET_ACCESS_KEY']
115 # END Backward compatibility
116 u = urlparse(uri)
117 self.bucketname = u.hostname
118 self.access_key = u.username or access_key
119 self.secret_key = u.password or secret_key
120 self.is_botocore = is_botocore()
121 self.keyname = u.path[1:] # remove first "/"
122 self.acl = acl
123 if self.is_botocore:
124 import botocore.session
125 session = botocore.session.get_session()
126 self.s3_client = session.create_client(
127 's3', aws_access_key_id=self.access_key,
128 aws_secret_access_key=self.secret_key)
129 else:
130 import boto
131 self.connect_s3 = boto.connect_s3
132
133 @classmethod
134 def from_crawler(cls, crawler, uri):
135 return cls(
136 uri=uri,
137 access_key=crawler.settings['AWS_ACCESS_KEY_ID'],
138 secret_key=crawler.settings['AWS_SECRET_ACCESS_KEY'],
139 acl=crawler.settings['FEED_STORAGE_S3_ACL'] or None
140 )
141
142 def _store_in_thread(self, file):
143 file.seek(0)
144 if self.is_botocore:
145 kwargs = {'ACL': self.acl} if self.acl else {}
146 self.s3_client.put_object(
147 Bucket=self.bucketname, Key=self.keyname, Body=file,
148 **kwargs)
149 else:
150 conn = self.connect_s3(self.access_key, self.secret_key)
151 bucket = conn.get_bucket(self.bucketname, validate=False)
152 key = bucket.new_key(self.keyname)
153 kwargs = {'policy': self.acl} if self.acl else {}
154 key.set_contents_from_file(file, **kwargs)
155 key.close()
156
157
158 class FTPFeedStorage(BlockingFeedStorage):
159
160 def __init__(self, uri, use_active_mode=False):
161 u = urlparse(uri)
162 self.host = u.hostname
163 self.port = int(u.port or '21')
164 self.username = u.username
165 self.password = unquote(u.password)
166 self.path = u.path
167 self.use_active_mode = use_active_mode
168
169 @classmethod
170 def from_crawler(cls, crawler, uri):
171 return cls(
172 uri=uri,
173 use_active_mode=crawler.settings.getbool('FEED_STORAGE_FTP_ACTIVE')
174 )
175
176 def _store_in_thread(self, file):
177 file.seek(0)
178 ftp = FTP()
179 ftp.connect(self.host, self.port)
180 ftp.login(self.username, self.password)
181 if self.use_active_mode:
182 ftp.set_pasv(False)
183 dirname, filename = posixpath.split(self.path)
184 ftp_makedirs_cwd(ftp, dirname)
185 ftp.storbinary('STOR %s' % filename, file)
186 ftp.quit()
187
188
189 class SpiderSlot(object):
190 def __init__(self, file, exporter, storage, uri):
191 self.file = file
192 self.exporter = exporter
193 self.storage = storage
194 self.uri = uri
195 self.itemcount = 0
196
197
198 class FeedExporter(object):
199
200 def __init__(self, settings):
201 self.settings = settings
202 self.urifmt = settings['FEED_URI']
203 if not self.urifmt:
204 raise NotConfigured
205 self.format = settings['FEED_FORMAT'].lower()
206 self.export_encoding = settings['FEED_EXPORT_ENCODING']
207 self.storages = self._load_components('FEED_STORAGES')
208 self.exporters = self._load_components('FEED_EXPORTERS')
209 if not self._storage_supported(self.urifmt):
210 raise NotConfigured
211 if not self._exporter_supported(self.format):
212 raise NotConfigured
213 self.store_empty = settings.getbool('FEED_STORE_EMPTY')
214 self._exporting = False
215 self.export_fields = settings.getlist('FEED_EXPORT_FIELDS') or None
216 self.indent = None
217 if settings.get('FEED_EXPORT_INDENT') is not None:
218 self.indent = settings.getint('FEED_EXPORT_INDENT')
219 uripar = settings['FEED_URI_PARAMS']
220 self._uripar = load_object(uripar) if uripar else lambda x, y: None
221
222 @classmethod
223 def from_crawler(cls, crawler):
224 o = cls(crawler.settings)
225 o.crawler = crawler
226 crawler.signals.connect(o.open_spider, signals.spider_opened)
227 crawler.signals.connect(o.close_spider, signals.spider_closed)
228 crawler.signals.connect(o.item_scraped, signals.item_scraped)
229 return o
230
231 def open_spider(self, spider):
232 uri = self.urifmt % self._get_uri_params(spider)
233 storage = self._get_storage(uri)
234 file = storage.open(spider)
235 exporter = self._get_exporter(file, fields_to_export=self.export_fields,
236 encoding=self.export_encoding, indent=self.indent)
237 if self.store_empty:
238 exporter.start_exporting()
239 self._exporting = True
240 self.slot = SpiderSlot(file, exporter, storage, uri)
241
242 def close_spider(self, spider):
243 slot = self.slot
244 if not slot.itemcount and not self.store_empty:
245 # We need to call slot.storage.store nonetheless to get the file
246 # properly closed.
247 return defer.maybeDeferred(slot.storage.store, slot.file)
248 if self._exporting:
249 slot.exporter.finish_exporting()
250 self._exporting = False
251 logfmt = "%s %%(format)s feed (%%(itemcount)d items) in: %%(uri)s"
252 log_args = {'format': self.format,
253 'itemcount': slot.itemcount,
254 'uri': slot.uri}
255 d = defer.maybeDeferred(slot.storage.store, slot.file)
256 d.addCallback(lambda _: logger.info(logfmt % "Stored", log_args,
257 extra={'spider': spider}))
258 d.addErrback(lambda f: logger.error(logfmt % "Error storing", log_args,
259 exc_info=failure_to_exc_info(f),
260 extra={'spider': spider}))
261 return d
262
263 def item_scraped(self, item, spider):
264 slot = self.slot
265 if not self._exporting:
266 slot.exporter.start_exporting()
267 self._exporting = True
268 slot.exporter.export_item(item)
269 slot.itemcount += 1
270 return item
271
272 def _load_components(self, setting_prefix):
273 conf = without_none_values(self.settings.getwithbase(setting_prefix))
274 d = {}
275 for k, v in conf.items():
276 try:
277 d[k] = load_object(v)
278 except NotConfigured:
279 pass
280 return d
281
282 def _exporter_supported(self, format):
283 if format in self.exporters:
284 return True
285 logger.error("Unknown feed format: %(format)s", {'format': format})
286
287 def _storage_supported(self, uri):
288 scheme = urlparse(uri).scheme
289 if scheme in self.storages:
290 try:
291 self._get_storage(uri)
292 return True
293 except NotConfigured as e:
294 logger.error("Disabled feed storage scheme: %(scheme)s. "
295 "Reason: %(reason)s",
296 {'scheme': scheme, 'reason': str(e)})
297 else:
298 logger.error("Unknown feed storage scheme: %(scheme)s",
299 {'scheme': scheme})
300
301 def _get_instance(self, objcls, *args, **kwargs):
302 return create_instance(
303 objcls, self.settings, getattr(self, 'crawler', None),
304 *args, **kwargs)
305
306 def _get_exporter(self, *args, **kwargs):
307 return self._get_instance(self.exporters[self.format], *args, **kwargs)
308
309 def _get_storage(self, uri):
310 return self._get_instance(self.storages[urlparse(uri).scheme], uri)
311
312 def _get_uri_params(self, spider):
313 params = {}
314 for k in dir(spider):
315 params[k] = getattr(spider, k)
316 ts = datetime.utcnow().replace(microsecond=0).isoformat().replace(':', '-')
317 params['time'] = ts
318 self._uripar(params, spider)
319 return params
320
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scrapy/extensions/feedexport.py b/scrapy/extensions/feedexport.py
--- a/scrapy/extensions/feedexport.py
+++ b/scrapy/extensions/feedexport.py
@@ -199,9 +199,9 @@
def __init__(self, settings):
self.settings = settings
- self.urifmt = settings['FEED_URI']
- if not self.urifmt:
+ if not settings['FEED_URI']:
raise NotConfigured
+ self.urifmt = str(settings['FEED_URI'])
self.format = settings['FEED_FORMAT'].lower()
self.export_encoding = settings['FEED_EXPORT_ENCODING']
self.storages = self._load_components('FEED_STORAGES')
|
{"golden_diff": "diff --git a/scrapy/extensions/feedexport.py b/scrapy/extensions/feedexport.py\n--- a/scrapy/extensions/feedexport.py\n+++ b/scrapy/extensions/feedexport.py\n@@ -199,9 +199,9 @@\n \n def __init__(self, settings):\n self.settings = settings\n- self.urifmt = settings['FEED_URI']\n- if not self.urifmt:\n+ if not settings['FEED_URI']:\n raise NotConfigured\n+ self.urifmt = str(settings['FEED_URI'])\n self.format = settings['FEED_FORMAT'].lower()\n self.export_encoding = settings['FEED_EXPORT_ENCODING']\n self.storages = self._load_components('FEED_STORAGES')\n", "issue": "Support pathlib.Path in FEED_URI\nMake things work the same when the value assigned to the `FEED_URI` setting is a string containing a path or an instance of `pathlib.Path`.\n", "before_files": [{"content": "\"\"\"\nFeed Exports extension\n\nSee documentation in docs/topics/feed-exports.rst\n\"\"\"\n\nimport os\nimport sys\nimport logging\nimport posixpath\nfrom tempfile import NamedTemporaryFile\nfrom datetime import datetime\nimport six\nfrom six.moves.urllib.parse import urlparse, unquote\nfrom ftplib import FTP\n\nfrom zope.interface import Interface, implementer\nfrom twisted.internet import defer, threads\nfrom w3lib.url import file_uri_to_path\n\nfrom scrapy import signals\nfrom scrapy.utils.ftp import ftp_makedirs_cwd\nfrom scrapy.exceptions import NotConfigured\nfrom scrapy.utils.misc import create_instance, load_object\nfrom scrapy.utils.log import failure_to_exc_info\nfrom scrapy.utils.python import without_none_values\nfrom scrapy.utils.boto import is_botocore\n\nlogger = logging.getLogger(__name__)\n\n\nclass IFeedStorage(Interface):\n \"\"\"Interface that all Feed Storages must implement\"\"\"\n\n def __init__(uri):\n \"\"\"Initialize the storage with the parameters given in the URI\"\"\"\n\n def open(spider):\n \"\"\"Open the storage for the given spider. 
It must return a file-like\n object that will be used for the exporters\"\"\"\n\n def store(file):\n \"\"\"Store the given file stream\"\"\"\n\n\n@implementer(IFeedStorage)\nclass BlockingFeedStorage(object):\n\n def open(self, spider):\n path = spider.crawler.settings['FEED_TEMPDIR']\n if path and not os.path.isdir(path):\n raise OSError('Not a Directory: ' + str(path))\n\n return NamedTemporaryFile(prefix='feed-', dir=path)\n\n def store(self, file):\n return threads.deferToThread(self._store_in_thread, file)\n\n def _store_in_thread(self, file):\n raise NotImplementedError\n\n\n@implementer(IFeedStorage)\nclass StdoutFeedStorage(object):\n\n def __init__(self, uri, _stdout=None):\n if not _stdout:\n _stdout = sys.stdout if six.PY2 else sys.stdout.buffer\n self._stdout = _stdout\n\n def open(self, spider):\n return self._stdout\n\n def store(self, file):\n pass\n\n\n@implementer(IFeedStorage)\nclass FileFeedStorage(object):\n\n def __init__(self, uri):\n self.path = file_uri_to_path(uri)\n\n def open(self, spider):\n dirname = os.path.dirname(self.path)\n if dirname and not os.path.exists(dirname):\n os.makedirs(dirname)\n return open(self.path, 'ab')\n\n def store(self, file):\n file.close()\n\n\nclass S3FeedStorage(BlockingFeedStorage):\n\n def __init__(self, uri, access_key=None, secret_key=None, acl=None):\n # BEGIN Backward compatibility for initialising without keys (and\n # without using from_crawler)\n no_defaults = access_key is None and secret_key is None\n if no_defaults:\n from scrapy.utils.project import get_project_settings\n settings = get_project_settings()\n if 'AWS_ACCESS_KEY_ID' in settings or 'AWS_SECRET_ACCESS_KEY' in settings:\n import warnings\n from scrapy.exceptions import ScrapyDeprecationWarning\n warnings.warn(\n \"Initialising `scrapy.extensions.feedexport.S3FeedStorage` \"\n \"without AWS keys is deprecated. 
Please supply credentials or \"\n \"use the `from_crawler()` constructor.\",\n category=ScrapyDeprecationWarning,\n stacklevel=2\n )\n access_key = settings['AWS_ACCESS_KEY_ID']\n secret_key = settings['AWS_SECRET_ACCESS_KEY']\n # END Backward compatibility\n u = urlparse(uri)\n self.bucketname = u.hostname\n self.access_key = u.username or access_key\n self.secret_key = u.password or secret_key\n self.is_botocore = is_botocore()\n self.keyname = u.path[1:] # remove first \"/\"\n self.acl = acl\n if self.is_botocore:\n import botocore.session\n session = botocore.session.get_session()\n self.s3_client = session.create_client(\n 's3', aws_access_key_id=self.access_key,\n aws_secret_access_key=self.secret_key)\n else:\n import boto\n self.connect_s3 = boto.connect_s3\n\n @classmethod\n def from_crawler(cls, crawler, uri):\n return cls(\n uri=uri,\n access_key=crawler.settings['AWS_ACCESS_KEY_ID'],\n secret_key=crawler.settings['AWS_SECRET_ACCESS_KEY'],\n acl=crawler.settings['FEED_STORAGE_S3_ACL'] or None\n )\n\n def _store_in_thread(self, file):\n file.seek(0)\n if self.is_botocore:\n kwargs = {'ACL': self.acl} if self.acl else {}\n self.s3_client.put_object(\n Bucket=self.bucketname, Key=self.keyname, Body=file,\n **kwargs)\n else:\n conn = self.connect_s3(self.access_key, self.secret_key)\n bucket = conn.get_bucket(self.bucketname, validate=False)\n key = bucket.new_key(self.keyname)\n kwargs = {'policy': self.acl} if self.acl else {}\n key.set_contents_from_file(file, **kwargs)\n key.close()\n\n\nclass FTPFeedStorage(BlockingFeedStorage):\n\n def __init__(self, uri, use_active_mode=False):\n u = urlparse(uri)\n self.host = u.hostname\n self.port = int(u.port or '21')\n self.username = u.username\n self.password = unquote(u.password)\n self.path = u.path\n self.use_active_mode = use_active_mode\n\n @classmethod\n def from_crawler(cls, crawler, uri):\n return cls(\n uri=uri,\n use_active_mode=crawler.settings.getbool('FEED_STORAGE_FTP_ACTIVE')\n )\n\n def _store_in_thread(self, file):\n file.seek(0)\n ftp = FTP()\n ftp.connect(self.host, self.port)\n ftp.login(self.username, self.password)\n if self.use_active_mode:\n ftp.set_pasv(False)\n dirname, filename = posixpath.split(self.path)\n ftp_makedirs_cwd(ftp, dirname)\n ftp.storbinary('STOR %s' % filename, file)\n ftp.quit()\n\n\nclass SpiderSlot(object):\n def __init__(self, file, exporter, storage, uri):\n self.file = file\n self.exporter = exporter\n self.storage = storage\n self.uri = uri\n self.itemcount = 0\n\n\nclass FeedExporter(object):\n\n def __init__(self, settings):\n self.settings = settings\n self.urifmt = settings['FEED_URI']\n if not self.urifmt:\n raise NotConfigured\n self.format = settings['FEED_FORMAT'].lower()\n self.export_encoding = settings['FEED_EXPORT_ENCODING']\n self.storages = self._load_components('FEED_STORAGES')\n self.exporters = self._load_components('FEED_EXPORTERS')\n if not self._storage_supported(self.urifmt):\n raise NotConfigured\n if not self._exporter_supported(self.format):\n raise NotConfigured\n self.store_empty = settings.getbool('FEED_STORE_EMPTY')\n self._exporting = False\n self.export_fields = settings.getlist('FEED_EXPORT_FIELDS') or None\n self.indent = None\n if settings.get('FEED_EXPORT_INDENT') is not None:\n self.indent = settings.getint('FEED_EXPORT_INDENT')\n uripar = settings['FEED_URI_PARAMS']\n self._uripar = load_object(uripar) if uripar else lambda x, y: None\n\n @classmethod\n def from_crawler(cls, crawler):\n o = cls(crawler.settings)\n o.crawler = crawler\n 
crawler.signals.connect(o.open_spider, signals.spider_opened)\n crawler.signals.connect(o.close_spider, signals.spider_closed)\n crawler.signals.connect(o.item_scraped, signals.item_scraped)\n return o\n\n def open_spider(self, spider):\n uri = self.urifmt % self._get_uri_params(spider)\n storage = self._get_storage(uri)\n file = storage.open(spider)\n exporter = self._get_exporter(file, fields_to_export=self.export_fields,\n encoding=self.export_encoding, indent=self.indent)\n if self.store_empty:\n exporter.start_exporting()\n self._exporting = True\n self.slot = SpiderSlot(file, exporter, storage, uri)\n\n def close_spider(self, spider):\n slot = self.slot\n if not slot.itemcount and not self.store_empty:\n # We need to call slot.storage.store nonetheless to get the file\n # properly closed.\n return defer.maybeDeferred(slot.storage.store, slot.file)\n if self._exporting:\n slot.exporter.finish_exporting()\n self._exporting = False\n logfmt = \"%s %%(format)s feed (%%(itemcount)d items) in: %%(uri)s\"\n log_args = {'format': self.format,\n 'itemcount': slot.itemcount,\n 'uri': slot.uri}\n d = defer.maybeDeferred(slot.storage.store, slot.file)\n d.addCallback(lambda _: logger.info(logfmt % \"Stored\", log_args,\n extra={'spider': spider}))\n d.addErrback(lambda f: logger.error(logfmt % \"Error storing\", log_args,\n exc_info=failure_to_exc_info(f),\n extra={'spider': spider}))\n return d\n\n def item_scraped(self, item, spider):\n slot = self.slot\n if not self._exporting:\n slot.exporter.start_exporting()\n self._exporting = True\n slot.exporter.export_item(item)\n slot.itemcount += 1\n return item\n\n def _load_components(self, setting_prefix):\n conf = without_none_values(self.settings.getwithbase(setting_prefix))\n d = {}\n for k, v in conf.items():\n try:\n d[k] = load_object(v)\n except NotConfigured:\n pass\n return d\n\n def _exporter_supported(self, format):\n if format in self.exporters:\n return True\n logger.error(\"Unknown feed format: %(format)s\", {'format': format})\n\n def _storage_supported(self, uri):\n scheme = urlparse(uri).scheme\n if scheme in self.storages:\n try:\n self._get_storage(uri)\n return True\n except NotConfigured as e:\n logger.error(\"Disabled feed storage scheme: %(scheme)s. 
\"\n \"Reason: %(reason)s\",\n {'scheme': scheme, 'reason': str(e)})\n else:\n logger.error(\"Unknown feed storage scheme: %(scheme)s\",\n {'scheme': scheme})\n\n def _get_instance(self, objcls, *args, **kwargs):\n return create_instance(\n objcls, self.settings, getattr(self, 'crawler', None),\n *args, **kwargs)\n\n def _get_exporter(self, *args, **kwargs):\n return self._get_instance(self.exporters[self.format], *args, **kwargs)\n\n def _get_storage(self, uri):\n return self._get_instance(self.storages[urlparse(uri).scheme], uri)\n\n def _get_uri_params(self, spider):\n params = {}\n for k in dir(spider):\n params[k] = getattr(spider, k)\n ts = datetime.utcnow().replace(microsecond=0).isoformat().replace(':', '-')\n params['time'] = ts\n self._uripar(params, spider)\n return params\n", "path": "scrapy/extensions/feedexport.py"}], "after_files": [{"content": "\"\"\"\nFeed Exports extension\n\nSee documentation in docs/topics/feed-exports.rst\n\"\"\"\n\nimport os\nimport sys\nimport logging\nimport posixpath\nfrom tempfile import NamedTemporaryFile\nfrom datetime import datetime\nimport six\nfrom six.moves.urllib.parse import urlparse, unquote\nfrom ftplib import FTP\n\nfrom zope.interface import Interface, implementer\nfrom twisted.internet import defer, threads\nfrom w3lib.url import file_uri_to_path\n\nfrom scrapy import signals\nfrom scrapy.utils.ftp import ftp_makedirs_cwd\nfrom scrapy.exceptions import NotConfigured\nfrom scrapy.utils.misc import create_instance, load_object\nfrom scrapy.utils.log import failure_to_exc_info\nfrom scrapy.utils.python import without_none_values\nfrom scrapy.utils.boto import is_botocore\n\nlogger = logging.getLogger(__name__)\n\n\nclass IFeedStorage(Interface):\n \"\"\"Interface that all Feed Storages must implement\"\"\"\n\n def __init__(uri):\n \"\"\"Initialize the storage with the parameters given in the URI\"\"\"\n\n def open(spider):\n \"\"\"Open the storage for the given spider. 
It must return a file-like\n object that will be used for the exporters\"\"\"\n\n def store(file):\n \"\"\"Store the given file stream\"\"\"\n\n\n@implementer(IFeedStorage)\nclass BlockingFeedStorage(object):\n\n def open(self, spider):\n path = spider.crawler.settings['FEED_TEMPDIR']\n if path and not os.path.isdir(path):\n raise OSError('Not a Directory: ' + str(path))\n\n return NamedTemporaryFile(prefix='feed-', dir=path)\n\n def store(self, file):\n return threads.deferToThread(self._store_in_thread, file)\n\n def _store_in_thread(self, file):\n raise NotImplementedError\n\n\n@implementer(IFeedStorage)\nclass StdoutFeedStorage(object):\n\n def __init__(self, uri, _stdout=None):\n if not _stdout:\n _stdout = sys.stdout if six.PY2 else sys.stdout.buffer\n self._stdout = _stdout\n\n def open(self, spider):\n return self._stdout\n\n def store(self, file):\n pass\n\n\n@implementer(IFeedStorage)\nclass FileFeedStorage(object):\n\n def __init__(self, uri):\n self.path = file_uri_to_path(uri)\n\n def open(self, spider):\n dirname = os.path.dirname(self.path)\n if dirname and not os.path.exists(dirname):\n os.makedirs(dirname)\n return open(self.path, 'ab')\n\n def store(self, file):\n file.close()\n\n\nclass S3FeedStorage(BlockingFeedStorage):\n\n def __init__(self, uri, access_key=None, secret_key=None, acl=None):\n # BEGIN Backward compatibility for initialising without keys (and\n # without using from_crawler)\n no_defaults = access_key is None and secret_key is None\n if no_defaults:\n from scrapy.utils.project import get_project_settings\n settings = get_project_settings()\n if 'AWS_ACCESS_KEY_ID' in settings or 'AWS_SECRET_ACCESS_KEY' in settings:\n import warnings\n from scrapy.exceptions import ScrapyDeprecationWarning\n warnings.warn(\n \"Initialising `scrapy.extensions.feedexport.S3FeedStorage` \"\n \"without AWS keys is deprecated. 
Please supply credentials or \"\n \"use the `from_crawler()` constructor.\",\n category=ScrapyDeprecationWarning,\n stacklevel=2\n )\n access_key = settings['AWS_ACCESS_KEY_ID']\n secret_key = settings['AWS_SECRET_ACCESS_KEY']\n # END Backward compatibility\n u = urlparse(uri)\n self.bucketname = u.hostname\n self.access_key = u.username or access_key\n self.secret_key = u.password or secret_key\n self.is_botocore = is_botocore()\n self.keyname = u.path[1:] # remove first \"/\"\n self.acl = acl\n if self.is_botocore:\n import botocore.session\n session = botocore.session.get_session()\n self.s3_client = session.create_client(\n 's3', aws_access_key_id=self.access_key,\n aws_secret_access_key=self.secret_key)\n else:\n import boto\n self.connect_s3 = boto.connect_s3\n\n @classmethod\n def from_crawler(cls, crawler, uri):\n return cls(\n uri=uri,\n access_key=crawler.settings['AWS_ACCESS_KEY_ID'],\n secret_key=crawler.settings['AWS_SECRET_ACCESS_KEY'],\n acl=crawler.settings['FEED_STORAGE_S3_ACL'] or None\n )\n\n def _store_in_thread(self, file):\n file.seek(0)\n if self.is_botocore:\n kwargs = {'ACL': self.acl} if self.acl else {}\n self.s3_client.put_object(\n Bucket=self.bucketname, Key=self.keyname, Body=file,\n **kwargs)\n else:\n conn = self.connect_s3(self.access_key, self.secret_key)\n bucket = conn.get_bucket(self.bucketname, validate=False)\n key = bucket.new_key(self.keyname)\n kwargs = {'policy': self.acl} if self.acl else {}\n key.set_contents_from_file(file, **kwargs)\n key.close()\n\n\nclass FTPFeedStorage(BlockingFeedStorage):\n\n def __init__(self, uri, use_active_mode=False):\n u = urlparse(uri)\n self.host = u.hostname\n self.port = int(u.port or '21')\n self.username = u.username\n self.password = unquote(u.password)\n self.path = u.path\n self.use_active_mode = use_active_mode\n\n @classmethod\n def from_crawler(cls, crawler, uri):\n return cls(\n uri=uri,\n use_active_mode=crawler.settings.getbool('FEED_STORAGE_FTP_ACTIVE')\n )\n\n def _store_in_thread(self, file):\n file.seek(0)\n ftp = FTP()\n ftp.connect(self.host, self.port)\n ftp.login(self.username, self.password)\n if self.use_active_mode:\n ftp.set_pasv(False)\n dirname, filename = posixpath.split(self.path)\n ftp_makedirs_cwd(ftp, dirname)\n ftp.storbinary('STOR %s' % filename, file)\n ftp.quit()\n\n\nclass SpiderSlot(object):\n def __init__(self, file, exporter, storage, uri):\n self.file = file\n self.exporter = exporter\n self.storage = storage\n self.uri = uri\n self.itemcount = 0\n\n\nclass FeedExporter(object):\n\n def __init__(self, settings):\n self.settings = settings\n if not settings['FEED_URI']:\n raise NotConfigured\n self.urifmt = str(settings['FEED_URI'])\n self.format = settings['FEED_FORMAT'].lower()\n self.export_encoding = settings['FEED_EXPORT_ENCODING']\n self.storages = self._load_components('FEED_STORAGES')\n self.exporters = self._load_components('FEED_EXPORTERS')\n if not self._storage_supported(self.urifmt):\n raise NotConfigured\n if not self._exporter_supported(self.format):\n raise NotConfigured\n self.store_empty = settings.getbool('FEED_STORE_EMPTY')\n self._exporting = False\n self.export_fields = settings.getlist('FEED_EXPORT_FIELDS') or None\n self.indent = None\n if settings.get('FEED_EXPORT_INDENT') is not None:\n self.indent = settings.getint('FEED_EXPORT_INDENT')\n uripar = settings['FEED_URI_PARAMS']\n self._uripar = load_object(uripar) if uripar else lambda x, y: None\n\n @classmethod\n def from_crawler(cls, crawler):\n o = cls(crawler.settings)\n o.crawler = crawler\n 
crawler.signals.connect(o.open_spider, signals.spider_opened)\n crawler.signals.connect(o.close_spider, signals.spider_closed)\n crawler.signals.connect(o.item_scraped, signals.item_scraped)\n return o\n\n def open_spider(self, spider):\n uri = self.urifmt % self._get_uri_params(spider)\n storage = self._get_storage(uri)\n file = storage.open(spider)\n exporter = self._get_exporter(file, fields_to_export=self.export_fields,\n encoding=self.export_encoding, indent=self.indent)\n if self.store_empty:\n exporter.start_exporting()\n self._exporting = True\n self.slot = SpiderSlot(file, exporter, storage, uri)\n\n def close_spider(self, spider):\n slot = self.slot\n if not slot.itemcount and not self.store_empty:\n return\n if self._exporting:\n slot.exporter.finish_exporting()\n self._exporting = False\n logfmt = \"%s %%(format)s feed (%%(itemcount)d items) in: %%(uri)s\"\n log_args = {'format': self.format,\n 'itemcount': slot.itemcount,\n 'uri': slot.uri}\n d = defer.maybeDeferred(slot.storage.store, slot.file)\n d.addCallback(lambda _: logger.info(logfmt % \"Stored\", log_args,\n extra={'spider': spider}))\n d.addErrback(lambda f: logger.error(logfmt % \"Error storing\", log_args,\n exc_info=failure_to_exc_info(f),\n extra={'spider': spider}))\n return d\n\n def item_scraped(self, item, spider):\n slot = self.slot\n if not self._exporting:\n slot.exporter.start_exporting()\n self._exporting = True\n slot.exporter.export_item(item)\n slot.itemcount += 1\n return item\n\n def _load_components(self, setting_prefix):\n conf = without_none_values(self.settings.getwithbase(setting_prefix))\n d = {}\n for k, v in conf.items():\n try:\n d[k] = load_object(v)\n except NotConfigured:\n pass\n return d\n\n def _exporter_supported(self, format):\n if format in self.exporters:\n return True\n logger.error(\"Unknown feed format: %(format)s\", {'format': format})\n\n def _storage_supported(self, uri):\n scheme = urlparse(uri).scheme\n if scheme in self.storages:\n try:\n self._get_storage(uri)\n return True\n except NotConfigured as e:\n logger.error(\"Disabled feed storage scheme: %(scheme)s. \"\n \"Reason: %(reason)s\",\n {'scheme': scheme, 'reason': str(e)})\n else:\n logger.error(\"Unknown feed storage scheme: %(scheme)s\",\n {'scheme': scheme})\n\n def _get_instance(self, objcls, *args, **kwargs):\n return create_instance(\n objcls, self.settings, getattr(self, 'crawler', None),\n *args, **kwargs)\n\n def _get_exporter(self, *args, **kwargs):\n return self._get_instance(self.exporters[self.format], *args, **kwargs)\n\n def _get_storage(self, uri):\n return self._get_instance(self.storages[urlparse(uri).scheme], uri)\n\n def _get_uri_params(self, spider):\n params = {}\n for k in dir(spider):\n params[k] = getattr(spider, k)\n ts = datetime.utcnow().replace(microsecond=0).isoformat().replace(':', '-')\n params['time'] = ts\n self._uripar(params, spider)\n return params\n", "path": "scrapy/extensions/feedexport.py"}]}
| num_tokens: 3,644 | num_tokens_diff: 159 |