problem_id (string, lengths 18–22) | source (string, 1 distinct value) | task_type (string, 1 distinct value) | in_source_id (string, lengths 13–58) | prompt (string, lengths 1.1k–10.2k) | golden_diff (string, lengths 151–4.94k) | verification_info (string, lengths 582–21k) | num_tokens (int64, 271–2.05k) | num_tokens_diff (int64, 47–1.02k) |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_9267 | rasdani/github-patches | git_diff | pre-commit__pre-commit-1480 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
random.shuffle's random= argument got deprecated
Related issue: [bpo-40465](https://bugs.python.org/issue40465).
```
black..................................................................../home/isidentical/.venv/lib/python3.10/site-packages/pre_commit/languages/helpers.py:95: DeprecationWarning: The *random* parameter to shuffle() has been deprecated
since Python 3.9 and will be removed in a subsequent version.
random.shuffle(seq, random=fixed_random.random)
Passed
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/languages/helpers.py`
Content:
```
1 import multiprocessing
2 import os
3 import random
4 from typing import Any
5 from typing import List
6 from typing import Optional
7 from typing import overload
8 from typing import Sequence
9 from typing import Tuple
10 from typing import TYPE_CHECKING
11
12 import pre_commit.constants as C
13 from pre_commit.hook import Hook
14 from pre_commit.prefix import Prefix
15 from pre_commit.util import cmd_output_b
16 from pre_commit.xargs import xargs
17
18 if TYPE_CHECKING:
19 from typing import NoReturn
20
21 FIXED_RANDOM_SEED = 1542676186
22
23
24 def run_setup_cmd(prefix: Prefix, cmd: Tuple[str, ...]) -> None:
25 cmd_output_b(*cmd, cwd=prefix.prefix_dir)
26
27
28 @overload
29 def environment_dir(d: None, language_version: str) -> None: ...
30 @overload
31 def environment_dir(d: str, language_version: str) -> str: ...
32
33
34 def environment_dir(d: Optional[str], language_version: str) -> Optional[str]:
35 if d is None:
36 return None
37 else:
38 return f'{d}-{language_version}'
39
40
41 def assert_version_default(binary: str, version: str) -> None:
42 if version != C.DEFAULT:
43 raise AssertionError(
44 f'For now, pre-commit requires system-installed {binary}',
45 )
46
47
48 def assert_no_additional_deps(
49 lang: str,
50 additional_deps: Sequence[str],
51 ) -> None:
52 if additional_deps:
53 raise AssertionError(
54 f'For now, pre-commit does not support '
55 f'additional_dependencies for {lang}',
56 )
57
58
59 def basic_get_default_version() -> str:
60 return C.DEFAULT
61
62
63 def basic_healthy(prefix: Prefix, language_version: str) -> bool:
64 return True
65
66
67 def no_install(
68 prefix: Prefix,
69 version: str,
70 additional_dependencies: Sequence[str],
71 ) -> 'NoReturn':
72 raise AssertionError('This type is not installable')
73
74
75 def target_concurrency(hook: Hook) -> int:
76 if hook.require_serial or 'PRE_COMMIT_NO_CONCURRENCY' in os.environ:
77 return 1
78 else:
79 # Travis appears to have a bunch of CPUs, but we can't use them all.
80 if 'TRAVIS' in os.environ:
81 return 2
82 else:
83 try:
84 return multiprocessing.cpu_count()
85 except NotImplementedError:
86 return 1
87
88
89 def _shuffled(seq: Sequence[str]) -> List[str]:
90 """Deterministically shuffle"""
91 fixed_random = random.Random()
92 fixed_random.seed(FIXED_RANDOM_SEED, version=1)
93
94 seq = list(seq)
95 random.shuffle(seq, random=fixed_random.random)
96 return seq
97
98
99 def run_xargs(
100 hook: Hook,
101 cmd: Tuple[str, ...],
102 file_args: Sequence[str],
103 **kwargs: Any,
104 ) -> Tuple[int, bytes]:
105 # Shuffle the files so that they more evenly fill out the xargs partitions,
106 # but do it deterministically in case a hook cares about ordering.
107 file_args = _shuffled(file_args)
108 kwargs['target_concurrency'] = target_concurrency(hook)
109 return xargs(cmd, file_args, **kwargs)
110
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pre_commit/languages/helpers.py b/pre_commit/languages/helpers.py
--- a/pre_commit/languages/helpers.py
+++ b/pre_commit/languages/helpers.py
@@ -18,7 +18,7 @@
if TYPE_CHECKING:
from typing import NoReturn
-FIXED_RANDOM_SEED = 1542676186
+FIXED_RANDOM_SEED = 1542676187
def run_setup_cmd(prefix: Prefix, cmd: Tuple[str, ...]) -> None:
@@ -92,7 +92,7 @@
fixed_random.seed(FIXED_RANDOM_SEED, version=1)
seq = list(seq)
- random.shuffle(seq, random=fixed_random.random)
+ fixed_random.shuffle(seq)
return seq
| {"golden_diff": "diff --git a/pre_commit/languages/helpers.py b/pre_commit/languages/helpers.py\n--- a/pre_commit/languages/helpers.py\n+++ b/pre_commit/languages/helpers.py\n@@ -18,7 +18,7 @@\n if TYPE_CHECKING:\n from typing import NoReturn\n \n-FIXED_RANDOM_SEED = 1542676186\n+FIXED_RANDOM_SEED = 1542676187\n \n \n def run_setup_cmd(prefix: Prefix, cmd: Tuple[str, ...]) -> None:\n@@ -92,7 +92,7 @@\n fixed_random.seed(FIXED_RANDOM_SEED, version=1)\n \n seq = list(seq)\n- random.shuffle(seq, random=fixed_random.random)\n+ fixed_random.shuffle(seq)\n return seq\n", "issue": "random.shuffle's random= argument got deprecated\nRelated issue: [bpo-40465](https://bugs.python.org/issue40465).\r\n```\r\nblack..................................................................../home/isidentical/.venv/lib/python3.10/site-packages/pre_commit/languages/helpers.py:95: DeprecationWarning: The *random* parameter to shuffle() has been deprecated\r\nsince Python 3.9 and will be removed in a subsequent version.\r\n random.shuffle(seq, random=fixed_random.random)\r\nPassed\r\n```\r\n\r\n\n", "before_files": [{"content": "import multiprocessing\nimport os\nimport random\nfrom typing import Any\nfrom typing import List\nfrom typing import Optional\nfrom typing import overload\nfrom typing import Sequence\nfrom typing import Tuple\nfrom typing import TYPE_CHECKING\n\nimport pre_commit.constants as C\nfrom pre_commit.hook import Hook\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import cmd_output_b\nfrom pre_commit.xargs import xargs\n\nif TYPE_CHECKING:\n from typing import NoReturn\n\nFIXED_RANDOM_SEED = 1542676186\n\n\ndef run_setup_cmd(prefix: Prefix, cmd: Tuple[str, ...]) -> None:\n cmd_output_b(*cmd, cwd=prefix.prefix_dir)\n\n\n@overload\ndef environment_dir(d: None, language_version: str) -> None: ...\n@overload\ndef environment_dir(d: str, language_version: str) -> str: ...\n\n\ndef environment_dir(d: Optional[str], language_version: str) -> Optional[str]:\n if d is None:\n return None\n else:\n return f'{d}-{language_version}'\n\n\ndef assert_version_default(binary: str, version: str) -> None:\n if version != C.DEFAULT:\n raise AssertionError(\n f'For now, pre-commit requires system-installed {binary}',\n )\n\n\ndef assert_no_additional_deps(\n lang: str,\n additional_deps: Sequence[str],\n) -> None:\n if additional_deps:\n raise AssertionError(\n f'For now, pre-commit does not support '\n f'additional_dependencies for {lang}',\n )\n\n\ndef basic_get_default_version() -> str:\n return C.DEFAULT\n\n\ndef basic_healthy(prefix: Prefix, language_version: str) -> bool:\n return True\n\n\ndef no_install(\n prefix: Prefix,\n version: str,\n additional_dependencies: Sequence[str],\n) -> 'NoReturn':\n raise AssertionError('This type is not installable')\n\n\ndef target_concurrency(hook: Hook) -> int:\n if hook.require_serial or 'PRE_COMMIT_NO_CONCURRENCY' in os.environ:\n return 1\n else:\n # Travis appears to have a bunch of CPUs, but we can't use them all.\n if 'TRAVIS' in os.environ:\n return 2\n else:\n try:\n return multiprocessing.cpu_count()\n except NotImplementedError:\n return 1\n\n\ndef _shuffled(seq: Sequence[str]) -> List[str]:\n \"\"\"Deterministically shuffle\"\"\"\n fixed_random = random.Random()\n fixed_random.seed(FIXED_RANDOM_SEED, version=1)\n\n seq = list(seq)\n random.shuffle(seq, random=fixed_random.random)\n return seq\n\n\ndef run_xargs(\n hook: Hook,\n cmd: Tuple[str, ...],\n file_args: Sequence[str],\n **kwargs: Any,\n) -> Tuple[int, bytes]:\n # Shuffle the files so 
that they more evenly fill out the xargs partitions,\n # but do it deterministically in case a hook cares about ordering.\n file_args = _shuffled(file_args)\n kwargs['target_concurrency'] = target_concurrency(hook)\n return xargs(cmd, file_args, **kwargs)\n", "path": "pre_commit/languages/helpers.py"}], "after_files": [{"content": "import multiprocessing\nimport os\nimport random\nfrom typing import Any\nfrom typing import List\nfrom typing import Optional\nfrom typing import overload\nfrom typing import Sequence\nfrom typing import Tuple\nfrom typing import TYPE_CHECKING\n\nimport pre_commit.constants as C\nfrom pre_commit.hook import Hook\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import cmd_output_b\nfrom pre_commit.xargs import xargs\n\nif TYPE_CHECKING:\n from typing import NoReturn\n\nFIXED_RANDOM_SEED = 1542676187\n\n\ndef run_setup_cmd(prefix: Prefix, cmd: Tuple[str, ...]) -> None:\n cmd_output_b(*cmd, cwd=prefix.prefix_dir)\n\n\n@overload\ndef environment_dir(d: None, language_version: str) -> None: ...\n@overload\ndef environment_dir(d: str, language_version: str) -> str: ...\n\n\ndef environment_dir(d: Optional[str], language_version: str) -> Optional[str]:\n if d is None:\n return None\n else:\n return f'{d}-{language_version}'\n\n\ndef assert_version_default(binary: str, version: str) -> None:\n if version != C.DEFAULT:\n raise AssertionError(\n f'For now, pre-commit requires system-installed {binary}',\n )\n\n\ndef assert_no_additional_deps(\n lang: str,\n additional_deps: Sequence[str],\n) -> None:\n if additional_deps:\n raise AssertionError(\n f'For now, pre-commit does not support '\n f'additional_dependencies for {lang}',\n )\n\n\ndef basic_get_default_version() -> str:\n return C.DEFAULT\n\n\ndef basic_healthy(prefix: Prefix, language_version: str) -> bool:\n return True\n\n\ndef no_install(\n prefix: Prefix,\n version: str,\n additional_dependencies: Sequence[str],\n) -> 'NoReturn':\n raise AssertionError('This type is not installable')\n\n\ndef target_concurrency(hook: Hook) -> int:\n if hook.require_serial or 'PRE_COMMIT_NO_CONCURRENCY' in os.environ:\n return 1\n else:\n # Travis appears to have a bunch of CPUs, but we can't use them all.\n if 'TRAVIS' in os.environ:\n return 2\n else:\n try:\n return multiprocessing.cpu_count()\n except NotImplementedError:\n return 1\n\n\ndef _shuffled(seq: Sequence[str]) -> List[str]:\n \"\"\"Deterministically shuffle\"\"\"\n fixed_random = random.Random()\n fixed_random.seed(FIXED_RANDOM_SEED, version=1)\n\n seq = list(seq)\n fixed_random.shuffle(seq)\n return seq\n\n\ndef run_xargs(\n hook: Hook,\n cmd: Tuple[str, ...],\n file_args: Sequence[str],\n **kwargs: Any,\n) -> Tuple[int, bytes]:\n # Shuffle the files so that they more evenly fill out the xargs partitions,\n # but do it deterministically in case a hook cares about ordering.\n file_args = _shuffled(file_args)\n kwargs['target_concurrency'] = target_concurrency(hook)\n return xargs(cmd, file_args, **kwargs)\n", "path": "pre_commit/languages/helpers.py"}]} | 1,278 | 174 |
gh_patches_debug_20264 | rasdani/github-patches | git_diff | svthalia__concrexit-3089 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Admin sales shift API should also return total_paid_revenue
### Is your feature request related to a problem? Please describe.
The current admin sales shift api route only gives the total_revenue for a shift, but this might contain unpaid orders. We don't want those in certain scoreboards, like for the rag week.
### Describe the solution you'd like
Add `total_paid_revenue`
### Motivation
### Describe alternatives you've considered
### Additional context
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/sales/api/v2/admin/serializers/shift.py`
Content:
```
1 from rest_framework import serializers
2
3 from sales.models.product import ProductListItem
4 from sales.models.shift import Shift
5
6
7 class ProductListItemSerializer(serializers.ModelSerializer):
8 """Serializer for product list items."""
9
10 class Meta:
11 model = ProductListItem
12 fields = ("name", "price", "age_restricted")
13 read_only_fields = ("name", "price", "age_restricted")
14
15 name = serializers.SerializerMethodField("_name")
16 age_restricted = serializers.SerializerMethodField("_age_restricted")
17
18 def _name(self, instance):
19 return instance.product.name
20
21 def _age_restricted(self, instance):
22 return instance.product.age_restricted
23
24
25 class ShiftSerializer(serializers.ModelSerializer):
26 """Serializer for shifts."""
27
28 class Meta:
29 model = Shift
30 fields = (
31 "pk",
32 "title",
33 "locked",
34 "active",
35 "start",
36 "end",
37 "products",
38 "total_revenue",
39 "num_orders",
40 "product_sales",
41 )
42
43 total_revenue = serializers.DecimalField(
44 max_digits=10, decimal_places=2, min_value=0, read_only=True
45 )
46
47 products = ProductListItemSerializer(
48 source="product_list.product_items", many=True, read_only=True
49 )
50
51 title = serializers.SerializerMethodField("_get_title")
52
53 def _get_title(self, instance):
54 return instance.title
55
56 product_sales = serializers.JSONField()
57
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/sales/api/v2/admin/serializers/shift.py b/website/sales/api/v2/admin/serializers/shift.py
--- a/website/sales/api/v2/admin/serializers/shift.py
+++ b/website/sales/api/v2/admin/serializers/shift.py
@@ -1,5 +1,6 @@
from rest_framework import serializers
+from payments.api.v2.serializers.payment_amount import PaymentAmountSerializer
from sales.models.product import ProductListItem
from sales.models.shift import Shift
@@ -36,13 +37,13 @@
"end",
"products",
"total_revenue",
+ "total_revenue_paid",
"num_orders",
"product_sales",
)
- total_revenue = serializers.DecimalField(
- max_digits=10, decimal_places=2, min_value=0, read_only=True
- )
+ total_revenue = PaymentAmountSerializer(min_value=0, read_only=True)
+ total_revenue_paid = PaymentAmountSerializer(min_value=0, read_only=True)
products = ProductListItemSerializer(
source="product_list.product_items", many=True, read_only=True
| {"golden_diff": "diff --git a/website/sales/api/v2/admin/serializers/shift.py b/website/sales/api/v2/admin/serializers/shift.py\n--- a/website/sales/api/v2/admin/serializers/shift.py\n+++ b/website/sales/api/v2/admin/serializers/shift.py\n@@ -1,5 +1,6 @@\n from rest_framework import serializers\n \n+from payments.api.v2.serializers.payment_amount import PaymentAmountSerializer\n from sales.models.product import ProductListItem\n from sales.models.shift import Shift\n \n@@ -36,13 +37,13 @@\n \"end\",\n \"products\",\n \"total_revenue\",\n+ \"total_revenue_paid\",\n \"num_orders\",\n \"product_sales\",\n )\n \n- total_revenue = serializers.DecimalField(\n- max_digits=10, decimal_places=2, min_value=0, read_only=True\n- )\n+ total_revenue = PaymentAmountSerializer(min_value=0, read_only=True)\n+ total_revenue_paid = PaymentAmountSerializer(min_value=0, read_only=True)\n \n products = ProductListItemSerializer(\n source=\"product_list.product_items\", many=True, read_only=True\n", "issue": "Admin sales shift API should also return total_paid_revenue\n### Is your feature request related to a problem? Please describe.\r\nThe current admin sales shift api route only gives the total_revenue for a shift, but this might contain unpaid orders. We don't want those in certain scoreboards, like for the rag week.\r\n\r\n### Describe the solution you'd like\r\nAdd `total_paid_revenue`\r\n\r\n### Motivation\r\n\r\n### Describe alternatives you've considered\r\n\r\n### Additional context\r\n\n", "before_files": [{"content": "from rest_framework import serializers\n\nfrom sales.models.product import ProductListItem\nfrom sales.models.shift import Shift\n\n\nclass ProductListItemSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for product list items.\"\"\"\n\n class Meta:\n model = ProductListItem\n fields = (\"name\", \"price\", \"age_restricted\")\n read_only_fields = (\"name\", \"price\", \"age_restricted\")\n\n name = serializers.SerializerMethodField(\"_name\")\n age_restricted = serializers.SerializerMethodField(\"_age_restricted\")\n\n def _name(self, instance):\n return instance.product.name\n\n def _age_restricted(self, instance):\n return instance.product.age_restricted\n\n\nclass ShiftSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for shifts.\"\"\"\n\n class Meta:\n model = Shift\n fields = (\n \"pk\",\n \"title\",\n \"locked\",\n \"active\",\n \"start\",\n \"end\",\n \"products\",\n \"total_revenue\",\n \"num_orders\",\n \"product_sales\",\n )\n\n total_revenue = serializers.DecimalField(\n max_digits=10, decimal_places=2, min_value=0, read_only=True\n )\n\n products = ProductListItemSerializer(\n source=\"product_list.product_items\", many=True, read_only=True\n )\n\n title = serializers.SerializerMethodField(\"_get_title\")\n\n def _get_title(self, instance):\n return instance.title\n\n product_sales = serializers.JSONField()\n", "path": "website/sales/api/v2/admin/serializers/shift.py"}], "after_files": [{"content": "from rest_framework import serializers\n\nfrom payments.api.v2.serializers.payment_amount import PaymentAmountSerializer\nfrom sales.models.product import ProductListItem\nfrom sales.models.shift import Shift\n\n\nclass ProductListItemSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for product list items.\"\"\"\n\n class Meta:\n model = ProductListItem\n fields = (\"name\", \"price\", \"age_restricted\")\n read_only_fields = (\"name\", \"price\", \"age_restricted\")\n\n name = serializers.SerializerMethodField(\"_name\")\n age_restricted = 
serializers.SerializerMethodField(\"_age_restricted\")\n\n def _name(self, instance):\n return instance.product.name\n\n def _age_restricted(self, instance):\n return instance.product.age_restricted\n\n\nclass ShiftSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for shifts.\"\"\"\n\n class Meta:\n model = Shift\n fields = (\n \"pk\",\n \"title\",\n \"locked\",\n \"active\",\n \"start\",\n \"end\",\n \"products\",\n \"total_revenue\",\n \"total_revenue_paid\",\n \"num_orders\",\n \"product_sales\",\n )\n\n total_revenue = PaymentAmountSerializer(min_value=0, read_only=True)\n total_revenue_paid = PaymentAmountSerializer(min_value=0, read_only=True)\n\n products = ProductListItemSerializer(\n source=\"product_list.product_items\", many=True, read_only=True\n )\n\n title = serializers.SerializerMethodField(\"_get_title\")\n\n def _get_title(self, instance):\n return instance.title\n\n product_sales = serializers.JSONField()\n", "path": "website/sales/api/v2/admin/serializers/shift.py"}]} | 775 | 257 |
gh_patches_debug_8616 | rasdani/github-patches | git_diff | googleapis__google-api-python-client-1271 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove duplicate docs generation
In `synth.py` we have a `nox` session to generate the docs [here](https://github.com/googleapis/google-api-python-client/blob/master/synth.py#L36). The same python script is running as part of the Github action in #1187, so we should remove the `docs` session from `synth.py` and `noxfile.py`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `noxfile.py`
Content:
```
1
2 # Copyright 2020 Google LLC
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 import sys
17
18 import nox
19
20 test_dependencies = [
21 "django>=2.0.0",
22 "google-auth",
23 "google-auth-httplib2",
24 "mox",
25 "parameterized",
26 "pyopenssl",
27 "pytest",
28 "pytest-cov",
29 "webtest",
30 "coverage",
31 "unittest2",
32 "mock",
33 ]
34
35
36 @nox.session(python=["3.7"])
37 def lint(session):
38 session.install("flake8")
39 session.run(
40 "flake8",
41 "googleapiclient",
42 "tests",
43 "--count",
44 "--select=E9,F63,F7,F82",
45 "--show-source",
46 "--statistics",
47 )
48
49
50 @nox.session(python=["3.6", "3.7", "3.8", "3.9"])
51 @nox.parametrize(
52 "oauth2client",
53 [
54 "oauth2client<2dev",
55 "oauth2client>=2,<=3dev",
56 "oauth2client>=3,<=4dev",
57 "oauth2client>=4,<=5dev",
58 ],
59 )
60 def unit(session, oauth2client):
61 session.install(*test_dependencies)
62 session.install(oauth2client)
63 session.install('.')
64
65 # Run py.test against the unit tests.
66 session.run(
67 "py.test",
68 "--quiet",
69 "--cov=googleapiclient",
70 "--cov=tests",
71 "--cov-append",
72 "--cov-config=.coveragerc",
73 "--cov-report=",
74 "--cov-fail-under=85",
75 "tests",
76 *session.posargs,
77 )
78
79
80 @nox.session(python="3.6")
81 def docs(session):
82 session.install('.')
83 session.run("python", "describe.py")
```
Path: `synth.py`
Content:
```
1 # Copyright 2020 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import synthtool as s
16 from synthtool import gcp
17
18
19 common = gcp.CommonTemplates()
20
21 # ----------------------------------------------------------------------------
22 # Add templated files
23 # ----------------------------------------------------------------------------
24 templated_files = common.py_library()
25
26 # Copy kokoro configs.
27 # Docs are excluded as repo docs cannot currently be generated using sphinx.
28 s.move(templated_files / '.kokoro', excludes=['**/docs/*', 'publish-docs.sh'])
29
30 # Also move issue templates
31 s.move(templated_files / '.github')
32
33 # ----------------------------------------------------------------------------
34 # Generate docs
35 # ----------------------------------------------------------------------------
36 s.shell.run(["nox", "-s", "docs"], hide_output=False)
37
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/noxfile.py b/noxfile.py
--- a/noxfile.py
+++ b/noxfile.py
@@ -75,9 +75,3 @@
"tests",
*session.posargs,
)
-
-
[email protected](python="3.6")
-def docs(session):
- session.install('.')
- session.run("python", "describe.py")
\ No newline at end of file
diff --git a/synth.py b/synth.py
--- a/synth.py
+++ b/synth.py
@@ -29,8 +29,3 @@
# Also move issue templates
s.move(templated_files / '.github')
-
-# ----------------------------------------------------------------------------
-# Generate docs
-# ----------------------------------------------------------------------------
-s.shell.run(["nox", "-s", "docs"], hide_output=False)
| {"golden_diff": "diff --git a/noxfile.py b/noxfile.py\n--- a/noxfile.py\n+++ b/noxfile.py\n@@ -75,9 +75,3 @@\n \"tests\",\n *session.posargs,\n )\n-\n-\[email protected](python=\"3.6\")\n-def docs(session):\n- session.install('.')\n- session.run(\"python\", \"describe.py\")\n\\ No newline at end of file\ndiff --git a/synth.py b/synth.py\n--- a/synth.py\n+++ b/synth.py\n@@ -29,8 +29,3 @@\n \n # Also move issue templates\n s.move(templated_files / '.github')\n-\n-# ----------------------------------------------------------------------------\n-# Generate docs\n-# ----------------------------------------------------------------------------\n-s.shell.run([\"nox\", \"-s\", \"docs\"], hide_output=False)\n", "issue": "Remove duplicate docs generation\nIn `synth.py` we have a `nox` session to generate the docs [here](https://github.com/googleapis/google-api-python-client/blob/master/synth.py#L36). The same python script is running as part of the Github action in #1187, so we should remove the `docs` session from `synth.py` and `noxfile.py`.\n", "before_files": [{"content": "\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\n\nimport nox\n\ntest_dependencies = [\n \"django>=2.0.0\",\n \"google-auth\",\n \"google-auth-httplib2\",\n \"mox\",\n \"parameterized\",\n \"pyopenssl\",\n \"pytest\",\n \"pytest-cov\",\n \"webtest\",\n \"coverage\",\n \"unittest2\",\n \"mock\",\n]\n\n\[email protected](python=[\"3.7\"])\ndef lint(session):\n session.install(\"flake8\")\n session.run(\n \"flake8\",\n \"googleapiclient\",\n \"tests\",\n \"--count\",\n \"--select=E9,F63,F7,F82\",\n \"--show-source\",\n \"--statistics\",\n )\n\n\[email protected](python=[\"3.6\", \"3.7\", \"3.8\", \"3.9\"])\[email protected](\n \"oauth2client\",\n [\n \"oauth2client<2dev\",\n \"oauth2client>=2,<=3dev\",\n \"oauth2client>=3,<=4dev\",\n \"oauth2client>=4,<=5dev\",\n ],\n)\ndef unit(session, oauth2client):\n session.install(*test_dependencies)\n session.install(oauth2client)\n session.install('.')\n\n # Run py.test against the unit tests.\n session.run(\n \"py.test\",\n \"--quiet\",\n \"--cov=googleapiclient\",\n \"--cov=tests\",\n \"--cov-append\",\n \"--cov-config=.coveragerc\",\n \"--cov-report=\",\n \"--cov-fail-under=85\",\n \"tests\",\n *session.posargs,\n )\n\n\[email protected](python=\"3.6\")\ndef docs(session):\n session.install('.')\n session.run(\"python\", \"describe.py\")", "path": "noxfile.py"}, {"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the 
License.\n\nimport synthtool as s\nfrom synthtool import gcp\n\n\ncommon = gcp.CommonTemplates()\n\n# ----------------------------------------------------------------------------\n# Add templated files\n# ----------------------------------------------------------------------------\ntemplated_files = common.py_library()\n\n# Copy kokoro configs.\n# Docs are excluded as repo docs cannot currently be generated using sphinx.\ns.move(templated_files / '.kokoro', excludes=['**/docs/*', 'publish-docs.sh'])\n\n# Also move issue templates\ns.move(templated_files / '.github')\n\n# ----------------------------------------------------------------------------\n# Generate docs\n# ----------------------------------------------------------------------------\ns.shell.run([\"nox\", \"-s\", \"docs\"], hide_output=False)\n", "path": "synth.py"}], "after_files": [{"content": "\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\n\nimport nox\n\ntest_dependencies = [\n \"django>=2.0.0\",\n \"google-auth\",\n \"google-auth-httplib2\",\n \"mox\",\n \"parameterized\",\n \"pyopenssl\",\n \"pytest\",\n \"pytest-cov\",\n \"webtest\",\n \"coverage\",\n \"unittest2\",\n \"mock\",\n]\n\n\[email protected](python=[\"3.7\"])\ndef lint(session):\n session.install(\"flake8\")\n session.run(\n \"flake8\",\n \"googleapiclient\",\n \"tests\",\n \"--count\",\n \"--select=E9,F63,F7,F82\",\n \"--show-source\",\n \"--statistics\",\n )\n\n\[email protected](python=[\"3.6\", \"3.7\", \"3.8\", \"3.9\"])\[email protected](\n \"oauth2client\",\n [\n \"oauth2client<2dev\",\n \"oauth2client>=2,<=3dev\",\n \"oauth2client>=3,<=4dev\",\n \"oauth2client>=4,<=5dev\",\n ],\n)\ndef unit(session, oauth2client):\n session.install(*test_dependencies)\n session.install(oauth2client)\n session.install('.')\n\n # Run py.test against the unit tests.\n session.run(\n \"py.test\",\n \"--quiet\",\n \"--cov=googleapiclient\",\n \"--cov=tests\",\n \"--cov-append\",\n \"--cov-config=.coveragerc\",\n \"--cov-report=\",\n \"--cov-fail-under=85\",\n \"tests\",\n *session.posargs,\n )\n", "path": "noxfile.py"}, {"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport synthtool as s\nfrom synthtool import gcp\n\n\ncommon = gcp.CommonTemplates()\n\n# ----------------------------------------------------------------------------\n# Add templated files\n# ----------------------------------------------------------------------------\ntemplated_files = common.py_library()\n\n# Copy kokoro 
configs.\n# Docs are excluded as repo docs cannot currently be generated using sphinx.\ns.move(templated_files / '.kokoro', excludes=['**/docs/*', 'publish-docs.sh'])\n\n# Also move issue templates\ns.move(templated_files / '.github')\n", "path": "synth.py"}]} | 1,347 | 174 |
gh_patches_debug_17236 | rasdani/github-patches | git_diff | pyca__cryptography-3638 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update release automation for new wheel builder
Once #3636 is merged we need to update the release automation to trigger the new wheel builder and download the artifacts.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `release.py`
Content:
```
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 import getpass
8 import io
9 import os
10 import subprocess
11 import time
12
13 import click
14
15 from clint.textui.progress import Bar as ProgressBar
16
17 import requests
18
19
20 JENKINS_URL = "https://jenkins.cryptography.io/job/cryptography-wheel-builder"
21
22
23 def run(*args, **kwargs):
24 kwargs.setdefault("stderr", subprocess.STDOUT)
25 subprocess.check_output(list(args), **kwargs)
26
27
28 def wait_for_build_completed(session):
29 # Wait 20 seconds before actually checking if the build is complete, to
30 # ensure that it had time to really start.
31 time.sleep(20)
32 while True:
33 response = session.get(
34 "{0}/lastBuild/api/json/".format(JENKINS_URL),
35 headers={
36 "Accept": "application/json",
37 }
38 )
39 response.raise_for_status()
40 if not response.json()["building"]:
41 assert response.json()["result"] == "SUCCESS"
42 break
43 time.sleep(0.1)
44
45
46 def download_artifacts(session):
47 response = session.get(
48 "{0}/lastBuild/api/json/".format(JENKINS_URL),
49 headers={
50 "Accept": "application/json"
51 }
52 )
53 response.raise_for_status()
54 assert not response.json()["building"]
55 assert response.json()["result"] == "SUCCESS"
56
57 paths = []
58
59 last_build_number = response.json()["number"]
60 for run in response.json()["runs"]:
61 if run["number"] != last_build_number:
62 print(
63 "Skipping {0} as it is not from the latest build ({1})".format(
64 run["url"], last_build_number
65 )
66 )
67 continue
68
69 response = session.get(
70 run["url"] + "api/json/",
71 headers={
72 "Accept": "application/json",
73 }
74 )
75 response.raise_for_status()
76 for artifact in response.json()["artifacts"]:
77 response = session.get(
78 "{0}artifact/{1}".format(run["url"], artifact["relativePath"]),
79 stream=True
80 )
81 assert response.headers["content-length"]
82 print("Downloading {0}".format(artifact["fileName"]))
83 bar = ProgressBar(
84 expected_size=int(response.headers["content-length"]),
85 filled_char="="
86 )
87 content = io.BytesIO()
88 for data in response.iter_content(chunk_size=8192):
89 content.write(data)
90 bar.show(content.tell())
91 assert bar.expected_size == content.tell()
92 bar.done()
93 out_path = os.path.join(
94 os.path.dirname(__file__),
95 "dist",
96 artifact["fileName"],
97 )
98 with open(out_path, "wb") as f:
99 f.write(content.getvalue())
100 paths.append(out_path)
101 return paths
102
103
104 @click.command()
105 @click.argument("version")
106 def release(version):
107 """
108 ``version`` should be a string like '0.4' or '1.0'.
109 """
110 run("git", "tag", "-s", version, "-m", "{0} release".format(version))
111 run("git", "push", "--tags")
112
113 run("python", "setup.py", "sdist")
114 run("python", "setup.py", "sdist", "bdist_wheel", cwd="vectors/")
115
116 run(
117 "twine", "upload", "-s", "dist/cryptography-{0}*".format(version),
118 "vectors/dist/cryptography_vectors-{0}*".format(version), shell=True
119 )
120
121 session = requests.Session()
122
123 # This tells the CDN to delete the cached response for the URL. We do this
124 # so that the Jenkins builders will see the new sdist immediately when they
125 # go to build the wheels.
126 response = session.request(
127 "PURGE", "https://pypi.python.org/simple/cryptography/"
128 )
129 response.raise_for_status()
130
131 username = getpass.getpass("Input the GitHub/Jenkins username: ")
132 token = getpass.getpass("Input the Jenkins token: ")
133 response = session.post(
134 "{0}/build".format(JENKINS_URL),
135 auth=requests.auth.HTTPBasicAuth(
136 username, token
137 ),
138 params={
139 "cause": "Building wheels for {0}".format(version)
140 }
141 )
142 response.raise_for_status()
143 wait_for_build_completed(session)
144 paths = download_artifacts(session)
145 run("twine", "upload", " ".join(paths))
146
147
148 if __name__ == "__main__":
149 release()
150
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/release.py b/release.py
--- a/release.py
+++ b/release.py
@@ -17,7 +17,10 @@
import requests
-JENKINS_URL = "https://jenkins.cryptography.io/job/cryptography-wheel-builder"
+JENKINS_URL = (
+ "https://ci.cryptography.io/job/cryptography-support-jobs/"
+ "job/wheel-builder"
+)
def run(*args, **kwargs):
@@ -128,14 +131,11 @@
)
response.raise_for_status()
- username = getpass.getpass("Input the GitHub/Jenkins username: ")
token = getpass.getpass("Input the Jenkins token: ")
- response = session.post(
+ response = session.get(
"{0}/build".format(JENKINS_URL),
- auth=requests.auth.HTTPBasicAuth(
- username, token
- ),
params={
+ "token": token,
"cause": "Building wheels for {0}".format(version)
}
)
| {"golden_diff": "diff --git a/release.py b/release.py\n--- a/release.py\n+++ b/release.py\n@@ -17,7 +17,10 @@\n import requests\n \n \n-JENKINS_URL = \"https://jenkins.cryptography.io/job/cryptography-wheel-builder\"\n+JENKINS_URL = (\n+ \"https://ci.cryptography.io/job/cryptography-support-jobs/\"\n+ \"job/wheel-builder\"\n+)\n \n \n def run(*args, **kwargs):\n@@ -128,14 +131,11 @@\n )\n response.raise_for_status()\n \n- username = getpass.getpass(\"Input the GitHub/Jenkins username: \")\n token = getpass.getpass(\"Input the Jenkins token: \")\n- response = session.post(\n+ response = session.get(\n \"{0}/build\".format(JENKINS_URL),\n- auth=requests.auth.HTTPBasicAuth(\n- username, token\n- ),\n params={\n+ \"token\": token,\n \"cause\": \"Building wheels for {0}\".format(version)\n }\n )\n", "issue": "Update release automation for new wheel builder\nOnce #3636 is merged we need to update the release automation to trigger the new wheel builder and download the artifacts.\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport getpass\nimport io\nimport os\nimport subprocess\nimport time\n\nimport click\n\nfrom clint.textui.progress import Bar as ProgressBar\n\nimport requests\n\n\nJENKINS_URL = \"https://jenkins.cryptography.io/job/cryptography-wheel-builder\"\n\n\ndef run(*args, **kwargs):\n kwargs.setdefault(\"stderr\", subprocess.STDOUT)\n subprocess.check_output(list(args), **kwargs)\n\n\ndef wait_for_build_completed(session):\n # Wait 20 seconds before actually checking if the build is complete, to\n # ensure that it had time to really start.\n time.sleep(20)\n while True:\n response = session.get(\n \"{0}/lastBuild/api/json/\".format(JENKINS_URL),\n headers={\n \"Accept\": \"application/json\",\n }\n )\n response.raise_for_status()\n if not response.json()[\"building\"]:\n assert response.json()[\"result\"] == \"SUCCESS\"\n break\n time.sleep(0.1)\n\n\ndef download_artifacts(session):\n response = session.get(\n \"{0}/lastBuild/api/json/\".format(JENKINS_URL),\n headers={\n \"Accept\": \"application/json\"\n }\n )\n response.raise_for_status()\n assert not response.json()[\"building\"]\n assert response.json()[\"result\"] == \"SUCCESS\"\n\n paths = []\n\n last_build_number = response.json()[\"number\"]\n for run in response.json()[\"runs\"]:\n if run[\"number\"] != last_build_number:\n print(\n \"Skipping {0} as it is not from the latest build ({1})\".format(\n run[\"url\"], last_build_number\n )\n )\n continue\n\n response = session.get(\n run[\"url\"] + \"api/json/\",\n headers={\n \"Accept\": \"application/json\",\n }\n )\n response.raise_for_status()\n for artifact in response.json()[\"artifacts\"]:\n response = session.get(\n \"{0}artifact/{1}\".format(run[\"url\"], artifact[\"relativePath\"]),\n stream=True\n )\n assert response.headers[\"content-length\"]\n print(\"Downloading {0}\".format(artifact[\"fileName\"]))\n bar = ProgressBar(\n expected_size=int(response.headers[\"content-length\"]),\n filled_char=\"=\"\n )\n content = io.BytesIO()\n for data in response.iter_content(chunk_size=8192):\n content.write(data)\n bar.show(content.tell())\n assert bar.expected_size == content.tell()\n bar.done()\n out_path = os.path.join(\n os.path.dirname(__file__),\n \"dist\",\n artifact[\"fileName\"],\n )\n with open(out_path, \"wb\") as f:\n 
f.write(content.getvalue())\n paths.append(out_path)\n return paths\n\n\[email protected]()\[email protected](\"version\")\ndef release(version):\n \"\"\"\n ``version`` should be a string like '0.4' or '1.0'.\n \"\"\"\n run(\"git\", \"tag\", \"-s\", version, \"-m\", \"{0} release\".format(version))\n run(\"git\", \"push\", \"--tags\")\n\n run(\"python\", \"setup.py\", \"sdist\")\n run(\"python\", \"setup.py\", \"sdist\", \"bdist_wheel\", cwd=\"vectors/\")\n\n run(\n \"twine\", \"upload\", \"-s\", \"dist/cryptography-{0}*\".format(version),\n \"vectors/dist/cryptography_vectors-{0}*\".format(version), shell=True\n )\n\n session = requests.Session()\n\n # This tells the CDN to delete the cached response for the URL. We do this\n # so that the Jenkins builders will see the new sdist immediately when they\n # go to build the wheels.\n response = session.request(\n \"PURGE\", \"https://pypi.python.org/simple/cryptography/\"\n )\n response.raise_for_status()\n\n username = getpass.getpass(\"Input the GitHub/Jenkins username: \")\n token = getpass.getpass(\"Input the Jenkins token: \")\n response = session.post(\n \"{0}/build\".format(JENKINS_URL),\n auth=requests.auth.HTTPBasicAuth(\n username, token\n ),\n params={\n \"cause\": \"Building wheels for {0}\".format(version)\n }\n )\n response.raise_for_status()\n wait_for_build_completed(session)\n paths = download_artifacts(session)\n run(\"twine\", \"upload\", \" \".join(paths))\n\n\nif __name__ == \"__main__\":\n release()\n", "path": "release.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport getpass\nimport io\nimport os\nimport subprocess\nimport time\n\nimport click\n\nfrom clint.textui.progress import Bar as ProgressBar\n\nimport requests\n\n\nJENKINS_URL = (\n \"https://ci.cryptography.io/job/cryptography-support-jobs/\"\n \"job/wheel-builder\"\n)\n\n\ndef run(*args, **kwargs):\n kwargs.setdefault(\"stderr\", subprocess.STDOUT)\n subprocess.check_output(list(args), **kwargs)\n\n\ndef wait_for_build_completed(session):\n # Wait 20 seconds before actually checking if the build is complete, to\n # ensure that it had time to really start.\n time.sleep(20)\n while True:\n response = session.get(\n \"{0}/lastBuild/api/json/\".format(JENKINS_URL),\n headers={\n \"Accept\": \"application/json\",\n }\n )\n response.raise_for_status()\n if not response.json()[\"building\"]:\n assert response.json()[\"result\"] == \"SUCCESS\"\n break\n time.sleep(0.1)\n\n\ndef download_artifacts(session):\n response = session.get(\n \"{0}/lastBuild/api/json/\".format(JENKINS_URL),\n headers={\n \"Accept\": \"application/json\"\n }\n )\n response.raise_for_status()\n assert not response.json()[\"building\"]\n assert response.json()[\"result\"] == \"SUCCESS\"\n\n paths = []\n\n last_build_number = response.json()[\"number\"]\n for run in response.json()[\"runs\"]:\n if run[\"number\"] != last_build_number:\n print(\n \"Skipping {0} as it is not from the latest build ({1})\".format(\n run[\"url\"], last_build_number\n )\n )\n continue\n\n response = session.get(\n run[\"url\"] + \"api/json/\",\n headers={\n \"Accept\": \"application/json\",\n }\n )\n response.raise_for_status()\n for artifact in response.json()[\"artifacts\"]:\n response = session.get(\n \"{0}artifact/{1}\".format(run[\"url\"], artifact[\"relativePath\"]),\n 
stream=True\n )\n assert response.headers[\"content-length\"]\n print(\"Downloading {0}\".format(artifact[\"fileName\"]))\n bar = ProgressBar(\n expected_size=int(response.headers[\"content-length\"]),\n filled_char=\"=\"\n )\n content = io.BytesIO()\n for data in response.iter_content(chunk_size=8192):\n content.write(data)\n bar.show(content.tell())\n assert bar.expected_size == content.tell()\n bar.done()\n out_path = os.path.join(\n os.path.dirname(__file__),\n \"dist\",\n artifact[\"fileName\"],\n )\n with open(out_path, \"wb\") as f:\n f.write(content.getvalue())\n paths.append(out_path)\n return paths\n\n\[email protected]()\[email protected](\"version\")\ndef release(version):\n \"\"\"\n ``version`` should be a string like '0.4' or '1.0'.\n \"\"\"\n run(\"git\", \"tag\", \"-s\", version, \"-m\", \"{0} release\".format(version))\n run(\"git\", \"push\", \"--tags\")\n\n run(\"python\", \"setup.py\", \"sdist\")\n run(\"python\", \"setup.py\", \"sdist\", \"bdist_wheel\", cwd=\"vectors/\")\n\n run(\n \"twine\", \"upload\", \"-s\", \"dist/cryptography-{0}*\".format(version),\n \"vectors/dist/cryptography_vectors-{0}*\".format(version), shell=True\n )\n\n session = requests.Session()\n\n # This tells the CDN to delete the cached response for the URL. We do this\n # so that the Jenkins builders will see the new sdist immediately when they\n # go to build the wheels.\n response = session.request(\n \"PURGE\", \"https://pypi.python.org/simple/cryptography/\"\n )\n response.raise_for_status()\n\n token = getpass.getpass(\"Input the Jenkins token: \")\n response = session.get(\n \"{0}/build\".format(JENKINS_URL),\n params={\n \"token\": token,\n \"cause\": \"Building wheels for {0}\".format(version)\n }\n )\n response.raise_for_status()\n wait_for_build_completed(session)\n paths = download_artifacts(session)\n run(\"twine\", \"upload\", \" \".join(paths))\n\n\nif __name__ == \"__main__\":\n release()\n", "path": "release.py"}]} | 1,645 | 230 |
gh_patches_debug_24985 | rasdani/github-patches | git_diff | comic__grand-challenge.org-2348 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Archive Serializers use `id` rather than `pk`
Some of our serializers use `id` rather than `pk`, for consistency we should only use one and that should be `pk`. Check the other serializers and see if this occurs elsewhere.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/grandchallenge/archives/serializers.py`
Content:
```
1 from django.db.transaction import on_commit
2 from guardian.shortcuts import get_objects_for_user
3 from rest_framework import serializers
4 from rest_framework.fields import ReadOnlyField, URLField
5 from rest_framework.relations import HyperlinkedRelatedField
6
7 from grandchallenge.archives.models import Archive, ArchiveItem
8 from grandchallenge.archives.tasks import (
9 start_archive_item_update_tasks,
10 update_archive_item_update_kwargs,
11 )
12 from grandchallenge.components.serializers import (
13 ComponentInterfaceValuePostSerializer,
14 ComponentInterfaceValueSerializer,
15 )
16 from grandchallenge.hanging_protocols.serializers import (
17 HangingProtocolSerializer,
18 )
19
20
21 class ArchiveItemSerializer(serializers.ModelSerializer):
22 archive = HyperlinkedRelatedField(
23 read_only=True, view_name="api:archive-detail"
24 )
25 values = ComponentInterfaceValueSerializer(many=True)
26
27 class Meta:
28 model = ArchiveItem
29 fields = ("id", "archive", "values")
30
31
32 class ArchiveSerializer(serializers.ModelSerializer):
33 algorithms = HyperlinkedRelatedField(
34 read_only=True, many=True, view_name="api:algorithm-detail"
35 )
36 logo = URLField(source="logo.x20.url", read_only=True)
37 url = URLField(source="get_absolute_url", read_only=True)
38 # Include the read only name for legacy clients
39 name = ReadOnlyField()
40 hanging_protocol = HangingProtocolSerializer()
41
42 class Meta:
43 model = Archive
44 fields = (
45 "id",
46 "name",
47 "title",
48 "algorithms",
49 "logo",
50 "description",
51 "api_url",
52 "url",
53 "hanging_protocol",
54 "view_content",
55 )
56
57
58 class ArchiveItemPostSerializer(ArchiveItemSerializer):
59 archive = HyperlinkedRelatedField(
60 queryset=Archive.objects.none(),
61 view_name="api:archive-detail",
62 write_only=True,
63 )
64
65 def __init__(self, *args, **kwargs):
66 super().__init__(*args, **kwargs)
67 self.fields["values"] = ComponentInterfaceValuePostSerializer(
68 many=True, context=self.context
69 )
70
71 if "request" in self.context:
72 user = self.context["request"].user
73
74 self.fields["archive"].queryset = get_objects_for_user(
75 user, "archives.use_archive", accept_global_perms=False
76 )
77
78 def update(self, instance, validated_data):
79 civs = validated_data.pop("values")
80
81 civ_pks_to_remove = set()
82 civ_pks_to_add = set()
83 upload_pks = {}
84
85 for civ in civs:
86 interface = civ.pop("interface", None)
87 upload_session = civ.pop("upload_session", None)
88 value = civ.pop("value", None)
89 image = civ.pop("image", None)
90 user_upload = civ.pop("user_upload", None)
91
92 update_archive_item_update_kwargs(
93 instance=instance,
94 interface=interface,
95 value=value,
96 image=image,
97 user_upload=user_upload,
98 upload_session=upload_session,
99 civ_pks_to_add=civ_pks_to_add,
100 civ_pks_to_remove=civ_pks_to_remove,
101 upload_pks=upload_pks,
102 )
103
104 on_commit(
105 start_archive_item_update_tasks.signature(
106 kwargs={
107 "archive_item_pk": instance.pk,
108 "civ_pks_to_add": list(civ_pks_to_add),
109 "civ_pks_to_remove": list(civ_pks_to_remove),
110 "upload_pks": upload_pks,
111 }
112 ).apply_async
113 )
114
115 return instance
116
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/grandchallenge/archives/serializers.py b/app/grandchallenge/archives/serializers.py
--- a/app/grandchallenge/archives/serializers.py
+++ b/app/grandchallenge/archives/serializers.py
@@ -11,7 +11,7 @@
)
from grandchallenge.components.serializers import (
ComponentInterfaceValuePostSerializer,
- ComponentInterfaceValueSerializer,
+ HyperlinkedComponentInterfaceValueSerializer,
)
from grandchallenge.hanging_protocols.serializers import (
HangingProtocolSerializer,
@@ -22,11 +22,11 @@
archive = HyperlinkedRelatedField(
read_only=True, view_name="api:archive-detail"
)
- values = ComponentInterfaceValueSerializer(many=True)
+ values = HyperlinkedComponentInterfaceValueSerializer(many=True)
class Meta:
model = ArchiveItem
- fields = ("id", "archive", "values")
+ fields = ("pk", "archive", "values")
class ArchiveSerializer(serializers.ModelSerializer):
@@ -42,7 +42,7 @@
class Meta:
model = Archive
fields = (
- "id",
+ "pk",
"name",
"title",
"algorithms",
| {"golden_diff": "diff --git a/app/grandchallenge/archives/serializers.py b/app/grandchallenge/archives/serializers.py\n--- a/app/grandchallenge/archives/serializers.py\n+++ b/app/grandchallenge/archives/serializers.py\n@@ -11,7 +11,7 @@\n )\n from grandchallenge.components.serializers import (\n ComponentInterfaceValuePostSerializer,\n- ComponentInterfaceValueSerializer,\n+ HyperlinkedComponentInterfaceValueSerializer,\n )\n from grandchallenge.hanging_protocols.serializers import (\n HangingProtocolSerializer,\n@@ -22,11 +22,11 @@\n archive = HyperlinkedRelatedField(\n read_only=True, view_name=\"api:archive-detail\"\n )\n- values = ComponentInterfaceValueSerializer(many=True)\n+ values = HyperlinkedComponentInterfaceValueSerializer(many=True)\n \n class Meta:\n model = ArchiveItem\n- fields = (\"id\", \"archive\", \"values\")\n+ fields = (\"pk\", \"archive\", \"values\")\n \n \n class ArchiveSerializer(serializers.ModelSerializer):\n@@ -42,7 +42,7 @@\n class Meta:\n model = Archive\n fields = (\n- \"id\",\n+ \"pk\",\n \"name\",\n \"title\",\n \"algorithms\",\n", "issue": "Archive Serializers use `id` rather than `pk`\nSome of our serializers use `id` rather than `pk`, for consistency we should only use one and that should be `pk`. Check the other serializers and see if this occurs elsewhere.\n", "before_files": [{"content": "from django.db.transaction import on_commit\nfrom guardian.shortcuts import get_objects_for_user\nfrom rest_framework import serializers\nfrom rest_framework.fields import ReadOnlyField, URLField\nfrom rest_framework.relations import HyperlinkedRelatedField\n\nfrom grandchallenge.archives.models import Archive, ArchiveItem\nfrom grandchallenge.archives.tasks import (\n start_archive_item_update_tasks,\n update_archive_item_update_kwargs,\n)\nfrom grandchallenge.components.serializers import (\n ComponentInterfaceValuePostSerializer,\n ComponentInterfaceValueSerializer,\n)\nfrom grandchallenge.hanging_protocols.serializers import (\n HangingProtocolSerializer,\n)\n\n\nclass ArchiveItemSerializer(serializers.ModelSerializer):\n archive = HyperlinkedRelatedField(\n read_only=True, view_name=\"api:archive-detail\"\n )\n values = ComponentInterfaceValueSerializer(many=True)\n\n class Meta:\n model = ArchiveItem\n fields = (\"id\", \"archive\", \"values\")\n\n\nclass ArchiveSerializer(serializers.ModelSerializer):\n algorithms = HyperlinkedRelatedField(\n read_only=True, many=True, view_name=\"api:algorithm-detail\"\n )\n logo = URLField(source=\"logo.x20.url\", read_only=True)\n url = URLField(source=\"get_absolute_url\", read_only=True)\n # Include the read only name for legacy clients\n name = ReadOnlyField()\n hanging_protocol = HangingProtocolSerializer()\n\n class Meta:\n model = Archive\n fields = (\n \"id\",\n \"name\",\n \"title\",\n \"algorithms\",\n \"logo\",\n \"description\",\n \"api_url\",\n \"url\",\n \"hanging_protocol\",\n \"view_content\",\n )\n\n\nclass ArchiveItemPostSerializer(ArchiveItemSerializer):\n archive = HyperlinkedRelatedField(\n queryset=Archive.objects.none(),\n view_name=\"api:archive-detail\",\n write_only=True,\n )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields[\"values\"] = ComponentInterfaceValuePostSerializer(\n many=True, context=self.context\n )\n\n if \"request\" in self.context:\n user = self.context[\"request\"].user\n\n self.fields[\"archive\"].queryset = get_objects_for_user(\n user, \"archives.use_archive\", accept_global_perms=False\n )\n\n def update(self, instance, 
validated_data):\n civs = validated_data.pop(\"values\")\n\n civ_pks_to_remove = set()\n civ_pks_to_add = set()\n upload_pks = {}\n\n for civ in civs:\n interface = civ.pop(\"interface\", None)\n upload_session = civ.pop(\"upload_session\", None)\n value = civ.pop(\"value\", None)\n image = civ.pop(\"image\", None)\n user_upload = civ.pop(\"user_upload\", None)\n\n update_archive_item_update_kwargs(\n instance=instance,\n interface=interface,\n value=value,\n image=image,\n user_upload=user_upload,\n upload_session=upload_session,\n civ_pks_to_add=civ_pks_to_add,\n civ_pks_to_remove=civ_pks_to_remove,\n upload_pks=upload_pks,\n )\n\n on_commit(\n start_archive_item_update_tasks.signature(\n kwargs={\n \"archive_item_pk\": instance.pk,\n \"civ_pks_to_add\": list(civ_pks_to_add),\n \"civ_pks_to_remove\": list(civ_pks_to_remove),\n \"upload_pks\": upload_pks,\n }\n ).apply_async\n )\n\n return instance\n", "path": "app/grandchallenge/archives/serializers.py"}], "after_files": [{"content": "from django.db.transaction import on_commit\nfrom guardian.shortcuts import get_objects_for_user\nfrom rest_framework import serializers\nfrom rest_framework.fields import ReadOnlyField, URLField\nfrom rest_framework.relations import HyperlinkedRelatedField\n\nfrom grandchallenge.archives.models import Archive, ArchiveItem\nfrom grandchallenge.archives.tasks import (\n start_archive_item_update_tasks,\n update_archive_item_update_kwargs,\n)\nfrom grandchallenge.components.serializers import (\n ComponentInterfaceValuePostSerializer,\n HyperlinkedComponentInterfaceValueSerializer,\n)\nfrom grandchallenge.hanging_protocols.serializers import (\n HangingProtocolSerializer,\n)\n\n\nclass ArchiveItemSerializer(serializers.ModelSerializer):\n archive = HyperlinkedRelatedField(\n read_only=True, view_name=\"api:archive-detail\"\n )\n values = HyperlinkedComponentInterfaceValueSerializer(many=True)\n\n class Meta:\n model = ArchiveItem\n fields = (\"pk\", \"archive\", \"values\")\n\n\nclass ArchiveSerializer(serializers.ModelSerializer):\n algorithms = HyperlinkedRelatedField(\n read_only=True, many=True, view_name=\"api:algorithm-detail\"\n )\n logo = URLField(source=\"logo.x20.url\", read_only=True)\n url = URLField(source=\"get_absolute_url\", read_only=True)\n # Include the read only name for legacy clients\n name = ReadOnlyField()\n hanging_protocol = HangingProtocolSerializer()\n\n class Meta:\n model = Archive\n fields = (\n \"pk\",\n \"name\",\n \"title\",\n \"algorithms\",\n \"logo\",\n \"description\",\n \"api_url\",\n \"url\",\n \"hanging_protocol\",\n \"view_content\",\n )\n\n\nclass ArchiveItemPostSerializer(ArchiveItemSerializer):\n archive = HyperlinkedRelatedField(\n queryset=Archive.objects.none(),\n view_name=\"api:archive-detail\",\n write_only=True,\n )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields[\"values\"] = ComponentInterfaceValuePostSerializer(\n many=True, context=self.context\n )\n\n if \"request\" in self.context:\n user = self.context[\"request\"].user\n\n self.fields[\"archive\"].queryset = get_objects_for_user(\n user, \"archives.use_archive\", accept_global_perms=False\n )\n\n def update(self, instance, validated_data):\n civs = validated_data.pop(\"values\")\n\n civ_pks_to_remove = set()\n civ_pks_to_add = set()\n upload_pks = {}\n\n for civ in civs:\n interface = civ.pop(\"interface\", None)\n upload_session = civ.pop(\"upload_session\", None)\n value = civ.pop(\"value\", None)\n image = civ.pop(\"image\", None)\n user_upload = 
civ.pop(\"user_upload\", None)\n\n update_archive_item_update_kwargs(\n instance=instance,\n interface=interface,\n value=value,\n image=image,\n user_upload=user_upload,\n upload_session=upload_session,\n civ_pks_to_add=civ_pks_to_add,\n civ_pks_to_remove=civ_pks_to_remove,\n upload_pks=upload_pks,\n )\n\n on_commit(\n start_archive_item_update_tasks.signature(\n kwargs={\n \"archive_item_pk\": instance.pk,\n \"civ_pks_to_add\": list(civ_pks_to_add),\n \"civ_pks_to_remove\": list(civ_pks_to_remove),\n \"upload_pks\": upload_pks,\n }\n ).apply_async\n )\n\n return instance\n", "path": "app/grandchallenge/archives/serializers.py"}]} | 1,299 | 268 |
gh_patches_debug_32080 | rasdani/github-patches | git_diff | ManageIQ__integration_tests-4789 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Freeze.py screws up test running
The virtualenv that is left in the requirements/ dir seems to interfere with normal operations, so I always need to delete it; perhaps we need an ignore somewhere, or we need to place it elsewhere.
```
../default/lib/python2.7/site-packages/py/_path/common.py:367: in visit
for x in Visitor(fil, rec, ignore, bf, sort).gen(self):
../default/lib/python2.7/site-packages/py/_path/common.py:416: in gen
for p in self.gen(subdir):
../default/lib/python2.7/site-packages/py/_path/common.py:416: in gen
for p in self.gen(subdir):
../default/lib/python2.7/site-packages/py/_path/common.py:416: in gen
for p in self.gen(subdir):
../default/lib/python2.7/site-packages/py/_path/common.py:416: in gen
for p in self.gen(subdir):
../default/lib/python2.7/site-packages/py/_path/common.py:416: in gen
for p in self.gen(subdir):
../default/lib/python2.7/site-packages/py/_path/common.py:416: in gen
for p in self.gen(subdir):
../default/lib/python2.7/site-packages/py/_path/common.py:416: in gen
for p in self.gen(subdir):
../default/lib/python2.7/site-packages/py/_path/common.py:406: in gen
if p.check(dir=1) and (rec is None or rec(p))])
../default/lib/python2.7/site-packages/_pytest/main.py:682: in _recurse
ihook = self.gethookproxy(path)
../default/lib/python2.7/site-packages/_pytest/main.py:587: in gethookproxy
my_conftestmodules = pm._getconftestmodules(fspath)
../default/lib/python2.7/site-packages/_pytest/config.py:339: in _getconftestmodules
mod = self._importconftest(conftestpath)
../default/lib/python2.7/site-packages/_pytest/config.py:375: in _importconftest
self.consider_conftest(mod)
../default/lib/python2.7/site-packages/_pytest/config.py:398: in consider_conftest
if self.register(conftestmodule, name=conftestmodule.__file__):
../default/lib/python2.7/site-packages/_pytest/config.py:250: in register
ret = super(PytestPluginManager, self).register(plugin, name)
../default/lib/python2.7/site-packages/_pytest/vendored_packages/pluggy.py:371: in register
hook._maybe_apply_history(hookimpl)
../default/lib/python2.7/site-packages/_pytest/vendored_packages/pluggy.py:768: in _maybe_apply_history
res = self._hookexec(self, [method], kwargs)
../default/lib/python2.7/site-packages/_pytest/vendored_packages/pluggy.py:339: in _hookexec
return self._inner_hookexec(hook, methods, kwargs)
../default/lib/python2.7/site-packages/_pytest/vendored_packages/pluggy.py:334: in <lambda>
_MultiCall(methods, kwargs, hook.spec_opts).execute()
../default/lib/python2.7/site-packages/_pytest/vendored_packages/pluggy.py:614: in execute
res = hook_impl.function(*args)
requirements/temporary_venv/lib/python2.7/site-packages/tests/contrib/appengine/conftest.py:45: in pytest_configure
if config.getoption('gae_sdk') is not None:
../default/lib/python2.7/site-packages/_pytest/config.py:1195: in getoption
raise ValueError("no option named %r" % (name,))
E ValueError: no option named 'gae_sdk'
```
--- END ISSUE ---
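One way to read the traceback above: pytest recursed into `requirements/temporary_venv/`, imported a third-party `conftest.py` from the venv's `site-packages`, and that conftest assumed a `gae_sdk` option the run never registered. A minimal collection-side workaround is sketched below, assuming a top-level `conftest.py`; the paths are illustrative and this is not necessarily the fix the project chose.
```python
# conftest.py at the repository root (assumed location): keep pytest from
# ever descending into the throwaway virtualenv left behind by freeze.py.
collect_ignore_glob = [
    "requirements/temporary_venv",    # the venv directory itself
    "requirements/temporary_venv/*",  # and everything inside it
]
```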
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `requirements/freeze.py`
Content:
```
1 #!/usr/bin/env python
2 """
3 outputs the frozen packages
4 """
5 import sys
6 import os
7 import argparse
8 import subprocess
9 parser = argparse.ArgumentParser(description=__doc__.strip())
10 parser.add_argument('--venv', default='requirements/temporary_venv')
11 parser.add_argument(
12 "--template", default="requirements/template.txt",)
13 parser.add_argument(
14 "--out", default=sys.stdout, type=argparse.FileType('w'),
15 help='the file where packages should be written to')
16
17
18 def main(args):
19 if not os.path.isdir(args.venv):
20 subprocess.check_call([
21 sys.executable, '-m', 'virtualenv', args.venv
22 ])
23 subprocess.check_call([
24 os.path.join(args.venv, 'bin/pip'),
25 'install', '-U', '-r', args.template])
26
27 subprocess.check_call([
28 os.path.join(args.venv, 'bin/pip'), 'freeze'
29 ], stdout=args.out)
30
31
32 if __name__ == '__main__':
33 main(parser.parse_args())
34
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/requirements/freeze.py b/requirements/freeze.py
--- a/requirements/freeze.py
+++ b/requirements/freeze.py
@@ -2,31 +2,52 @@
"""
outputs the frozen packages
"""
+from __future__ import print_function
import sys
import os
import argparse
import subprocess
+import tempfile
+import shutil
parser = argparse.ArgumentParser(description=__doc__.strip())
-parser.add_argument('--venv', default='requirements/temporary_venv')
+parser.add_argument('--venv', default=None)
+parser.add_argument('--keep-venv', action='store_true')
parser.add_argument(
"--template", default="requirements/template.txt",)
parser.add_argument(
- "--out", default=sys.stdout, type=argparse.FileType('w'),
+ "--out", default=None,
help='the file where packages should be written to')
def main(args):
- if not os.path.isdir(args.venv):
+ if args.venv is None:
+ args.venv = tempfile.mkdtemp(suffix='-miq-QE-rebuild-venv')
+
+ try:
+ if not os.path.isdir(os.path.join(args.venv, 'bin')):
+ subprocess.check_call([
+ sys.executable, '-m', 'virtualenv', args.venv
+ ])
subprocess.check_call([
- sys.executable, '-m', 'virtualenv', args.venv
- ])
- subprocess.check_call([
- os.path.join(args.venv, 'bin/pip'),
- 'install', '-U', '-r', args.template])
+ os.path.join(args.venv, 'bin/pip'),
+ 'install', '-U', '-r', args.template])
+
+ if args.out is None:
+ subprocess.check_call([
+ os.path.join(args.venv, 'bin/pip'), 'freeze'
+ ], stdout=sys.stdout)
+ else:
+ with open(args.out) as out:
+ subprocess.check_call([
+ os.path.join(args.venv, 'bin/pip'), 'freeze'
+ ], stdout=out)
- subprocess.check_call([
- os.path.join(args.venv, 'bin/pip'), 'freeze'
- ], stdout=args.out)
+ subprocess.check_call([
+ os.path.join(args.venv, 'bin/pip'), 'freeze'
+ ], stdout=args.out)
+ finally:
+ if not args.keep_venv:
+ shutil.rmtree(args.venv)
if __name__ == '__main__':
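The patch above moves the throwaway virtualenv out of the repository tree entirely. A self-contained sketch of the same pattern, with only the suffix and flag semantics taken from the diff (it assumes `virtualenv` is installed):
```python
import shutil
import subprocess
import sys
import tempfile

venv = tempfile.mkdtemp(suffix='-miq-QE-rebuild-venv')
try:
    subprocess.check_call([sys.executable, '-m', 'virtualenv', venv])
    # install from the template and capture `pip freeze` output here
finally:
    shutil.rmtree(venv)  # the real script skips this when --keep-venv is passed
```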
| {"golden_diff": "diff --git a/requirements/freeze.py b/requirements/freeze.py\n--- a/requirements/freeze.py\n+++ b/requirements/freeze.py\n@@ -2,31 +2,52 @@\n \"\"\"\n outputs the frozen packages\n \"\"\"\n+from __future__ import print_function\n import sys\n import os\n import argparse\n import subprocess\n+import tempfile\n+import shutil\n parser = argparse.ArgumentParser(description=__doc__.strip())\n-parser.add_argument('--venv', default='requirements/temporary_venv')\n+parser.add_argument('--venv', default=None)\n+parser.add_argument('--keep-venv', action='store_true')\n parser.add_argument(\n \"--template\", default=\"requirements/template.txt\",)\n parser.add_argument(\n- \"--out\", default=sys.stdout, type=argparse.FileType('w'),\n+ \"--out\", default=None,\n help='the file where packages should be written to')\n \n \n def main(args):\n- if not os.path.isdir(args.venv):\n+ if args.venv is None:\n+ args.venv = tempfile.mkdtemp(suffix='-miq-QE-rebuild-venv')\n+\n+ try:\n+ if not os.path.isdir(os.path.join(args.venv, 'bin')):\n+ subprocess.check_call([\n+ sys.executable, '-m', 'virtualenv', args.venv\n+ ])\n subprocess.check_call([\n- sys.executable, '-m', 'virtualenv', args.venv\n- ])\n- subprocess.check_call([\n- os.path.join(args.venv, 'bin/pip'),\n- 'install', '-U', '-r', args.template])\n+ os.path.join(args.venv, 'bin/pip'),\n+ 'install', '-U', '-r', args.template])\n+\n+ if args.out is None:\n+ subprocess.check_call([\n+ os.path.join(args.venv, 'bin/pip'), 'freeze'\n+ ], stdout=sys.stdout)\n+ else:\n+ with open(args.out) as out:\n+ subprocess.check_call([\n+ os.path.join(args.venv, 'bin/pip'), 'freeze'\n+ ], stdout=out)\n \n- subprocess.check_call([\n- os.path.join(args.venv, 'bin/pip'), 'freeze'\n- ], stdout=args.out)\n+ subprocess.check_call([\n+ os.path.join(args.venv, 'bin/pip'), 'freeze'\n+ ], stdout=args.out)\n+ finally:\n+ if not args.keep_venv:\n+ shutil.rmtree(args.venv)\n \n \n if __name__ == '__main__':\n", "issue": "Freeze.py screws up test running\nThe virtualenv that is left in requirments/ dir seems to interfere with normal operations so I always need to delete it, perhaps we need some ignore somewhere or need to place it elsewhere\r\n\r\n```\r\n../default/lib/python2.7/site-packages/py/_path/common.py:367: in visit\r\n for x in Visitor(fil, rec, ignore, bf, sort).gen(self):\r\n../default/lib/python2.7/site-packages/py/_path/common.py:416: in gen\r\n for p in self.gen(subdir):\r\n../default/lib/python2.7/site-packages/py/_path/common.py:416: in gen\r\n for p in self.gen(subdir):\r\n../default/lib/python2.7/site-packages/py/_path/common.py:416: in gen\r\n for p in self.gen(subdir):\r\n../default/lib/python2.7/site-packages/py/_path/common.py:416: in gen\r\n for p in self.gen(subdir):\r\n../default/lib/python2.7/site-packages/py/_path/common.py:416: in gen\r\n for p in self.gen(subdir):\r\n../default/lib/python2.7/site-packages/py/_path/common.py:416: in gen\r\n for p in self.gen(subdir):\r\n../default/lib/python2.7/site-packages/py/_path/common.py:416: in gen\r\n for p in self.gen(subdir):\r\n../default/lib/python2.7/site-packages/py/_path/common.py:406: in gen\r\n if p.check(dir=1) and (rec is None or rec(p))])\r\n../default/lib/python2.7/site-packages/_pytest/main.py:682: in _recurse\r\n ihook = self.gethookproxy(path)\r\n../default/lib/python2.7/site-packages/_pytest/main.py:587: in gethookproxy\r\n my_conftestmodules = pm._getconftestmodules(fspath)\r\n../default/lib/python2.7/site-packages/_pytest/config.py:339: in _getconftestmodules\r\n mod = 
self._importconftest(conftestpath)\r\n../default/lib/python2.7/site-packages/_pytest/config.py:375: in _importconftest\r\n self.consider_conftest(mod)\r\n../default/lib/python2.7/site-packages/_pytest/config.py:398: in consider_conftest\r\n if self.register(conftestmodule, name=conftestmodule.__file__):\r\n../default/lib/python2.7/site-packages/_pytest/config.py:250: in register\r\n ret = super(PytestPluginManager, self).register(plugin, name)\r\n../default/lib/python2.7/site-packages/_pytest/vendored_packages/pluggy.py:371: in register\r\n hook._maybe_apply_history(hookimpl)\r\n../default/lib/python2.7/site-packages/_pytest/vendored_packages/pluggy.py:768: in _maybe_apply_history\r\n res = self._hookexec(self, [method], kwargs)\r\n../default/lib/python2.7/site-packages/_pytest/vendored_packages/pluggy.py:339: in _hookexec\r\n return self._inner_hookexec(hook, methods, kwargs)\r\n../default/lib/python2.7/site-packages/_pytest/vendored_packages/pluggy.py:334: in <lambda>\r\n _MultiCall(methods, kwargs, hook.spec_opts).execute()\r\n../default/lib/python2.7/site-packages/_pytest/vendored_packages/pluggy.py:614: in execute\r\n res = hook_impl.function(*args)\r\nrequirements/temporary_venv/lib/python2.7/site-packages/tests/contrib/appengine/conftest.py:45: in pytest_configure\r\n if config.getoption('gae_sdk') is not None:\r\n../default/lib/python2.7/site-packages/_pytest/config.py:1195: in getoption\r\n raise ValueError(\"no option named %r\" % (name,))\r\nE ValueError: no option named 'gae_sdk'\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"\noutputs the frozen packages\n\"\"\"\nimport sys\nimport os\nimport argparse\nimport subprocess\nparser = argparse.ArgumentParser(description=__doc__.strip())\nparser.add_argument('--venv', default='requirements/temporary_venv')\nparser.add_argument(\n \"--template\", default=\"requirements/template.txt\",)\nparser.add_argument(\n \"--out\", default=sys.stdout, type=argparse.FileType('w'),\n help='the file where packages should be written to')\n\n\ndef main(args):\n if not os.path.isdir(args.venv):\n subprocess.check_call([\n sys.executable, '-m', 'virtualenv', args.venv\n ])\n subprocess.check_call([\n os.path.join(args.venv, 'bin/pip'),\n 'install', '-U', '-r', args.template])\n\n subprocess.check_call([\n os.path.join(args.venv, 'bin/pip'), 'freeze'\n ], stdout=args.out)\n\n\nif __name__ == '__main__':\n main(parser.parse_args())\n", "path": "requirements/freeze.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\"\"\"\noutputs the frozen packages\n\"\"\"\nfrom __future__ import print_function\nimport sys\nimport os\nimport argparse\nimport subprocess\nimport tempfile\nimport shutil\nparser = argparse.ArgumentParser(description=__doc__.strip())\nparser.add_argument('--venv', default=None)\nparser.add_argument('--keep-venv', action='store_true')\nparser.add_argument(\n \"--template\", default=\"requirements/template.txt\",)\nparser.add_argument(\n \"--out\", default=None,\n help='the file where packages should be written to')\n\n\ndef main(args):\n if args.venv is None:\n args.venv = tempfile.mkdtemp(suffix='-miq-QE-rebuild-venv')\n\n try:\n if not os.path.isdir(os.path.join(args.venv, 'bin')):\n subprocess.check_call([\n sys.executable, '-m', 'virtualenv', args.venv\n ])\n subprocess.check_call([\n os.path.join(args.venv, 'bin/pip'),\n 'install', '-U', '-r', args.template])\n\n if args.out is None:\n subprocess.check_call([\n os.path.join(args.venv, 'bin/pip'), 'freeze'\n ], stdout=sys.stdout)\n else:\n with 
open(args.out) as out:\n subprocess.check_call([\n os.path.join(args.venv, 'bin/pip'), 'freeze'\n ], stdout=out)\n\n subprocess.check_call([\n os.path.join(args.venv, 'bin/pip'), 'freeze'\n ], stdout=args.out)\n finally:\n if not args.keep_venv:\n shutil.rmtree(args.venv)\n\n\nif __name__ == '__main__':\n main(parser.parse_args())\n", "path": "requirements/freeze.py"}]} | 1,400 | 558 |
gh_patches_debug_61039 | rasdani/github-patches | git_diff | google-research__text-to-text-transfer-transformer-327 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Issue Running T5 in colab TPU
Hi Team,
I was trying to do a pre-training of T5 from scratch on Colab. If I install t5 using `pip install t5[gcp]` and then try to execute `tf.tpu.experimental.initialize_tpu_system(tpu)`, I get the error below.
`InvalidArgumentError: NodeDef expected inputs 'string' do not match 0 inputs specified; Op<name=_Send; signature=tensor:T -> ; attr=T:type; attr=tensor_name:string; attr=send_device:string; attr=send_device_incarnation:int; attr=recv_device:string; attr=client_terminated:bool,default=false; is_stateful=true>; NodeDef: {{node _Send}}`
If I install/upgrade TensorFlow, that error gets resolved; however, importing t5 then fails as below.
`import t5`
`NotFoundError: /usr/local/lib/python3.6/dist-packages/tensorflow_text/python/metrics/_text_similarity_metric_ops.so: undefined symbol: _ZN10tensorflow14kernel_factory17OpKernelRegistrar12InitInternalEPKNS_9KernelDefEN4absl11string_viewESt10unique_ptrINS0_15OpKernelFactoryESt14default_deleteIS8_EE`
Please let me know if there is a way to resolve this.
Thanks.
--- END ISSUE ---
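The `undefined symbol` import error is the classic signature of a `tensorflow`/`tensorflow-text` pair built against different TensorFlow releases; the two wheels need to share a `major.minor` series. A quick diagnostic sketch (illustrative only):
```python
import tensorflow as tf
print(tf.__version__)

# If this import fails with the same undefined-symbol error, the two wheels
# were built against different TensorFlow releases.
import tensorflow_text as text
print(getattr(text, "__version__", "unknown"))
```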
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 2020 The T5 Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Install T5."""
16
17 import os
18 import sys
19 import setuptools
20
21 # To enable importing version.py directly, we add its path to sys.path.
22 version_path = os.path.join(os.path.dirname(__file__), 't5')
23 sys.path.append(version_path)
24 from version import __version__ # pylint: disable=g-import-not-at-top
25
26 # Get the long description from the README file.
27 with open('README.md') as fp:
28 _LONG_DESCRIPTION = fp.read()
29
30 setuptools.setup(
31 name='t5',
32 version=__version__,
33 description='Text-to-text transfer transformer',
34 long_description=_LONG_DESCRIPTION,
35 long_description_content_type='text/markdown',
36 author='Google Inc.',
37 author_email='[email protected]',
38 url='http://github.com/google-research/text-to-text-transfer-transformer',
39 license='Apache 2.0',
40 packages=setuptools.find_packages(),
41 package_data={
42 '': ['*.gin'],
43 },
44 scripts=[],
45 install_requires=[
46 'absl-py',
47 'babel',
48 'gin-config',
49 'mesh-tensorflow[transformer]>=0.1.13',
50 'nltk',
51 'numpy',
52 'pandas',
53 'rouge-score',
54 'sacrebleu',
55 'scikit-learn',
56 'scipy',
57 'sentencepiece',
58 'six>=1.14', # TODO(adarob): Remove once rouge-score is updated.
59 'tensorflow-text<2.3', # TODO(adarob): Unpin once #320 is resolved.
60 'tfds-nightly',
61 'torch',
62 'transformers>=2.7.0',
63 ],
64 extras_require={
65 'gcp': ['gevent', 'google-api-python-client', 'google-compute-engine',
66 'google-cloud-storage', 'oauth2client'],
67 'cache-tasks': ['apache-beam'],
68 'test': ['pytest'],
69 },
70 entry_points={
71 'console_scripts': [
72 't5_mesh_transformer = t5.models.mesh_transformer_main:console_entry_point',
73 't5_cache_tasks = t5.data.cache_tasks_main:console_entry_point'
74 ],
75 },
76 classifiers=[
77 'Development Status :: 4 - Beta',
78 'Intended Audience :: Developers',
79 'Intended Audience :: Science/Research',
80 'License :: OSI Approved :: Apache Software License',
81 'Topic :: Scientific/Engineering :: Artificial Intelligence',
82 ],
83 keywords='text nlp machinelearning',
84 )
85
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -56,7 +56,7 @@
'scipy',
'sentencepiece',
'six>=1.14', # TODO(adarob): Remove once rouge-score is updated.
- 'tensorflow-text<2.3', # TODO(adarob): Unpin once #320 is resolved.
+ 'tensorflow-text',
'tfds-nightly',
'torch',
'transformers>=2.7.0',
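With the pin removed, pip is free to resolve a `tensorflow-text` wheel that matches the installed TensorFlow. A rough post-install sanity check, sketched under the assumption that `tensorflow-text` releases track TensorFlow's `major.minor` series (`importlib.metadata` needs Python 3.8+):
```python
from importlib.metadata import version

tf_v = version("tensorflow")
tft_v = version("tensorflow-text")
assert tf_v.split(".")[:2] == tft_v.split(".")[:2], (tf_v, tft_v)
```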
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -56,7 +56,7 @@\n 'scipy',\n 'sentencepiece',\n 'six>=1.14', # TODO(adarob): Remove once rouge-score is updated.\n- 'tensorflow-text<2.3', # TODO(adarob): Unpin once #320 is resolved.\n+ 'tensorflow-text',\n 'tfds-nightly',\n 'torch',\n 'transformers>=2.7.0',\n", "issue": "Issue Running T5 in colab TPU\nHi Team,\r\n\r\nI was trying to do a pre training of T5 from scratch on colab. I could see if i install t5 using (pip install t5[gcp]), and tried to connect to execute ` tf.tpu.experimental.initialize_tpu_system(tpu)`, getting below error.\r\n\r\n`InvalidArgumentError: NodeDef expected inputs 'string' do not match 0 inputs specified; Op<name=_Send; signature=tensor:T -> ; attr=T:type; attr=tensor_name:string; attr=send_device:string; attr=send_device_incarnation:int; attr=recv_device:string; attr=client_terminated:bool,default=false; is_stateful=true>; NodeDef: {{node _Send}}`\r\n\r\nIf install/ upgrade tensorflow, it gets resolved, however import of t5 does not work as below.\r\n`\r\nimport t5`\r\n\r\n`NotFoundError: /usr/local/lib/python3.6/dist-packages/tensorflow_text/python/metrics/_text_similarity_metric_ops.so: undefined symbol: _ZN10tensorflow14kernel_factory17OpKernelRegistrar12InitInternalEPKNS_9KernelDefEN4absl11string_viewESt10unique_ptrINS0_15OpKernelFactoryESt14default_deleteIS8_EE`\r\n\r\nPlease let me know how if there is a way to resolve this.\r\nThanks.\r\n\n", "before_files": [{"content": "# Copyright 2020 The T5 Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Install T5.\"\"\"\n\nimport os\nimport sys\nimport setuptools\n\n# To enable importing version.py directly, we add its path to sys.path.\nversion_path = os.path.join(os.path.dirname(__file__), 't5')\nsys.path.append(version_path)\nfrom version import __version__ # pylint: disable=g-import-not-at-top\n\n# Get the long description from the README file.\nwith open('README.md') as fp:\n _LONG_DESCRIPTION = fp.read()\n\nsetuptools.setup(\n name='t5',\n version=__version__,\n description='Text-to-text transfer transformer',\n long_description=_LONG_DESCRIPTION,\n long_description_content_type='text/markdown',\n author='Google Inc.',\n author_email='[email protected]',\n url='http://github.com/google-research/text-to-text-transfer-transformer',\n license='Apache 2.0',\n packages=setuptools.find_packages(),\n package_data={\n '': ['*.gin'],\n },\n scripts=[],\n install_requires=[\n 'absl-py',\n 'babel',\n 'gin-config',\n 'mesh-tensorflow[transformer]>=0.1.13',\n 'nltk',\n 'numpy',\n 'pandas',\n 'rouge-score',\n 'sacrebleu',\n 'scikit-learn',\n 'scipy',\n 'sentencepiece',\n 'six>=1.14', # TODO(adarob): Remove once rouge-score is updated.\n 'tensorflow-text<2.3', # TODO(adarob): Unpin once #320 is resolved.\n 'tfds-nightly',\n 'torch',\n 'transformers>=2.7.0',\n ],\n extras_require={\n 'gcp': ['gevent', 'google-api-python-client', 'google-compute-engine',\n 'google-cloud-storage', 'oauth2client'],\n 'cache-tasks': 
['apache-beam'],\n 'test': ['pytest'],\n },\n entry_points={\n 'console_scripts': [\n 't5_mesh_transformer = t5.models.mesh_transformer_main:console_entry_point',\n 't5_cache_tasks = t5.data.cache_tasks_main:console_entry_point'\n ],\n },\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n ],\n keywords='text nlp machinelearning',\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2020 The T5 Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Install T5.\"\"\"\n\nimport os\nimport sys\nimport setuptools\n\n# To enable importing version.py directly, we add its path to sys.path.\nversion_path = os.path.join(os.path.dirname(__file__), 't5')\nsys.path.append(version_path)\nfrom version import __version__ # pylint: disable=g-import-not-at-top\n\n# Get the long description from the README file.\nwith open('README.md') as fp:\n _LONG_DESCRIPTION = fp.read()\n\nsetuptools.setup(\n name='t5',\n version=__version__,\n description='Text-to-text transfer transformer',\n long_description=_LONG_DESCRIPTION,\n long_description_content_type='text/markdown',\n author='Google Inc.',\n author_email='[email protected]',\n url='http://github.com/google-research/text-to-text-transfer-transformer',\n license='Apache 2.0',\n packages=setuptools.find_packages(),\n package_data={\n '': ['*.gin'],\n },\n scripts=[],\n install_requires=[\n 'absl-py',\n 'babel',\n 'gin-config',\n 'mesh-tensorflow[transformer]>=0.1.13',\n 'nltk',\n 'numpy',\n 'pandas',\n 'rouge-score',\n 'sacrebleu',\n 'scikit-learn',\n 'scipy',\n 'sentencepiece',\n 'six>=1.14', # TODO(adarob): Remove once rouge-score is updated.\n 'tensorflow-text',\n 'tfds-nightly',\n 'torch',\n 'transformers>=2.7.0',\n ],\n extras_require={\n 'gcp': ['gevent', 'google-api-python-client', 'google-compute-engine',\n 'google-cloud-storage', 'oauth2client'],\n 'cache-tasks': ['apache-beam'],\n 'test': ['pytest'],\n },\n entry_points={\n 'console_scripts': [\n 't5_mesh_transformer = t5.models.mesh_transformer_main:console_entry_point',\n 't5_cache_tasks = t5.data.cache_tasks_main:console_entry_point'\n ],\n },\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n ],\n keywords='text nlp machinelearning',\n)\n", "path": "setup.py"}]} | 1,376 | 120 |
gh_patches_debug_12711 | rasdani/github-patches | git_diff | conda__conda-6221 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add flag to build environment.yml without build strings
https://gitter.im/conda/conda?at=59ef54ebe44c43700a70e9a4
https://twitter.com/drvinceknight/status/922837449092542464?ref_src=twsrc%5Etfw
> Due to hashes of packages being introduced in `environment.yml` I'm getting all sorts of issues with building envs from file. (Very new problem)
--- END ISSUE ---
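For orientation, the two dependency-spec formats at stake look like this; the package is illustrative, and the join logic mirrors what `from_environment` builds:
```python
name, version, build = "numpy", "1.13.3", "py36_0"  # illustrative package

with_build = "=".join((name, version, build))  # 'numpy=1.13.3=py36_0'
no_build = "=".join((name, version))           # 'numpy=1.13.3'
```
The requested flag already exists on the CLI as `conda env export --no-builds`; the bug is that the `no_builds` branch in the file below produces exactly the same output as the default branch.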
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda_env/env.py`
Content:
```
1 from __future__ import absolute_import, print_function
2
3 import os
4 from collections import OrderedDict
5 from conda.base.context import context
6 from conda.cli import common # TODO: this should never have to import form conda.cli
7 from conda.core.linked_data import linked
8 from copy import copy
9 from itertools import chain
10
11 from . import compat, exceptions, yaml
12 from .pip_util import add_pip_installed
13
14 def load_from_directory(directory):
15 """Load and return an ``Environment`` from a given ``directory``"""
16 files = ['environment.yml', 'environment.yaml']
17 while True:
18 for f in files:
19 try:
20 return from_file(os.path.join(directory, f))
21 except exceptions.EnvironmentFileNotFound:
22 pass
23 old_directory = directory
24 directory = os.path.dirname(directory)
25 if directory == old_directory:
26 break
27 raise exceptions.EnvironmentFileNotFound(files[0])
28
29
30 # TODO This should lean more on conda instead of divining it from the outside
31 # TODO tests!!!
32 def from_environment(name, prefix, no_builds=False, ignore_channels=False):
33 """
34 Get environment object from prefix
35 Args:
36 name: The name of environment
37 prefix: The path of prefix
38 no_builds: Whether has build requirement
39 ignore_channels: whether ignore_channels
40
41 Returns: Environment object
42 """
43 installed = linked(prefix, ignore_channels=ignore_channels)
44 conda_pkgs = copy(installed)
45 # json=True hides the output, data is added to installed
46 add_pip_installed(prefix, installed, json=True)
47
48 pip_pkgs = sorted(installed - conda_pkgs)
49
50 if no_builds:
51 dependencies = ['='.join(a.quad[0:3]) for a in sorted(conda_pkgs)]
52 else:
53 dependencies = ['='.join(a.quad[0:3]) for a in sorted(conda_pkgs)]
54 if len(pip_pkgs) > 0:
55 dependencies.append({'pip': ['=='.join(a.rsplit('-', 2)[:2]) for a in pip_pkgs]})
56 # conda uses ruamel_yaml which returns a ruamel_yaml.comments.CommentedSeq
57 # this doesn't dump correctly using pyyaml
58 channels = list(context.channels)
59 if not ignore_channels:
60 for dist in conda_pkgs:
61 if dist.channel not in channels:
62 channels.insert(0, dist.channel)
63 return Environment(name=name, dependencies=dependencies, channels=channels, prefix=prefix)
64
65
66 def from_yaml(yamlstr, **kwargs):
67 """Load and return a ``Environment`` from a given ``yaml string``"""
68 data = yaml.load(yamlstr)
69 if kwargs is not None:
70 for key, value in kwargs.items():
71 data[key] = value
72 return Environment(**data)
73
74
75 def from_file(filename):
76 if not os.path.exists(filename):
77 raise exceptions.EnvironmentFileNotFound(filename)
78 with open(filename, 'r') as fp:
79 yamlstr = fp.read()
80 return from_yaml(yamlstr, filename=filename)
81
82
83 # TODO test explicitly
84 class Dependencies(OrderedDict):
85 def __init__(self, raw, *args, **kwargs):
86 super(Dependencies, self).__init__(*args, **kwargs)
87 self.raw = raw
88 self.parse()
89
90 def parse(self):
91 if not self.raw:
92 return
93
94 self.update({'conda': []})
95
96 for line in self.raw:
97 if isinstance(line, dict):
98 self.update(line)
99 else:
100 self['conda'].append(common.arg2spec(line))
101
102 # TODO only append when it's not already present
103 def add(self, package_name):
104 self.raw.append(package_name)
105 self.parse()
106
107
108 def unique(seq, key=None):
109 """ Return only unique elements of a sequence
110 >>> tuple(unique((1, 2, 3)))
111 (1, 2, 3)
112 >>> tuple(unique((1, 2, 1, 3)))
113 (1, 2, 3)
114 Uniqueness can be defined by key keyword
115 >>> tuple(unique(['cat', 'mouse', 'dog', 'hen'], key=len))
116 ('cat', 'mouse')
117 """
118 seen = set()
119 seen_add = seen.add
120 if key is None:
121 for item in seq:
122 if item not in seen:
123 seen_add(item)
124 yield item
125 else: # calculate key
126 for item in seq:
127 val = key(item)
128 if val not in seen:
129 seen_add(val)
130 yield item
131
132
133 class Environment(object):
134 def __init__(self, name=None, filename=None, channels=None,
135 dependencies=None, prefix=None):
136 self.name = name
137 self.filename = filename
138 self.prefix = prefix
139 self.dependencies = Dependencies(dependencies)
140
141 if channels is None:
142 channels = []
143 self.channels = channels
144
145 def add_channels(self, channels):
146 self.channels = list(unique(chain.from_iterable((channels, self.channels))))
147
148 def remove_channels(self):
149 self.channels = []
150
151 def to_dict(self):
152 d = yaml.dict([('name', self.name)])
153 if self.channels:
154 d['channels'] = self.channels
155 if self.dependencies:
156 d['dependencies'] = self.dependencies.raw
157 if self.prefix:
158 d['prefix'] = self.prefix
159 return d
160
161 def to_yaml(self, stream=None):
162 d = self.to_dict()
163 out = compat.u(yaml.dump(d, default_flow_style=False))
164 if stream is None:
165 return out
166 stream.write(compat.b(out, encoding="utf-8"))
167
168 def save(self):
169 with open(self.filename, "wb") as fp:
170 self.to_yaml(stream=fp)
171
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conda_env/env.py b/conda_env/env.py
--- a/conda_env/env.py
+++ b/conda_env/env.py
@@ -48,9 +48,9 @@
pip_pkgs = sorted(installed - conda_pkgs)
if no_builds:
- dependencies = ['='.join(a.quad[0:3]) for a in sorted(conda_pkgs)]
+ dependencies = ['='.join((a.name, a.version)) for a in sorted(conda_pkgs)]
else:
- dependencies = ['='.join(a.quad[0:3]) for a in sorted(conda_pkgs)]
+ dependencies = ['='.join((a.name, a.version, a.build)) for a in sorted(conda_pkgs)]
if len(pip_pkgs) > 0:
dependencies.append({'pip': ['=='.join(a.rsplit('-', 2)[:2]) for a in pip_pkgs]})
# conda uses ruamel_yaml which returns a ruamel_yaml.comments.CommentedSeq
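A hedged usage sketch of the patched behaviour; the environment name and prefix path are illustrative:
```python
from conda_env.env import from_environment

env = from_environment("myenv", "/opt/conda/envs/myenv", no_builds=True)
print(env.to_yaml())  # dependencies now render as 'name=version', no build string
```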
| {"golden_diff": "diff --git a/conda_env/env.py b/conda_env/env.py\n--- a/conda_env/env.py\n+++ b/conda_env/env.py\n@@ -48,9 +48,9 @@\n pip_pkgs = sorted(installed - conda_pkgs)\n \n if no_builds:\n- dependencies = ['='.join(a.quad[0:3]) for a in sorted(conda_pkgs)]\n+ dependencies = ['='.join((a.name, a.version)) for a in sorted(conda_pkgs)]\n else:\n- dependencies = ['='.join(a.quad[0:3]) for a in sorted(conda_pkgs)]\n+ dependencies = ['='.join((a.name, a.version, a.build)) for a in sorted(conda_pkgs)]\n if len(pip_pkgs) > 0:\n dependencies.append({'pip': ['=='.join(a.rsplit('-', 2)[:2]) for a in pip_pkgs]})\n # conda uses ruamel_yaml which returns a ruamel_yaml.comments.CommentedSeq\n", "issue": "Add flag to build environment.yml without build strings\nhttps://gitter.im/conda/conda?at=59ef54ebe44c43700a70e9a4\r\nhttps://twitter.com/drvinceknight/status/922837449092542464?ref_src=twsrc%5Etfw\r\n\r\n> Due to hashes of packages being introduced in `envinronment.yml` I'm getting all sorts of issues with building envs from file. (Very new problem)\n", "before_files": [{"content": "from __future__ import absolute_import, print_function\n\nimport os\nfrom collections import OrderedDict\nfrom conda.base.context import context\nfrom conda.cli import common # TODO: this should never have to import form conda.cli\nfrom conda.core.linked_data import linked\nfrom copy import copy\nfrom itertools import chain\n\nfrom . import compat, exceptions, yaml\nfrom .pip_util import add_pip_installed\n\ndef load_from_directory(directory):\n \"\"\"Load and return an ``Environment`` from a given ``directory``\"\"\"\n files = ['environment.yml', 'environment.yaml']\n while True:\n for f in files:\n try:\n return from_file(os.path.join(directory, f))\n except exceptions.EnvironmentFileNotFound:\n pass\n old_directory = directory\n directory = os.path.dirname(directory)\n if directory == old_directory:\n break\n raise exceptions.EnvironmentFileNotFound(files[0])\n\n\n# TODO This should lean more on conda instead of divining it from the outside\n# TODO tests!!!\ndef from_environment(name, prefix, no_builds=False, ignore_channels=False):\n \"\"\"\n Get environment object from prefix\n Args:\n name: The name of environment\n prefix: The path of prefix\n no_builds: Whether has build requirement\n ignore_channels: whether ignore_channels\n\n Returns: Environment object\n \"\"\"\n installed = linked(prefix, ignore_channels=ignore_channels)\n conda_pkgs = copy(installed)\n # json=True hides the output, data is added to installed\n add_pip_installed(prefix, installed, json=True)\n\n pip_pkgs = sorted(installed - conda_pkgs)\n\n if no_builds:\n dependencies = ['='.join(a.quad[0:3]) for a in sorted(conda_pkgs)]\n else:\n dependencies = ['='.join(a.quad[0:3]) for a in sorted(conda_pkgs)]\n if len(pip_pkgs) > 0:\n dependencies.append({'pip': ['=='.join(a.rsplit('-', 2)[:2]) for a in pip_pkgs]})\n # conda uses ruamel_yaml which returns a ruamel_yaml.comments.CommentedSeq\n # this doesn't dump correctly using pyyaml\n channels = list(context.channels)\n if not ignore_channels:\n for dist in conda_pkgs:\n if dist.channel not in channels:\n channels.insert(0, dist.channel)\n return Environment(name=name, dependencies=dependencies, channels=channels, prefix=prefix)\n\n\ndef from_yaml(yamlstr, **kwargs):\n \"\"\"Load and return a ``Environment`` from a given ``yaml string``\"\"\"\n data = yaml.load(yamlstr)\n if kwargs is not None:\n for key, value in kwargs.items():\n data[key] = value\n return Environment(**data)\n\n\ndef 
from_file(filename):\n if not os.path.exists(filename):\n raise exceptions.EnvironmentFileNotFound(filename)\n with open(filename, 'r') as fp:\n yamlstr = fp.read()\n return from_yaml(yamlstr, filename=filename)\n\n\n# TODO test explicitly\nclass Dependencies(OrderedDict):\n def __init__(self, raw, *args, **kwargs):\n super(Dependencies, self).__init__(*args, **kwargs)\n self.raw = raw\n self.parse()\n\n def parse(self):\n if not self.raw:\n return\n\n self.update({'conda': []})\n\n for line in self.raw:\n if isinstance(line, dict):\n self.update(line)\n else:\n self['conda'].append(common.arg2spec(line))\n\n # TODO only append when it's not already present\n def add(self, package_name):\n self.raw.append(package_name)\n self.parse()\n\n\ndef unique(seq, key=None):\n \"\"\" Return only unique elements of a sequence\n >>> tuple(unique((1, 2, 3)))\n (1, 2, 3)\n >>> tuple(unique((1, 2, 1, 3)))\n (1, 2, 3)\n Uniqueness can be defined by key keyword\n >>> tuple(unique(['cat', 'mouse', 'dog', 'hen'], key=len))\n ('cat', 'mouse')\n \"\"\"\n seen = set()\n seen_add = seen.add\n if key is None:\n for item in seq:\n if item not in seen:\n seen_add(item)\n yield item\n else: # calculate key\n for item in seq:\n val = key(item)\n if val not in seen:\n seen_add(val)\n yield item\n\n\nclass Environment(object):\n def __init__(self, name=None, filename=None, channels=None,\n dependencies=None, prefix=None):\n self.name = name\n self.filename = filename\n self.prefix = prefix\n self.dependencies = Dependencies(dependencies)\n\n if channels is None:\n channels = []\n self.channels = channels\n\n def add_channels(self, channels):\n self.channels = list(unique(chain.from_iterable((channels, self.channels))))\n\n def remove_channels(self):\n self.channels = []\n\n def to_dict(self):\n d = yaml.dict([('name', self.name)])\n if self.channels:\n d['channels'] = self.channels\n if self.dependencies:\n d['dependencies'] = self.dependencies.raw\n if self.prefix:\n d['prefix'] = self.prefix\n return d\n\n def to_yaml(self, stream=None):\n d = self.to_dict()\n out = compat.u(yaml.dump(d, default_flow_style=False))\n if stream is None:\n return out\n stream.write(compat.b(out, encoding=\"utf-8\"))\n\n def save(self):\n with open(self.filename, \"wb\") as fp:\n self.to_yaml(stream=fp)\n", "path": "conda_env/env.py"}], "after_files": [{"content": "from __future__ import absolute_import, print_function\n\nimport os\nfrom collections import OrderedDict\nfrom conda.base.context import context\nfrom conda.cli import common # TODO: this should never have to import form conda.cli\nfrom conda.core.linked_data import linked\nfrom copy import copy\nfrom itertools import chain\n\nfrom . 
import compat, exceptions, yaml\nfrom .pip_util import add_pip_installed\n\ndef load_from_directory(directory):\n \"\"\"Load and return an ``Environment`` from a given ``directory``\"\"\"\n files = ['environment.yml', 'environment.yaml']\n while True:\n for f in files:\n try:\n return from_file(os.path.join(directory, f))\n except exceptions.EnvironmentFileNotFound:\n pass\n old_directory = directory\n directory = os.path.dirname(directory)\n if directory == old_directory:\n break\n raise exceptions.EnvironmentFileNotFound(files[0])\n\n\n# TODO This should lean more on conda instead of divining it from the outside\n# TODO tests!!!\ndef from_environment(name, prefix, no_builds=False, ignore_channels=False):\n \"\"\"\n Get environment object from prefix\n Args:\n name: The name of environment\n prefix: The path of prefix\n no_builds: Whether has build requirement\n ignore_channels: whether ignore_channels\n\n Returns: Environment object\n \"\"\"\n installed = linked(prefix, ignore_channels=ignore_channels)\n conda_pkgs = copy(installed)\n # json=True hides the output, data is added to installed\n add_pip_installed(prefix, installed, json=True)\n\n pip_pkgs = sorted(installed - conda_pkgs)\n\n if no_builds:\n dependencies = ['='.join((a.name, a.version)) for a in sorted(conda_pkgs)]\n else:\n dependencies = ['='.join((a.name, a.version, a.build)) for a in sorted(conda_pkgs)]\n if len(pip_pkgs) > 0:\n dependencies.append({'pip': ['=='.join(a.rsplit('-', 2)[:2]) for a in pip_pkgs]})\n # conda uses ruamel_yaml which returns a ruamel_yaml.comments.CommentedSeq\n # this doesn't dump correctly using pyyaml\n channels = list(context.channels)\n if not ignore_channels:\n for dist in conda_pkgs:\n if dist.channel not in channels:\n channels.insert(0, dist.channel)\n return Environment(name=name, dependencies=dependencies, channels=channels, prefix=prefix)\n\n\ndef from_yaml(yamlstr, **kwargs):\n \"\"\"Load and return a ``Environment`` from a given ``yaml string``\"\"\"\n data = yaml.load(yamlstr)\n if kwargs is not None:\n for key, value in kwargs.items():\n data[key] = value\n return Environment(**data)\n\n\ndef from_file(filename):\n if not os.path.exists(filename):\n raise exceptions.EnvironmentFileNotFound(filename)\n with open(filename, 'r') as fp:\n yamlstr = fp.read()\n return from_yaml(yamlstr, filename=filename)\n\n\n# TODO test explicitly\nclass Dependencies(OrderedDict):\n def __init__(self, raw, *args, **kwargs):\n super(Dependencies, self).__init__(*args, **kwargs)\n self.raw = raw\n self.parse()\n\n def parse(self):\n if not self.raw:\n return\n\n self.update({'conda': []})\n\n for line in self.raw:\n if isinstance(line, dict):\n self.update(line)\n else:\n self['conda'].append(common.arg2spec(line))\n\n # TODO only append when it's not already present\n def add(self, package_name):\n self.raw.append(package_name)\n self.parse()\n\n\ndef unique(seq, key=None):\n \"\"\" Return only unique elements of a sequence\n >>> tuple(unique((1, 2, 3)))\n (1, 2, 3)\n >>> tuple(unique((1, 2, 1, 3)))\n (1, 2, 3)\n Uniqueness can be defined by key keyword\n >>> tuple(unique(['cat', 'mouse', 'dog', 'hen'], key=len))\n ('cat', 'mouse')\n \"\"\"\n seen = set()\n seen_add = seen.add\n if key is None:\n for item in seq:\n if item not in seen:\n seen_add(item)\n yield item\n else: # calculate key\n for item in seq:\n val = key(item)\n if val not in seen:\n seen_add(val)\n yield item\n\n\nclass Environment(object):\n def __init__(self, name=None, filename=None, channels=None,\n dependencies=None, 
prefix=None):\n self.name = name\n self.filename = filename\n self.prefix = prefix\n self.dependencies = Dependencies(dependencies)\n\n if channels is None:\n channels = []\n self.channels = channels\n\n def add_channels(self, channels):\n self.channels = list(unique(chain.from_iterable((channels, self.channels))))\n\n def remove_channels(self):\n self.channels = []\n\n def to_dict(self):\n d = yaml.dict([('name', self.name)])\n if self.channels:\n d['channels'] = self.channels\n if self.dependencies:\n d['dependencies'] = self.dependencies.raw\n if self.prefix:\n d['prefix'] = self.prefix\n return d\n\n def to_yaml(self, stream=None):\n d = self.to_dict()\n out = compat.u(yaml.dump(d, default_flow_style=False))\n if stream is None:\n return out\n stream.write(compat.b(out, encoding=\"utf-8\"))\n\n def save(self):\n with open(self.filename, \"wb\") as fp:\n self.to_yaml(stream=fp)\n", "path": "conda_env/env.py"}]} | 2,018 | 222 |
gh_patches_debug_6019 | rasdani/github-patches | git_diff | cupy__cupy-3335 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`around` result is different from NumPy's
```
>>> np.__version__
'1.16.4'
>>> np.around([2.5])
array([2.])
>>> cupy.__version__
'7.0.0a1'
>>> cupy.around([2.5])
array([3.])
```
NumPy seems to round to even.
--- END ISSUE ---
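For reference, round-half-to-even ("banker's rounding") sends ties to the nearest even integer; NumPy's `around` follows it, and so does Python 3's built-in `round`, which makes the contrast easy to demonstrate:
```python
import numpy as np

print(np.around([0.5, 1.5, 2.5, 3.5]))           # [0. 2. 2. 4.]
print([round(x) for x in (0.5, 1.5, 2.5, 3.5)])  # [0, 2, 2, 4]
```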
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cupy/math/rounding.py`
Content:
```
1 from cupy import core
2 from cupy.core import fusion
3 from cupy.math import ufunc
4
5
6 def around(a, decimals=0, out=None):
7 """Rounds to the given number of decimals.
8
9 Args:
10 a (cupy.ndarray): The source array.
11 decimals (int): umber of decimal places to round to (default: 0).
12 If decimals is negative, it specifies the number of positions to
13 the left of the decimal point.
14 out (cupy.ndarray): Output array.
15
16 Returns:
17 cupy.ndarray: Rounded array.
18
19 .. seealso:: :func:`numpy.around`
20
21 """
22 if fusion._is_fusing():
23 return fusion._call_ufunc(core.core._round_ufunc, a, decimals, out=out)
24 a = core.array(a, copy=False)
25 return a.round(decimals, out=out)
26
27
28 def round_(a, decimals=0, out=None):
29 return around(a, decimals, out=out)
30
31
32 rint = ufunc.create_math_ufunc(
33 'rint', 1, 'cupy_rint',
34 '''Rounds each element of an array to the nearest integer.
35
36 .. seealso:: :data:`numpy.rint`
37
38 ''')
39
40
41 floor = ufunc.create_math_ufunc(
42 'floor', 1, 'cupy_floor',
43 '''Rounds each element of an array to its floor integer.
44
45 .. seealso:: :data:`numpy.floor`
46
47 ''', support_complex=False)
48
49
50 ceil = ufunc.create_math_ufunc(
51 'ceil', 1, 'cupy_ceil',
52 '''Rounds each element of an array to its ceiling integer.
53
54 .. seealso:: :data:`numpy.ceil`
55
56 ''', support_complex=False)
57
58
59 trunc = ufunc.create_math_ufunc(
60 'trunc', 1, 'cupy_trunc',
61 '''Rounds each element of an array towards zero.
62
63 .. seealso:: :data:`numpy.trunc`
64
65 ''', support_complex=False)
66
67
68 fix = core.create_ufunc(
69 'cupy_fix', ('e->e', 'f->f', 'd->d'),
70 'out0 = (in0 >= 0.0) ? floor(in0): ceil(in0)',
71 doc='''If given value x is positive, it return floor(x).
72 Else, it return ceil(x).
73
74 .. seealso:: :func:`numpy.fix`
75
76 ''')
77
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cupy/math/rounding.py b/cupy/math/rounding.py
--- a/cupy/math/rounding.py
+++ b/cupy/math/rounding.py
@@ -8,7 +8,7 @@
Args:
a (cupy.ndarray): The source array.
- decimals (int): umber of decimal places to round to (default: 0).
+ decimals (int): Number of decimal places to round to (default: 0).
If decimals is negative, it specifies the number of positions to
the left of the decimal point.
out (cupy.ndarray): Output array.
| {"golden_diff": "diff --git a/cupy/math/rounding.py b/cupy/math/rounding.py\n--- a/cupy/math/rounding.py\n+++ b/cupy/math/rounding.py\n@@ -8,7 +8,7 @@\n \n Args:\n a (cupy.ndarray): The source array.\n- decimals (int): umber of decimal places to round to (default: 0).\n+ decimals (int): Number of decimal places to round to (default: 0).\n If decimals is negative, it specifies the number of positions to\n the left of the decimal point.\n out (cupy.ndarray): Output array.\n", "issue": "`around` result is different compared to numpy\n```\r\n>>> np.__version__\r\n'1.16.4'\r\n>>> np.around([2.5])\r\narray([2.])\r\n>>> cupy.__version__\r\n'7.0.0a1'\r\n>>> cupy.around([2.5])\r\narray([3.])\r\n```\r\nNumPy seems to round to even.\n", "before_files": [{"content": "from cupy import core\nfrom cupy.core import fusion\nfrom cupy.math import ufunc\n\n\ndef around(a, decimals=0, out=None):\n \"\"\"Rounds to the given number of decimals.\n\n Args:\n a (cupy.ndarray): The source array.\n decimals (int): umber of decimal places to round to (default: 0).\n If decimals is negative, it specifies the number of positions to\n the left of the decimal point.\n out (cupy.ndarray): Output array.\n\n Returns:\n cupy.ndarray: Rounded array.\n\n .. seealso:: :func:`numpy.around`\n\n \"\"\"\n if fusion._is_fusing():\n return fusion._call_ufunc(core.core._round_ufunc, a, decimals, out=out)\n a = core.array(a, copy=False)\n return a.round(decimals, out=out)\n\n\ndef round_(a, decimals=0, out=None):\n return around(a, decimals, out=out)\n\n\nrint = ufunc.create_math_ufunc(\n 'rint', 1, 'cupy_rint',\n '''Rounds each element of an array to the nearest integer.\n\n .. seealso:: :data:`numpy.rint`\n\n ''')\n\n\nfloor = ufunc.create_math_ufunc(\n 'floor', 1, 'cupy_floor',\n '''Rounds each element of an array to its floor integer.\n\n .. seealso:: :data:`numpy.floor`\n\n ''', support_complex=False)\n\n\nceil = ufunc.create_math_ufunc(\n 'ceil', 1, 'cupy_ceil',\n '''Rounds each element of an array to its ceiling integer.\n\n .. seealso:: :data:`numpy.ceil`\n\n ''', support_complex=False)\n\n\ntrunc = ufunc.create_math_ufunc(\n 'trunc', 1, 'cupy_trunc',\n '''Rounds each element of an array towards zero.\n\n .. seealso:: :data:`numpy.trunc`\n\n ''', support_complex=False)\n\n\nfix = core.create_ufunc(\n 'cupy_fix', ('e->e', 'f->f', 'd->d'),\n 'out0 = (in0 >= 0.0) ? floor(in0): ceil(in0)',\n doc='''If given value x is positive, it return floor(x).\n Else, it return ceil(x).\n\n .. seealso:: :func:`numpy.fix`\n\n ''')\n", "path": "cupy/math/rounding.py"}], "after_files": [{"content": "from cupy import core\nfrom cupy.core import fusion\nfrom cupy.math import ufunc\n\n\ndef around(a, decimals=0, out=None):\n \"\"\"Rounds to the given number of decimals.\n\n Args:\n a (cupy.ndarray): The source array.\n decimals (int): Number of decimal places to round to (default: 0).\n If decimals is negative, it specifies the number of positions to\n the left of the decimal point.\n out (cupy.ndarray): Output array.\n\n Returns:\n cupy.ndarray: Rounded array.\n\n .. seealso:: :func:`numpy.around`\n\n \"\"\"\n if fusion._is_fusing():\n return fusion._call_ufunc(core.core._round_ufunc, a, decimals, out=out)\n a = core.array(a, copy=False)\n return a.round(decimals, out=out)\n\n\ndef round_(a, decimals=0, out=None):\n return around(a, decimals, out=out)\n\n\nrint = ufunc.create_math_ufunc(\n 'rint', 1, 'cupy_rint',\n '''Rounds each element of an array to the nearest integer.\n\n .. 
seealso:: :data:`numpy.rint`\n\n ''')\n\n\nfloor = ufunc.create_math_ufunc(\n 'floor', 1, 'cupy_floor',\n '''Rounds each element of an array to its floor integer.\n\n .. seealso:: :data:`numpy.floor`\n\n ''', support_complex=False)\n\n\nceil = ufunc.create_math_ufunc(\n 'ceil', 1, 'cupy_ceil',\n '''Rounds each element of an array to its ceiling integer.\n\n .. seealso:: :data:`numpy.ceil`\n\n ''', support_complex=False)\n\n\ntrunc = ufunc.create_math_ufunc(\n 'trunc', 1, 'cupy_trunc',\n '''Rounds each element of an array towards zero.\n\n .. seealso:: :data:`numpy.trunc`\n\n ''', support_complex=False)\n\n\nfix = core.create_ufunc(\n 'cupy_fix', ('e->e', 'f->f', 'd->d'),\n 'out0 = (in0 >= 0.0) ? floor(in0): ceil(in0)',\n doc='''If given value x is positive, it return floor(x).\n Else, it return ceil(x).\n\n .. seealso:: :func:`numpy.fix`\n\n ''')\n", "path": "cupy/math/rounding.py"}]} | 1,026 | 136 |
gh_patches_debug_27011 | rasdani/github-patches | git_diff | dask__distributed-8347 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bokeh 3.3.0 uses absolute URLs and breaks proxied dashboards
I noticed that when using `distributed==2023.10.1` with `bokeh==3.2.2`, the URLs to the Bokeh JavaScript are relative.
```html
<script type="text/javascript" src="static/js/bokeh.min.js?v=3ca6425586de5036dc01992dd69aa61e9196dd02619557cfaeb1b3d8b77adf724be49401b1168483d165494ce57a6daa16e6f6d3660fef117d45028221f86357"></script>
```
But when upgrading to `bokeh==3.3.0` they become absolute.
```html
<script type="text/javascript" src="/static/js/bokeh.min.js?v=39ef57c3a83533e24f961e5c27f651a61045dbccefac4b5df86a7680b1edaff31886a7c0322250ffb0d758fa14ae156c9b640f60cca99f020096b050a4dbb571"></script>
```
This breaks dashboards that are being proxied at some sub-url.
Setting `dask scheduler --dashboard-prefix ""` doesn't fix it.
### Reproducer
```
$ pip install dask distributed bokeh==3.3.0
$ dask scheduler &
$ curl localhost:8787/status | grep bokeh.min.js
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `distributed/dashboard/core.py`
Content:
```
1 from __future__ import annotations
2
3 import functools
4 import warnings
5
6 from bokeh.application import Application
7 from bokeh.application.handlers.function import FunctionHandler
8 from bokeh.server.server import BokehTornado
9 from bokeh.server.util import create_hosts_allowlist
10
11 import dask
12
13 from distributed.dashboard.utils import BOKEH_VERSION
14 from distributed.versions import BOKEH_REQUIREMENT
15
16 # Set `prereleases=True` to allow for use with dev versions of `bokeh`
17 if not BOKEH_REQUIREMENT.specifier.contains(BOKEH_VERSION, prereleases=True):
18 warnings.warn(
19 f"\nDask needs {BOKEH_REQUIREMENT} for the dashboard."
20 f"\nYou have bokeh={BOKEH_VERSION}."
21 "\nContinuing without the dashboard."
22 )
23 raise ImportError(
24 f"Dask needs {BOKEH_REQUIREMENT} for the dashboard, not bokeh={BOKEH_VERSION}"
25 )
26
27
28 if BOKEH_VERSION.major < 3:
29 from bokeh.models import Panel as TabPanel # noqa: F401
30 else:
31 from bokeh.models import TabPanel # noqa: F401
32
33
34 def BokehApplication(applications, server, prefix="/", template_variables=None):
35 template_variables = template_variables or {}
36 prefix = "/" + prefix.strip("/") + "/" if prefix else "/"
37
38 extra = {"prefix": prefix, **template_variables}
39
40 funcs = {k: functools.partial(v, server, extra) for k, v in applications.items()}
41 apps = {k: Application(FunctionHandler(v)) for k, v in funcs.items()}
42
43 kwargs = dask.config.get("distributed.scheduler.dashboard.bokeh-application").copy()
44 extra_websocket_origins = create_hosts_allowlist(
45 kwargs.pop("allow_websocket_origin"), server.http_server.port
46 )
47
48 return BokehTornado(
49 apps,
50 prefix=prefix,
51 use_index=False,
52 extra_websocket_origins=extra_websocket_origins,
53 **kwargs,
54 )
55
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/distributed/dashboard/core.py b/distributed/dashboard/core.py
--- a/distributed/dashboard/core.py
+++ b/distributed/dashboard/core.py
@@ -5,6 +5,7 @@
from bokeh.application import Application
from bokeh.application.handlers.function import FunctionHandler
+from bokeh.resources import Resources
from bokeh.server.server import BokehTornado
from bokeh.server.util import create_hosts_allowlist
@@ -31,6 +32,11 @@
from bokeh.models import TabPanel # noqa: F401
+class DaskBokehTornado(BokehTornado):
+ def resources(self, absolute_url: str | bool | None = True) -> Resources:
+ return super().resources(absolute_url)
+
+
def BokehApplication(applications, server, prefix="/", template_variables=None):
template_variables = template_variables or {}
prefix = "/" + prefix.strip("/") + "/" if prefix else "/"
@@ -45,10 +51,11 @@
kwargs.pop("allow_websocket_origin"), server.http_server.port
)
- return BokehTornado(
+ return DaskBokehTornado(
apps,
prefix=prefix,
use_index=False,
extra_websocket_origins=extra_websocket_origins,
+ absolute_url="",
**kwargs,
)
| {"golden_diff": "diff --git a/distributed/dashboard/core.py b/distributed/dashboard/core.py\n--- a/distributed/dashboard/core.py\n+++ b/distributed/dashboard/core.py\n@@ -5,6 +5,7 @@\n \n from bokeh.application import Application\n from bokeh.application.handlers.function import FunctionHandler\n+from bokeh.resources import Resources\n from bokeh.server.server import BokehTornado\n from bokeh.server.util import create_hosts_allowlist\n \n@@ -31,6 +32,11 @@\n from bokeh.models import TabPanel # noqa: F401\n \n \n+class DaskBokehTornado(BokehTornado):\n+ def resources(self, absolute_url: str | bool | None = True) -> Resources:\n+ return super().resources(absolute_url)\n+\n+\n def BokehApplication(applications, server, prefix=\"/\", template_variables=None):\n template_variables = template_variables or {}\n prefix = \"/\" + prefix.strip(\"/\") + \"/\" if prefix else \"/\"\n@@ -45,10 +51,11 @@\n kwargs.pop(\"allow_websocket_origin\"), server.http_server.port\n )\n \n- return BokehTornado(\n+ return DaskBokehTornado(\n apps,\n prefix=prefix,\n use_index=False,\n extra_websocket_origins=extra_websocket_origins,\n+ absolute_url=\"\",\n **kwargs,\n )\n", "issue": "Bokeh 3.3.0 uses absolute URLs and breaks proxied dashboards\nI noticed when using `distributed==2023.10.1` with `bokeh==3.2.2` the URLs to the Bokeh JavaScript are relative.\r\n\r\n```html\r\n<script type=\"text/javascript\" src=\"static/js/bokeh.min.js?v=3ca6425586de5036dc01992dd69aa61e9196dd02619557cfaeb1b3d8b77adf724be49401b1168483d165494ce57a6daa16e6f6d3660fef117d45028221f86357\"></script>\r\n```\r\n\r\nBut when upgrading to `bokeh==3.3.0` they become absolute.\r\n\r\n```html\r\n<script type=\"text/javascript\" src=\"/static/js/bokeh.min.js?v=39ef57c3a83533e24f961e5c27f651a61045dbccefac4b5df86a7680b1edaff31886a7c0322250ffb0d758fa14ae156c9b640f60cca99f020096b050a4dbb571\"></script>\r\n```\r\n\r\nThis breaks dashboards that are being proxied at some sub-url.\r\n\r\nSetting `dask scheduler --dashboard-prefix \"\"` doesn't fix it.\r\n\r\n### Reproducer\r\n\r\n```\r\n$ pip install dask distributed bokeh==3.3.0\r\n$ dask scheduler &\r\n$ curl localhost:8787/status | grep bokeh.min.js\r\n```\n", "before_files": [{"content": "from __future__ import annotations\n\nimport functools\nimport warnings\n\nfrom bokeh.application import Application\nfrom bokeh.application.handlers.function import FunctionHandler\nfrom bokeh.server.server import BokehTornado\nfrom bokeh.server.util import create_hosts_allowlist\n\nimport dask\n\nfrom distributed.dashboard.utils import BOKEH_VERSION\nfrom distributed.versions import BOKEH_REQUIREMENT\n\n# Set `prereleases=True` to allow for use with dev versions of `bokeh`\nif not BOKEH_REQUIREMENT.specifier.contains(BOKEH_VERSION, prereleases=True):\n warnings.warn(\n f\"\\nDask needs {BOKEH_REQUIREMENT} for the dashboard.\"\n f\"\\nYou have bokeh={BOKEH_VERSION}.\"\n \"\\nContinuing without the dashboard.\"\n )\n raise ImportError(\n f\"Dask needs {BOKEH_REQUIREMENT} for the dashboard, not bokeh={BOKEH_VERSION}\"\n )\n\n\nif BOKEH_VERSION.major < 3:\n from bokeh.models import Panel as TabPanel # noqa: F401\nelse:\n from bokeh.models import TabPanel # noqa: F401\n\n\ndef BokehApplication(applications, server, prefix=\"/\", template_variables=None):\n template_variables = template_variables or {}\n prefix = \"/\" + prefix.strip(\"/\") + \"/\" if prefix else \"/\"\n\n extra = {\"prefix\": prefix, **template_variables}\n\n funcs = {k: functools.partial(v, server, extra) for k, v in applications.items()}\n apps = {k: 
Application(FunctionHandler(v)) for k, v in funcs.items()}\n\n kwargs = dask.config.get(\"distributed.scheduler.dashboard.bokeh-application\").copy()\n extra_websocket_origins = create_hosts_allowlist(\n kwargs.pop(\"allow_websocket_origin\"), server.http_server.port\n )\n\n return BokehTornado(\n apps,\n prefix=prefix,\n use_index=False,\n extra_websocket_origins=extra_websocket_origins,\n **kwargs,\n )\n", "path": "distributed/dashboard/core.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport functools\nimport warnings\n\nfrom bokeh.application import Application\nfrom bokeh.application.handlers.function import FunctionHandler\nfrom bokeh.resources import Resources\nfrom bokeh.server.server import BokehTornado\nfrom bokeh.server.util import create_hosts_allowlist\n\nimport dask\n\nfrom distributed.dashboard.utils import BOKEH_VERSION\nfrom distributed.versions import BOKEH_REQUIREMENT\n\n# Set `prereleases=True` to allow for use with dev versions of `bokeh`\nif not BOKEH_REQUIREMENT.specifier.contains(BOKEH_VERSION, prereleases=True):\n warnings.warn(\n f\"\\nDask needs {BOKEH_REQUIREMENT} for the dashboard.\"\n f\"\\nYou have bokeh={BOKEH_VERSION}.\"\n \"\\nContinuing without the dashboard.\"\n )\n raise ImportError(\n f\"Dask needs {BOKEH_REQUIREMENT} for the dashboard, not bokeh={BOKEH_VERSION}\"\n )\n\n\nif BOKEH_VERSION.major < 3:\n from bokeh.models import Panel as TabPanel # noqa: F401\nelse:\n from bokeh.models import TabPanel # noqa: F401\n\n\nclass DaskBokehTornado(BokehTornado):\n def resources(self, absolute_url: str | bool | None = True) -> Resources:\n return super().resources(absolute_url)\n\n\ndef BokehApplication(applications, server, prefix=\"/\", template_variables=None):\n template_variables = template_variables or {}\n prefix = \"/\" + prefix.strip(\"/\") + \"/\" if prefix else \"/\"\n\n extra = {\"prefix\": prefix, **template_variables}\n\n funcs = {k: functools.partial(v, server, extra) for k, v in applications.items()}\n apps = {k: Application(FunctionHandler(v)) for k, v in funcs.items()}\n\n kwargs = dask.config.get(\"distributed.scheduler.dashboard.bokeh-application\").copy()\n extra_websocket_origins = create_hosts_allowlist(\n kwargs.pop(\"allow_websocket_origin\"), server.http_server.port\n )\n\n return DaskBokehTornado(\n apps,\n prefix=prefix,\n use_index=False,\n extra_websocket_origins=extra_websocket_origins,\n absolute_url=\"\",\n **kwargs,\n )\n", "path": "distributed/dashboard/core.py"}]} | 1,214 | 292 |
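The patch above works by overriding `BokehTornado.resources()` so the dashboard always renders resource URLs against a fixed `absolute_url` (here the empty string), keeping script paths relative behind a proxy. A rough sanity check against a running scheduler (assumes a local `dask scheduler` on the default port; not a test from the repo):

```python
import re
import urllib.request

html = urllib.request.urlopen("http://localhost:8787/status").read().decode()
scripts = re.findall(r'src="([^"]*bokeh[^"]*\.js[^"]*)"', html)
# With the fix applied, no bokeh script URL should start with a leading "/".
assert scripts and not any(src.startswith("/") for src in scripts), scripts
```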
gh_patches_debug_64681 | rasdani/github-patches | git_diff | chainer__chainer-751 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`cupy.array_split` doesn't accept empty indices
```
>>> x=cupy.array([1])
>>> cupy.array_split(x, [])
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/unno/git/chainer/cupy/manipulation/split.py", line 32, in array_split
ret.append(ary[skip + (slice(index, size),)])
UnboundLocalError: local variable 'index' referenced before assignment
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cupy/manipulation/split.py`
Content:
```
1 import numpy
2 import six
3
4
5 def array_split(ary, indices_or_sections, axis=0):
6 """Splits an array into multiple sub arrays along a given axis.
7
8 This function is almost equivalent to :func:`cupy.split`. The only
9 difference is that this function allows an integer sections that does not
10 evenly divide the axis.
11
12 .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.array_split`
13
14 """
15 if ary.ndim <= axis:
16 raise IndexError('Axis exceeds ndim')
17 size = ary.shape[axis]
18
19 if numpy.isscalar(indices_or_sections):
20 each_size = (size - 1) // indices_or_sections + 1
21 indices = [i * each_size
22 for i in six.moves.range(1, indices_or_sections)]
23 else:
24 indices = indices_or_sections
25
26 skip = (slice(None),) * axis
27 ret = []
28 i = 0
29 for index in indices:
30 ret.append(ary[skip + (slice(i, index),)])
31 i = index
32 ret.append(ary[skip + (slice(index, size),)])
33
34 return ret
35
36
37 def dsplit(ary, indices_or_sections):
38 """Splits an array into multiple sub arrays along the third axis.
39
40 This is equivalent to ``split`` with ``axis=2``.
41
42 .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.dsplit`
43
44 """
45 if ary.ndim <= 2:
46 raise ValueError('Cannot dsplit an array with less than 3 dimensions')
47 return split(ary, indices_or_sections, 2)
48
49
50 def hsplit(ary, indices_or_sections):
51 """Splits an array into multiple sub arrays horizontally.
52
53 This is equivalent to ``split`` with ``axis=0`` if ``ary`` has one
54 dimension, and otherwise that with ``axis=1``.
55
56 .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.hsplit`
57
58 """
59 if ary.ndim == 0:
60 raise ValueError('Cannot hsplit a zero-dimensional array')
61 if ary.ndim == 1:
62 return split(ary, indices_or_sections, 0)
63 else:
64 return split(ary, indices_or_sections, 1)
65
66
67 def split(ary, indices_or_sections, axis=0):
68 """Splits an array into multiple sub arrays along a given axis.
69
70 Args:
71 ary (cupy.ndarray): Array to split.
72 indices_or_sections (int or sequence of ints): A value indicating how
73 to divide the axis. If it is an integer, then is treated as the
74 number of sections, and the axis is evenly divided. Otherwise,
75 the integers indicate indices to split at. Note that the sequence
76 on the device memory is not allowed.
77 axis (int): Axis along which the array is split.
78
79 Returns:
80 A list of sub arrays. Eacy array is a view of the corresponding input
81 array.
82
83 .. seealso:: :func:`numpy.split`
84
85 """
86 if ary.ndim <= axis:
87 raise IndexError('Axis exceeds ndim')
88 size = ary.shape[axis]
89
90 if numpy.isscalar(indices_or_sections):
91 if size % indices_or_sections != 0:
92 raise ValueError(
93 'indices_or_sections must divide the size along the axes.\n'
94 'If you want to split the array into non-equally-sized '
95 'arrays, use array_split instead.')
96 return array_split(ary, indices_or_sections, axis)
97
98
99 def vsplit(ary, indices_or_sections):
100 """Splits an array into multiple sub arrays along the first axis.
101
102 This is equivalent to ``split`` with ``axis=0``.
103
104 .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.dsplit`
105
106 """
107 if ary.ndim <= 1:
108 raise ValueError('Cannot vsplit an array with less than 2 dimensions')
109 return split(ary, indices_or_sections, 0)
110
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cupy/manipulation/split.py b/cupy/manipulation/split.py
--- a/cupy/manipulation/split.py
+++ b/cupy/manipulation/split.py
@@ -23,6 +23,9 @@
else:
indices = indices_or_sections
+ if len(indices) == 0:
+ return [ary]
+
skip = (slice(None),) * axis
ret = []
i = 0
| {"golden_diff": "diff --git a/cupy/manipulation/split.py b/cupy/manipulation/split.py\n--- a/cupy/manipulation/split.py\n+++ b/cupy/manipulation/split.py\n@@ -23,6 +23,9 @@\n else:\n indices = indices_or_sections\n \n+ if len(indices) == 0:\n+ return [ary]\n+\n skip = (slice(None),) * axis\n ret = []\n i = 0\n", "issue": "`cupy.array_split` doesn't accept empty indecies\n```\n>>> x=cupy.array([1])\n>>> cupy.array_split(x, [])\nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\n File \"/home/unno/git/chainer/cupy/manipulation/split.py\", line 32, in array_split\n ret.append(ary[skip + (slice(index, size),)])\nUnboundLocalError: local variable 'index' referenced before assignment\n```\n\n", "before_files": [{"content": "import numpy\nimport six\n\n\ndef array_split(ary, indices_or_sections, axis=0):\n \"\"\"Splits an array into multiple sub arrays along a given axis.\n\n This function is almost equivalent to :func:`cupy.split`. The only\n difference is that this function allows an integer sections that does not\n evenly divide the axis.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.array_split`\n\n \"\"\"\n if ary.ndim <= axis:\n raise IndexError('Axis exceeds ndim')\n size = ary.shape[axis]\n\n if numpy.isscalar(indices_or_sections):\n each_size = (size - 1) // indices_or_sections + 1\n indices = [i * each_size\n for i in six.moves.range(1, indices_or_sections)]\n else:\n indices = indices_or_sections\n\n skip = (slice(None),) * axis\n ret = []\n i = 0\n for index in indices:\n ret.append(ary[skip + (slice(i, index),)])\n i = index\n ret.append(ary[skip + (slice(index, size),)])\n\n return ret\n\n\ndef dsplit(ary, indices_or_sections):\n \"\"\"Splits an array into multiple sub arrays along the third axis.\n\n This is equivalent to ``split`` with ``axis=2``.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.dsplit`\n\n \"\"\"\n if ary.ndim <= 2:\n raise ValueError('Cannot dsplit an array with less than 3 dimensions')\n return split(ary, indices_or_sections, 2)\n\n\ndef hsplit(ary, indices_or_sections):\n \"\"\"Splits an array into multiple sub arrays horizontally.\n\n This is equivalent to ``split`` with ``axis=0`` if ``ary`` has one\n dimension, and otherwise that with ``axis=1``.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.hsplit`\n\n \"\"\"\n if ary.ndim == 0:\n raise ValueError('Cannot hsplit a zero-dimensional array')\n if ary.ndim == 1:\n return split(ary, indices_or_sections, 0)\n else:\n return split(ary, indices_or_sections, 1)\n\n\ndef split(ary, indices_or_sections, axis=0):\n \"\"\"Splits an array into multiple sub arrays along a given axis.\n\n Args:\n ary (cupy.ndarray): Array to split.\n indices_or_sections (int or sequence of ints): A value indicating how\n to divide the axis. If it is an integer, then is treated as the\n number of sections, and the axis is evenly divided. Otherwise,\n the integers indicate indices to split at. Note that the sequence\n on the device memory is not allowed.\n axis (int): Axis along which the array is split.\n\n Returns:\n A list of sub arrays. Eacy array is a view of the corresponding input\n array.\n\n .. 
seealso:: :func:`numpy.split`\n\n \"\"\"\n if ary.ndim <= axis:\n raise IndexError('Axis exceeds ndim')\n size = ary.shape[axis]\n\n if numpy.isscalar(indices_or_sections):\n if size % indices_or_sections != 0:\n raise ValueError(\n 'indices_or_sections must divide the size along the axes.\\n'\n 'If you want to split the array into non-equally-sized '\n 'arrays, use array_split instead.')\n return array_split(ary, indices_or_sections, axis)\n\n\ndef vsplit(ary, indices_or_sections):\n \"\"\"Splits an array into multiple sub arrays along the first axis.\n\n This is equivalent to ``split`` with ``axis=0``.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.dsplit`\n\n \"\"\"\n if ary.ndim <= 1:\n raise ValueError('Cannot vsplit an array with less than 2 dimensions')\n return split(ary, indices_or_sections, 0)\n", "path": "cupy/manipulation/split.py"}], "after_files": [{"content": "import numpy\nimport six\n\n\ndef array_split(ary, indices_or_sections, axis=0):\n \"\"\"Splits an array into multiple sub arrays along a given axis.\n\n This function is almost equivalent to :func:`cupy.split`. The only\n difference is that this function allows an integer sections that does not\n evenly divide the axis.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.array_split`\n\n \"\"\"\n if ary.ndim <= axis:\n raise IndexError('Axis exceeds ndim')\n size = ary.shape[axis]\n\n if numpy.isscalar(indices_or_sections):\n each_size = (size - 1) // indices_or_sections + 1\n indices = [i * each_size\n for i in six.moves.range(1, indices_or_sections)]\n else:\n indices = indices_or_sections\n\n if len(indices) == 0:\n return [ary]\n\n skip = (slice(None),) * axis\n ret = []\n i = 0\n for index in indices:\n ret.append(ary[skip + (slice(i, index),)])\n i = index\n ret.append(ary[skip + (slice(index, size),)])\n\n return ret\n\n\ndef dsplit(ary, indices_or_sections):\n \"\"\"Splits an array into multiple sub arrays along the third axis.\n\n This is equivalent to ``split`` with ``axis=2``.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.dsplit`\n\n \"\"\"\n if ary.ndim <= 2:\n raise ValueError('Cannot dsplit an array with less than 3 dimensions')\n return split(ary, indices_or_sections, 2)\n\n\ndef hsplit(ary, indices_or_sections):\n \"\"\"Splits an array into multiple sub arrays horizontally.\n\n This is equivalent to ``split`` with ``axis=0`` if ``ary`` has one\n dimension, and otherwise that with ``axis=1``.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.hsplit`\n\n \"\"\"\n if ary.ndim == 0:\n raise ValueError('Cannot hsplit a zero-dimensional array')\n if ary.ndim == 1:\n return split(ary, indices_or_sections, 0)\n else:\n return split(ary, indices_or_sections, 1)\n\n\ndef split(ary, indices_or_sections, axis=0):\n \"\"\"Splits an array into multiple sub arrays along a given axis.\n\n Args:\n ary (cupy.ndarray): Array to split.\n indices_or_sections (int or sequence of ints): A value indicating how\n to divide the axis. If it is an integer, then is treated as the\n number of sections, and the axis is evenly divided. Otherwise,\n the integers indicate indices to split at. Note that the sequence\n on the device memory is not allowed.\n axis (int): Axis along which the array is split.\n\n Returns:\n A list of sub arrays. Eacy array is a view of the corresponding input\n array.\n\n .. 
seealso:: :func:`numpy.split`\n\n \"\"\"\n if ary.ndim <= axis:\n raise IndexError('Axis exceeds ndim')\n size = ary.shape[axis]\n\n if numpy.isscalar(indices_or_sections):\n if size % indices_or_sections != 0:\n raise ValueError(\n 'indices_or_sections must divide the size along the axes.\\n'\n 'If you want to split the array into non-equally-sized '\n 'arrays, use array_split instead.')\n return array_split(ary, indices_or_sections, axis)\n\n\ndef vsplit(ary, indices_or_sections):\n \"\"\"Splits an array into multiple sub arrays along the first axis.\n\n This is equivalent to ``split`` with ``axis=0``.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.dsplit`\n\n \"\"\"\n if ary.ndim <= 1:\n raise ValueError('Cannot vsplit an array with less than 2 dimensions')\n return split(ary, indices_or_sections, 0)\n", "path": "cupy/manipulation/split.py"}]} | 1,476 | 104 |
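The three-line guard in the patch above makes an empty index sequence return the whole array as a single chunk, which matches NumPy's semantics. A quick CPU-only check of the expected behaviour, using NumPy as the reference (CuPy should agree after the fix):

```python
import numpy as np

x = np.array([1])
parts = np.array_split(x, [])
assert len(parts) == 1 and (parts[0] == x).all()
# After the patch, cupy.array_split(cupy.array([1]), []) behaves the same way.
```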
gh_patches_debug_14270 | rasdani/github-patches | git_diff | streamlink__streamlink-562 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
0.3.2 Release
Closes #562
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/__init__.py`
Content:
```
1 # coding: utf8
2 """Streamlink extracts streams from various services.
3
4 The main compontent of Streamlink is a command-line utility that
5 launches the streams in a video player.
6
7 An API is also provided that allows direct access to stream data.
8
9 Full documentation is available at https://streamlink.github.io.
10
11 """
12
13
14 __title__ = "streamlink"
15 __version__ = "0.3.1"
16 __license__ = "Simplified BSD"
17 __author__ = "Streamlink"
18 __copyright__ = "Copyright 2016 Streamlink"
19 __credits__ = [
20 "Agustín Carrasco (@asermax)",
21 "Andrew Bashore (@bashtech)",
22 "Andy Mikhailenko (@neithere)",
23 "Athanasios Oikonomou (@athoik)",
24 "Brian Callahan (@ibara)",
25 "Che (@chhe)",
26 "Christopher Rosell (@streamlink)",
27 "Daniel Meißner (@meise)",
28 "Daniel Miranda (@danielkza)",
29 "Daniel Wallace (@gtmanfred)",
30 "David Arvelo (@darvelo)",
31 "Dominik Dabrowski (@doda)",
32 "Erik G (@tboss)",
33 "Eric J (@wormeyman)",
34 "Ethan Jones (@jonesz)",
35 "Gaspard Jankowiak (@gapato)",
36 "Jaime Marquínez Ferrándiz (@jaimeMF)",
37 "Jan Tore Morken (@jantore)",
38 "John Peterson (@john-peterson)",
39 "Jon Bergli Heier (@sn4kebite)",
40 "Joseph Glanville (@josephglanville)",
41 "Julian Richen (@FireDart)",
42 "Kacper (@kasper93)",
43 "Martin Panter (@vadmium)",
44 "Max Nordlund (@maxnordlund)",
45 "Michael Cheah (@cheah)",
46 "Moritz Blanke",
47 "Niall McAndrew (@niallm90)",
48 "Niels Kräupl (@Gamewalker)",
49 "Pascal Romahn (@skulblakka)",
50 "Sam Edwards (@dotsam)",
51 "Stefan Breunig (@breunigs)",
52 "Suhail Patel (@suhailpatel)",
53 "Sunaga Takahiro (@sunaga720)",
54 "Vitaly Evtushenko (@eltiren)",
55 "Warnar Boekkooi (@boekkooi)",
56 "@blxd",
57 "@btiom",
58 "@daslicious",
59 "@MasterofJOKers",
60 "@mammothb",
61 "@medina",
62 "@monkeyphysics",
63 "@nixxquality",
64 "@papplampe",
65 "@Raziel-23",
66 "@t0mm0",
67 "@ToadKing",
68 "@unintended",
69 "@wolftankk",
70 "@yeeeargh"
71 ]
72
73 from .api import streams
74 from .exceptions import (StreamlinkError, PluginError, NoStreamsError,
75 NoPluginError, StreamError)
76 from .session import Streamlink
77
```
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 from os import environ
4 from os.path import abspath, dirname, join
5 from setuptools import setup, find_packages
6 from sys import version_info, path as sys_path
7
8 deps = []
9
10 if version_info[0] == 2:
11 # Require backport of concurrent.futures on Python 2
12 deps.append("futures")
13
14 # Require backport of argparse on Python 2.6
15 if version_info[1] == 6:
16 deps.append("argparse")
17
18 # Require singledispatch on Python <3.4
19 if version_info[0] == 2 or (version_info[0] == 3 and version_info[1] < 4):
20 deps.append("singledispatch")
21
22 # requests 2.0 does not work correctly on Python <2.6.3
23 if (version_info[0] == 2 and version_info[1] == 6 and version_info[2] < 3):
24 deps.append("requests>=1.0,<2.0")
25 else:
26 deps.append("requests>=1.0,!=2.12.0,!=2.12.1,<3.0")
27
28 # this version of pycryptodome is known to work and has a Windows wheel for py2.7, py3.3-3.6
29 deps.append("pycryptodome>=3.4.3,<4")
30
31 # shutil.get_terminal_size and which were added in Python 3.3
32 if version_info[0] == 2:
33 deps.append("backports.shutil_which")
34 deps.append("backports.shutil_get_terminal_size")
35
36 # for localization
37 deps.append("iso-639")
38 deps.append("iso3166")
39
40 # When we build an egg for the Win32 bootstrap we don't want dependency
41 # information built into it.
42 if environ.get("NO_DEPS"):
43 deps = []
44
45 srcdir = join(dirname(abspath(__file__)), "src/")
46 sys_path.insert(0, srcdir)
47
48 setup(name="streamlink",
49 version="0.3.1",
50 description="Streamlink is command-line utility that extracts streams "
51 "from various services and pipes them into a video player of "
52 "choice.",
53 url="https://github.com/streamlink/streamlink",
54 author="Streamlink",
55 author_email="[email protected]", # temp until we have a mailing list / global email
56 license="Simplified BSD",
57 packages=find_packages("src"),
58 package_dir={"": "src"},
59 entry_points={
60 "console_scripts": ["streamlink=streamlink_cli.main:main"]
61 },
62 install_requires=deps,
63 test_suite="tests",
64 classifiers=["Development Status :: 5 - Production/Stable",
65 "Environment :: Console",
66 "Operating System :: POSIX",
67 "Operating System :: Microsoft :: Windows",
68 "Programming Language :: Python :: 2.6",
69 "Programming Language :: Python :: 2.7",
70 "Programming Language :: Python :: 3.3",
71 "Programming Language :: Python :: 3.4",
72 "Topic :: Internet :: WWW/HTTP",
73 "Topic :: Multimedia :: Sound/Audio",
74 "Topic :: Multimedia :: Video",
75 "Topic :: Utilities"])
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -46,7 +46,7 @@
sys_path.insert(0, srcdir)
setup(name="streamlink",
- version="0.3.1",
+ version="0.3.2",
description="Streamlink is command-line utility that extracts streams "
"from various services and pipes them into a video player of "
"choice.",
diff --git a/src/streamlink/__init__.py b/src/streamlink/__init__.py
--- a/src/streamlink/__init__.py
+++ b/src/streamlink/__init__.py
@@ -12,7 +12,7 @@
__title__ = "streamlink"
-__version__ = "0.3.1"
+__version__ = "0.3.2"
__license__ = "Simplified BSD"
__author__ = "Streamlink"
__copyright__ = "Copyright 2016 Streamlink"
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -46,7 +46,7 @@\n sys_path.insert(0, srcdir)\n \n setup(name=\"streamlink\",\n- version=\"0.3.1\",\n+ version=\"0.3.2\",\n description=\"Streamlink is command-line utility that extracts streams \"\n \"from various services and pipes them into a video player of \"\n \"choice.\",\ndiff --git a/src/streamlink/__init__.py b/src/streamlink/__init__.py\n--- a/src/streamlink/__init__.py\n+++ b/src/streamlink/__init__.py\n@@ -12,7 +12,7 @@\n \n \n __title__ = \"streamlink\"\n-__version__ = \"0.3.1\"\n+__version__ = \"0.3.2\"\n __license__ = \"Simplified BSD\"\n __author__ = \"Streamlink\"\n __copyright__ = \"Copyright 2016 Streamlink\"\n", "issue": "0.3.2 Release\nCloses #562 \n", "before_files": [{"content": "# coding: utf8\n\"\"\"Streamlink extracts streams from various services.\n\nThe main compontent of Streamlink is a command-line utility that\nlaunches the streams in a video player.\n\nAn API is also provided that allows direct access to stream data.\n\nFull documentation is available at https://streamlink.github.io.\n\n\"\"\"\n\n\n__title__ = \"streamlink\"\n__version__ = \"0.3.1\"\n__license__ = \"Simplified BSD\"\n__author__ = \"Streamlink\"\n__copyright__ = \"Copyright 2016 Streamlink\"\n__credits__ = [\n \"Agust\u00edn Carrasco (@asermax)\",\n \"Andrew Bashore (@bashtech)\",\n \"Andy Mikhailenko (@neithere)\",\n \"Athanasios Oikonomou (@athoik)\",\n \"Brian Callahan (@ibara)\",\n \"Che (@chhe)\",\n \"Christopher Rosell (@streamlink)\",\n \"Daniel Mei\u00dfner (@meise)\",\n \"Daniel Miranda (@danielkza)\",\n \"Daniel Wallace (@gtmanfred)\",\n \"David Arvelo (@darvelo)\",\n \"Dominik Dabrowski (@doda)\",\n \"Erik G (@tboss)\",\n \"Eric J (@wormeyman)\",\n \"Ethan Jones (@jonesz)\",\n \"Gaspard Jankowiak (@gapato)\",\n \"Jaime Marqu\u00ednez Ferr\u00e1ndiz (@jaimeMF)\",\n \"Jan Tore Morken (@jantore)\",\n \"John Peterson (@john-peterson)\",\n \"Jon Bergli Heier (@sn4kebite)\",\n \"Joseph Glanville (@josephglanville)\",\n \"Julian Richen (@FireDart)\",\n \"Kacper (@kasper93)\",\n \"Martin Panter (@vadmium)\",\n \"Max Nordlund (@maxnordlund)\",\n \"Michael Cheah (@cheah)\",\n \"Moritz Blanke\",\n \"Niall McAndrew (@niallm90)\",\n \"Niels Kr\u00e4upl (@Gamewalker)\",\n \"Pascal Romahn (@skulblakka)\",\n \"Sam Edwards (@dotsam)\",\n \"Stefan Breunig (@breunigs)\",\n \"Suhail Patel (@suhailpatel)\",\n \"Sunaga Takahiro (@sunaga720)\",\n \"Vitaly Evtushenko (@eltiren)\",\n \"Warnar Boekkooi (@boekkooi)\",\n \"@blxd\",\n \"@btiom\",\n \"@daslicious\",\n \"@MasterofJOKers\",\n \"@mammothb\",\n \"@medina\",\n \"@monkeyphysics\",\n \"@nixxquality\",\n \"@papplampe\",\n \"@Raziel-23\",\n \"@t0mm0\",\n \"@ToadKing\",\n \"@unintended\",\n \"@wolftankk\",\n \"@yeeeargh\"\n]\n\nfrom .api import streams\nfrom .exceptions import (StreamlinkError, PluginError, NoStreamsError,\n NoPluginError, StreamError)\nfrom .session import Streamlink\n", "path": "src/streamlink/__init__.py"}, {"content": "#!/usr/bin/env python\n\nfrom os import environ\nfrom os.path import abspath, dirname, join\nfrom setuptools import setup, find_packages\nfrom sys import version_info, path as sys_path\n\ndeps = []\n\nif version_info[0] == 2:\n # Require backport of concurrent.futures on Python 2\n deps.append(\"futures\")\n\n # Require backport of argparse on Python 2.6\n if version_info[1] == 6:\n deps.append(\"argparse\")\n\n# Require singledispatch on Python <3.4\nif version_info[0] == 2 or (version_info[0] == 3 and version_info[1] < 4):\n 
deps.append(\"singledispatch\")\n\n# requests 2.0 does not work correctly on Python <2.6.3\nif (version_info[0] == 2 and version_info[1] == 6 and version_info[2] < 3):\n deps.append(\"requests>=1.0,<2.0\")\nelse:\n deps.append(\"requests>=1.0,!=2.12.0,!=2.12.1,<3.0\")\n\n# this version of pycryptodome is known to work and has a Windows wheel for py2.7, py3.3-3.6\ndeps.append(\"pycryptodome>=3.4.3,<4\")\n\n# shutil.get_terminal_size and which were added in Python 3.3\nif version_info[0] == 2:\n deps.append(\"backports.shutil_which\")\n deps.append(\"backports.shutil_get_terminal_size\")\n\n# for localization\ndeps.append(\"iso-639\")\ndeps.append(\"iso3166\")\n\n# When we build an egg for the Win32 bootstrap we don't want dependency\n# information built into it.\nif environ.get(\"NO_DEPS\"):\n deps = []\n\nsrcdir = join(dirname(abspath(__file__)), \"src/\")\nsys_path.insert(0, srcdir)\n\nsetup(name=\"streamlink\",\n version=\"0.3.1\",\n description=\"Streamlink is command-line utility that extracts streams \"\n \"from various services and pipes them into a video player of \"\n \"choice.\",\n url=\"https://github.com/streamlink/streamlink\",\n author=\"Streamlink\",\n author_email=\"[email protected]\", # temp until we have a mailing list / global email\n license=\"Simplified BSD\",\n packages=find_packages(\"src\"),\n package_dir={\"\": \"src\"},\n entry_points={\n \"console_scripts\": [\"streamlink=streamlink_cli.main:main\"]\n },\n install_requires=deps,\n test_suite=\"tests\",\n classifiers=[\"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Multimedia :: Sound/Audio\",\n \"Topic :: Multimedia :: Video\",\n \"Topic :: Utilities\"])\n", "path": "setup.py"}], "after_files": [{"content": "# coding: utf8\n\"\"\"Streamlink extracts streams from various services.\n\nThe main compontent of Streamlink is a command-line utility that\nlaunches the streams in a video player.\n\nAn API is also provided that allows direct access to stream data.\n\nFull documentation is available at https://streamlink.github.io.\n\n\"\"\"\n\n\n__title__ = \"streamlink\"\n__version__ = \"0.3.2\"\n__license__ = \"Simplified BSD\"\n__author__ = \"Streamlink\"\n__copyright__ = \"Copyright 2016 Streamlink\"\n__credits__ = [\n \"Agust\u00edn Carrasco (@asermax)\",\n \"Andrew Bashore (@bashtech)\",\n \"Andy Mikhailenko (@neithere)\",\n \"Athanasios Oikonomou (@athoik)\",\n \"Brian Callahan (@ibara)\",\n \"Che (@chhe)\",\n \"Christopher Rosell (@streamlink)\",\n \"Daniel Mei\u00dfner (@meise)\",\n \"Daniel Miranda (@danielkza)\",\n \"Daniel Wallace (@gtmanfred)\",\n \"David Arvelo (@darvelo)\",\n \"Dominik Dabrowski (@doda)\",\n \"Erik G (@tboss)\",\n \"Eric J (@wormeyman)\",\n \"Ethan Jones (@jonesz)\",\n \"Gaspard Jankowiak (@gapato)\",\n \"Jaime Marqu\u00ednez Ferr\u00e1ndiz (@jaimeMF)\",\n \"Jan Tore Morken (@jantore)\",\n \"John Peterson (@john-peterson)\",\n \"Jon Bergli Heier (@sn4kebite)\",\n \"Joseph Glanville (@josephglanville)\",\n \"Julian Richen (@FireDart)\",\n \"Kacper (@kasper93)\",\n \"Martin Panter (@vadmium)\",\n \"Max Nordlund (@maxnordlund)\",\n \"Michael Cheah (@cheah)\",\n \"Moritz Blanke\",\n \"Niall McAndrew (@niallm90)\",\n \"Niels Kr\u00e4upl (@Gamewalker)\",\n 
\"Pascal Romahn (@skulblakka)\",\n \"Sam Edwards (@dotsam)\",\n \"Stefan Breunig (@breunigs)\",\n \"Suhail Patel (@suhailpatel)\",\n \"Sunaga Takahiro (@sunaga720)\",\n \"Vitaly Evtushenko (@eltiren)\",\n \"Warnar Boekkooi (@boekkooi)\",\n \"@blxd\",\n \"@btiom\",\n \"@daslicious\",\n \"@MasterofJOKers\",\n \"@mammothb\",\n \"@medina\",\n \"@monkeyphysics\",\n \"@nixxquality\",\n \"@papplampe\",\n \"@Raziel-23\",\n \"@t0mm0\",\n \"@ToadKing\",\n \"@unintended\",\n \"@wolftankk\",\n \"@yeeeargh\"\n]\n\nfrom .api import streams\nfrom .exceptions import (StreamlinkError, PluginError, NoStreamsError,\n NoPluginError, StreamError)\nfrom .session import Streamlink\n", "path": "src/streamlink/__init__.py"}, {"content": "#!/usr/bin/env python\n\nfrom os import environ\nfrom os.path import abspath, dirname, join\nfrom setuptools import setup, find_packages\nfrom sys import version_info, path as sys_path\n\ndeps = []\n\nif version_info[0] == 2:\n # Require backport of concurrent.futures on Python 2\n deps.append(\"futures\")\n\n # Require backport of argparse on Python 2.6\n if version_info[1] == 6:\n deps.append(\"argparse\")\n\n# Require singledispatch on Python <3.4\nif version_info[0] == 2 or (version_info[0] == 3 and version_info[1] < 4):\n deps.append(\"singledispatch\")\n\n# requests 2.0 does not work correctly on Python <2.6.3\nif (version_info[0] == 2 and version_info[1] == 6 and version_info[2] < 3):\n deps.append(\"requests>=1.0,<2.0\")\nelse:\n deps.append(\"requests>=1.0,!=2.12.0,!=2.12.1,<3.0\")\n\n# this version of pycryptodome is known to work and has a Windows wheel for py2.7, py3.3-3.6\ndeps.append(\"pycryptodome>=3.4.3,<4\")\n\n# shutil.get_terminal_size and which were added in Python 3.3\nif version_info[0] == 2:\n deps.append(\"backports.shutil_which\")\n deps.append(\"backports.shutil_get_terminal_size\")\n\n# for localization\ndeps.append(\"iso-639\")\ndeps.append(\"iso3166\")\n\n# When we build an egg for the Win32 bootstrap we don't want dependency\n# information built into it.\nif environ.get(\"NO_DEPS\"):\n deps = []\n\nsrcdir = join(dirname(abspath(__file__)), \"src/\")\nsys_path.insert(0, srcdir)\n\nsetup(name=\"streamlink\",\n version=\"0.3.2\",\n description=\"Streamlink is command-line utility that extracts streams \"\n \"from various services and pipes them into a video player of \"\n \"choice.\",\n url=\"https://github.com/streamlink/streamlink\",\n author=\"Streamlink\",\n author_email=\"[email protected]\", # temp until we have a mailing list / global email\n license=\"Simplified BSD\",\n packages=find_packages(\"src\"),\n package_dir={\"\": \"src\"},\n entry_points={\n \"console_scripts\": [\"streamlink=streamlink_cli.main:main\"]\n },\n install_requires=deps,\n test_suite=\"tests\",\n classifiers=[\"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Multimedia :: Sound/Audio\",\n \"Topic :: Multimedia :: Video\",\n \"Topic :: Utilities\"])\n", "path": "setup.py"}]} | 1,949 | 215 |
gh_patches_debug_303 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-2347 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
gi._gobject.option is not part of pygobject
The [GObject hook](https://github.com/pyinstaller/pyinstaller/blob/develop/PyInstaller/hooks/hook-gi.repository.GObject.py) adds a `hiddenimport` for `gi._gobject.option`; however, `gi/_gobject/option.py` is not part of pygobject.
This leads to the following warning when packaging a Gtk application:
```
4813 INFO: Loading module hook "hook-gi.py"...
4818 INFO: Loading module hook "hook-gi.repository.GObject.py"...
4926 INFO: Processing pre-safe import module hook gi.repository.GLib
4963 WARNING: Hidden import "gi._gobject.option" not found!
```
Browsing through the [pygobject git history](https://git.gnome.org/browse/pygobject/), I find commit [8afd7e8](https://git.gnome.org/browse/pygobject/commit/gi/_option.py?id=8afd7e880a72a44e6ea46c763bab82146fd75c96), which moved `gi/_glib/option.py` into `gi/_option.py`.
Changing the `hiddenimport` to `hiddenimports += ['gi._option', 'gi._gobject']` silences the issue. However, I do not yet understand enough about pygobject and pyinstaller to know if this is the right thing to do.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `PyInstaller/hooks/hook-gi.repository.GObject.py`
Content:
```
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2005-2016, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License with exception
5 # for distributing bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #-----------------------------------------------------------------------------
9 """
10 Import hook for GObject https://developer.gnome.org/gobject/stable/ from the GLib
11 library https://wiki.gnome.org/Projects/GLib introspected through PyGobject https://wiki.gnome.org/PyGObject
12 via the GObject Introspection middleware layer https://wiki.gnome.org/Projects/GObjectIntrospection
13
14 Tested with GLib 2.44.1, PyGObject 3.16.2, and GObject Introspection 1.44.0 on Mac OS X 10.10 and
15 GLib 2.42.2, PyGObject 3.14.0, and GObject Introspection 1.42 on Windows 7
16 """
17
18 from PyInstaller.utils.hooks import get_gi_typelibs
19
20 binaries, datas, hiddenimports = get_gi_typelibs('GObject', '2.0')
21
22 hiddenimports += ['gi._gobject.option', 'gi._gobject']
23
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/PyInstaller/hooks/hook-gi.repository.GObject.py b/PyInstaller/hooks/hook-gi.repository.GObject.py
--- a/PyInstaller/hooks/hook-gi.repository.GObject.py
+++ b/PyInstaller/hooks/hook-gi.repository.GObject.py
@@ -19,4 +19,4 @@
binaries, datas, hiddenimports = get_gi_typelibs('GObject', '2.0')
-hiddenimports += ['gi._gobject.option', 'gi._gobject']
+hiddenimports += ['gi._gobject']
| {"golden_diff": "diff --git a/PyInstaller/hooks/hook-gi.repository.GObject.py b/PyInstaller/hooks/hook-gi.repository.GObject.py\n--- a/PyInstaller/hooks/hook-gi.repository.GObject.py\n+++ b/PyInstaller/hooks/hook-gi.repository.GObject.py\n@@ -19,4 +19,4 @@\n \n binaries, datas, hiddenimports = get_gi_typelibs('GObject', '2.0')\n \n-hiddenimports += ['gi._gobject.option', 'gi._gobject']\n+hiddenimports += ['gi._gobject']\n", "issue": "gi._gobject.option is not part of pygobject\nThe [GObject hook](https://github.com/pyinstaller/pyinstaller/blob/develop/PyInstaller/hooks/hook-gi.repository.GObject.py) adds a `hiddenimport` for `gi._gobject.option` however `gi/_gobject/option.py` is not part of pygobject.\r\n\r\nThis leads to the following warning when packaging a Gtk application:\r\n```\r\n4813 INFO: Loading module hook \"hook-gi.py\"...\r\n4818 INFO: Loading module hook \"hook-gi.repository.GObject.py\"...\r\n4926 INFO: Processing pre-safe import module hook gi.repository.GLib\r\n4963 WARNING: Hidden import \"gi._gobject.option\" not found!\r\n```\r\n\r\nBrowsing through the [pygobject git history](https://git.gnome.org/browse/pygobject/), I find commit [8afd7e8](https://git.gnome.org/browse/pygobject/commit/gi/_option.py?id=8afd7e880a72a44e6ea46c763bab82146fd75c96) which moved `gi/_glib/option.py` into `gi/_option.py`\r\n\r\nReplacing the `hiddenimport` to `hiddenimports += ['gi._option', 'gi._gobject']` silences the issue. However, I do not yet understand enough about pygobject and pyinstaller to know if this is the right thing to do.\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2016, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\"\"\"\nImport hook for GObject https://developer.gnome.org/gobject/stable/ from the GLib\nlibrary https://wiki.gnome.org/Projects/GLib introspected through PyGobject https://wiki.gnome.org/PyGObject\nvia the GObject Introspection middleware layer https://wiki.gnome.org/Projects/GObjectIntrospection\n\nTested with GLib 2.44.1, PyGObject 3.16.2, and GObject Introspection 1.44.0 on Mac OS X 10.10 and\nGLib 2.42.2, PyGObject 3.14.0, and GObject Introspection 1.42 on Windows 7\n\"\"\"\n\nfrom PyInstaller.utils.hooks import get_gi_typelibs\n\nbinaries, datas, hiddenimports = get_gi_typelibs('GObject', '2.0')\n\nhiddenimports += ['gi._gobject.option', 'gi._gobject']\n", "path": "PyInstaller/hooks/hook-gi.repository.GObject.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2016, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\"\"\"\nImport hook for GObject https://developer.gnome.org/gobject/stable/ from the GLib\nlibrary https://wiki.gnome.org/Projects/GLib introspected through PyGobject https://wiki.gnome.org/PyGObject\nvia the GObject Introspection middleware layer https://wiki.gnome.org/Projects/GObjectIntrospection\n\nTested with GLib 2.44.1, PyGObject 3.16.2, and GObject 
Introspection 1.44.0 on Mac OS X 10.10 and\nGLib 2.42.2, PyGObject 3.14.0, and GObject Introspection 1.42 on Windows 7\n\"\"\"\n\nfrom PyInstaller.utils.hooks import get_gi_typelibs\n\nbinaries, datas, hiddenimports = get_gi_typelibs('GObject', '2.0')\n\nhiddenimports += ['gi._gobject']\n", "path": "PyInstaller/hooks/hook-gi.repository.GObject.py"}]} | 899 | 122 |
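Stale entries like `gi._gobject.option` are cheap to catch before they ship in a hook: each hidden import can be checked for importability in the target environment. A small, environment-agnostic sketch (`importlib.util.find_spec` returns `None` for modules that do not exist):

```python
import importlib.util

def importable(name: str) -> bool:
    try:
        return importlib.util.find_spec(name) is not None
    except ModuleNotFoundError:  # parent package missing entirely
        return False

for name in ("gi._gobject", "gi._gobject.option", "gi._option"):
    print(name, "->", "ok" if importable(name) else "missing")
```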
gh_patches_debug_3782 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-2314 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Normalize stored ISNI
The `ISNI` field should always be in the same format (without spaces), but right now sometimes the field is stored with spaces between the digits. There should be validation when the author is saved that cleans this value, similar to how ISBNs are validated
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/models/author.py`
Content:
```
1 """ database schema for info about authors """
2 import re
3 from django.contrib.postgres.indexes import GinIndex
4 from django.core.cache import cache
5 from django.core.cache.utils import make_template_fragment_key
6 from django.db import models
7
8 from bookwyrm import activitypub
9 from bookwyrm.settings import DOMAIN
10
11 from .book import BookDataModel
12 from . import fields
13
14
15 class Author(BookDataModel):
16 """basic biographic info"""
17
18 wikipedia_link = fields.CharField(
19 max_length=255, blank=True, null=True, deduplication_field=True
20 )
21 isni = fields.CharField(
22 max_length=255, blank=True, null=True, deduplication_field=True
23 )
24 gutenberg_id = fields.CharField(
25 max_length=255, blank=True, null=True, deduplication_field=True
26 )
27 # idk probably other keys would be useful here?
28 born = fields.DateTimeField(blank=True, null=True)
29 died = fields.DateTimeField(blank=True, null=True)
30 name = fields.CharField(max_length=255)
31 aliases = fields.ArrayField(
32 models.CharField(max_length=255), blank=True, default=list
33 )
34 bio = fields.HtmlField(null=True, blank=True)
35
36 def save(self, *args, **kwargs):
37 """clear related template caches"""
38 # clear template caches
39 if self.id:
40 cache_keys = [
41 make_template_fragment_key("titleby", [book])
42 for book in self.book_set.values_list("id", flat=True)
43 ]
44 cache.delete_many(cache_keys)
45 return super().save(*args, **kwargs)
46
47 @property
48 def isni_link(self):
49 """generate the url from the isni id"""
50 clean_isni = re.sub(r"\s", "", self.isni)
51 return f"https://isni.org/isni/{clean_isni}"
52
53 @property
54 def openlibrary_link(self):
55 """generate the url from the openlibrary id"""
56 return f"https://openlibrary.org/authors/{self.openlibrary_key}"
57
58 def get_remote_id(self):
59 """editions and works both use "book" instead of model_name"""
60 return f"https://{DOMAIN}/author/{self.id}"
61
62 activity_serializer = activitypub.Author
63
64 class Meta:
65 """sets up postgres GIN index field"""
66
67 indexes = (GinIndex(fields=["search_vector"]),)
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bookwyrm/models/author.py b/bookwyrm/models/author.py
--- a/bookwyrm/models/author.py
+++ b/bookwyrm/models/author.py
@@ -42,6 +42,11 @@
for book in self.book_set.values_list("id", flat=True)
]
cache.delete_many(cache_keys)
+
+ # normalize isni format
+ if self.isni:
+ self.isni = re.sub(r"\s", "", self.isni)
+
return super().save(*args, **kwargs)
@property
| {"golden_diff": "diff --git a/bookwyrm/models/author.py b/bookwyrm/models/author.py\n--- a/bookwyrm/models/author.py\n+++ b/bookwyrm/models/author.py\n@@ -42,6 +42,11 @@\n for book in self.book_set.values_list(\"id\", flat=True)\n ]\n cache.delete_many(cache_keys)\n+\n+ # normalize isni format\n+ if self.isni:\n+ self.isni = re.sub(r\"\\s\", \"\", self.isni)\n+\n return super().save(*args, **kwargs)\n \n @property\n", "issue": "Normalize stored ISNI\nThe `ISNI` field should always be in the same format (without spaces), but right now sometimes the field is stored with spaces between the digits. There should be validation when the author is saved that cleans this value, similar to how ISBNs are validated\n", "before_files": [{"content": "\"\"\" database schema for info about authors \"\"\"\nimport re\nfrom django.contrib.postgres.indexes import GinIndex\nfrom django.core.cache import cache\nfrom django.core.cache.utils import make_template_fragment_key\nfrom django.db import models\n\nfrom bookwyrm import activitypub\nfrom bookwyrm.settings import DOMAIN\n\nfrom .book import BookDataModel\nfrom . import fields\n\n\nclass Author(BookDataModel):\n \"\"\"basic biographic info\"\"\"\n\n wikipedia_link = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True\n )\n isni = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True\n )\n gutenberg_id = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True\n )\n # idk probably other keys would be useful here?\n born = fields.DateTimeField(blank=True, null=True)\n died = fields.DateTimeField(blank=True, null=True)\n name = fields.CharField(max_length=255)\n aliases = fields.ArrayField(\n models.CharField(max_length=255), blank=True, default=list\n )\n bio = fields.HtmlField(null=True, blank=True)\n\n def save(self, *args, **kwargs):\n \"\"\"clear related template caches\"\"\"\n # clear template caches\n if self.id:\n cache_keys = [\n make_template_fragment_key(\"titleby\", [book])\n for book in self.book_set.values_list(\"id\", flat=True)\n ]\n cache.delete_many(cache_keys)\n return super().save(*args, **kwargs)\n\n @property\n def isni_link(self):\n \"\"\"generate the url from the isni id\"\"\"\n clean_isni = re.sub(r\"\\s\", \"\", self.isni)\n return f\"https://isni.org/isni/{clean_isni}\"\n\n @property\n def openlibrary_link(self):\n \"\"\"generate the url from the openlibrary id\"\"\"\n return f\"https://openlibrary.org/authors/{self.openlibrary_key}\"\n\n def get_remote_id(self):\n \"\"\"editions and works both use \"book\" instead of model_name\"\"\"\n return f\"https://{DOMAIN}/author/{self.id}\"\n\n activity_serializer = activitypub.Author\n\n class Meta:\n \"\"\"sets up postgres GIN index field\"\"\"\n\n indexes = (GinIndex(fields=[\"search_vector\"]),)\n", "path": "bookwyrm/models/author.py"}], "after_files": [{"content": "\"\"\" database schema for info about authors \"\"\"\nimport re\nfrom django.contrib.postgres.indexes import GinIndex\nfrom django.core.cache import cache\nfrom django.core.cache.utils import make_template_fragment_key\nfrom django.db import models\n\nfrom bookwyrm import activitypub\nfrom bookwyrm.settings import DOMAIN\n\nfrom .book import BookDataModel\nfrom . 
import fields\n\n\nclass Author(BookDataModel):\n \"\"\"basic biographic info\"\"\"\n\n wikipedia_link = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True\n )\n isni = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True\n )\n gutenberg_id = fields.CharField(\n max_length=255, blank=True, null=True, deduplication_field=True\n )\n # idk probably other keys would be useful here?\n born = fields.DateTimeField(blank=True, null=True)\n died = fields.DateTimeField(blank=True, null=True)\n name = fields.CharField(max_length=255)\n aliases = fields.ArrayField(\n models.CharField(max_length=255), blank=True, default=list\n )\n bio = fields.HtmlField(null=True, blank=True)\n\n def save(self, *args, **kwargs):\n \"\"\"clear related template caches\"\"\"\n # clear template caches\n if self.id:\n cache_keys = [\n make_template_fragment_key(\"titleby\", [book])\n for book in self.book_set.values_list(\"id\", flat=True)\n ]\n cache.delete_many(cache_keys)\n\n # normalize isni format\n if self.isni:\n self.isni = re.sub(r\"\\s\", \"\", self.isni)\n\n return super().save(*args, **kwargs)\n\n @property\n def isni_link(self):\n \"\"\"generate the url from the isni id\"\"\"\n clean_isni = re.sub(r\"\\s\", \"\", self.isni)\n return f\"https://isni.org/isni/{clean_isni}\"\n\n @property\n def openlibrary_link(self):\n \"\"\"generate the url from the openlibrary id\"\"\"\n return f\"https://openlibrary.org/authors/{self.openlibrary_key}\"\n\n def get_remote_id(self):\n \"\"\"editions and works both use \"book\" instead of model_name\"\"\"\n return f\"https://{DOMAIN}/author/{self.id}\"\n\n activity_serializer = activitypub.Author\n\n class Meta:\n \"\"\"sets up postgres GIN index field\"\"\"\n\n indexes = (GinIndex(fields=[\"search_vector\"]),)\n", "path": "bookwyrm/models/author.py"}]} | 959 | 127 |
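The normalization added above only runs in `save()`, so a small regression test is enough to pin the behaviour. A sketch in Django's test style (assumes the usual bookwyrm test setup; the field values are made up):

```python
from django.test import TestCase

from bookwyrm import models

class AuthorISNITest(TestCase):
    def test_isni_is_normalized_on_save(self):
        author = models.Author.objects.create(
            name="Jane Doe", isni="0000 0001 2103 4567"
        )
        self.assertEqual(author.isni, "0000000121034567")
```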
gh_patches_debug_2665 | rasdani/github-patches | git_diff | opsdroid__opsdroid-946 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PyPI deployments are failing
Looks like PyPI deployments are failing. `v0.15.1` and `v0.15.2` haven't gone out.
```
HTTPError: 400 Client Error: The description failed to render in the default format of reStructuredText. See https://pypi.org/help/#description-content-type for more information. for url: https://upload.pypi.org/legacy/
```
--- END ISSUE ---
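
For orientation: the 400 error means PyPI tried to validate the Markdown README as reStructuredText. The sketch below is hypothetical (the package name, version, and paths are placeholders) and assumes setuptools >= 38.6.0 and twine >= 1.11.0, the first releases that honor `long_description_content_type`:
```python
# Minimal sketch, not the project's actual setup.py.
import os
from setuptools import setup

HERE = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(HERE, 'README.md'), encoding='utf8') as f:
    readme = f.read()

setup(
    name='example-package',   # placeholder
    version='0.1.0',          # placeholder
    long_description=readme,
    # Tells PyPI the description is Markdown; without this the upload
    # is validated as reStructuredText and rejected with a 400 error.
    long_description_content_type='text/markdown',
)
```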
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python3
2 import os
3 from setuptools import setup, find_packages
4 from setuptools.command.build_py import build_py
5 from setuptools.command.sdist import sdist
6 from setuptools.command.develop import develop
7 import versioneer
8
9 PACKAGE_NAME = 'opsdroid'
10 HERE = os.path.abspath(os.path.dirname(__file__))
11 README = open(os.path.join(HERE, 'README.md'), encoding="utf8").read()
12
13 PACKAGES = find_packages(exclude=['tests', 'tests.*', 'modules',
14 'modules.*', 'docs', 'docs.*'])
15
16
17 # For now we simply define the install_requires based on the contents
18 # of requirements.txt. In the future, install_requires may become much
19 # looser than the (automatically) resolved requirements.txt.
20 with open(os.path.join(HERE, 'requirements.txt'), 'r') as fh:
21 REQUIRES = [line.strip() for line in fh]
22
23
24 class Develop(develop):
25 """Custom `develop` command to always build mo files on install -e."""
26
27 def run(self):
28 self.run_command('compile_catalog')
29 develop.run(self) # old style class
30
31
32 class BuildPy(build_py):
33 """Custom `build_py` command to always build mo files for wheels."""
34
35 def run(self):
36 self.run_command('compile_catalog')
37 build_py.run(self) # old style class
38
39
40 class Sdist(sdist):
41 """Custom `sdist` command to ensure that mo files are always created."""
42
43 def run(self):
44 self.run_command('compile_catalog')
45 sdist.run(self) # old style class
46
47
48 setup(
49 name=PACKAGE_NAME,
50 version=versioneer.get_version(),
51 license='Apache License 2.0',
52 url='https://opsdroid.github.io/',
53 download_url='https://github.com/opsdroid/opsdroid/releases',
54 author='Jacob Tomlinson',
55 author_email='[email protected]',
56 description='An open source ChatOps bot framework.',
57 long_description=README,
58 packages=PACKAGES,
59 include_package_data=True,
60 zip_safe=False,
61 platforms='any',
62 classifiers=[
63 'Development Status :: 4 - Beta',
64 'Environment :: Console',
65 'Framework :: AsyncIO',
66 'Intended Audience :: Developers',
67 'Intended Audience :: System Administrators',
68 'Intended Audience :: Information Technology',
69 'License :: OSI Approved :: Apache Software License',
70 'Programming Language :: Python',
71 'Programming Language :: Python :: 3',
72 'Programming Language :: Python :: 3 :: Only',
73 'Programming Language :: Python :: 3.5',
74 'Programming Language :: Python :: 3.6',
75 'Programming Language :: Python :: 3.7',
76 'Topic :: Communications :: Chat',
77 'Topic :: Scientific/Engineering :: Artificial Intelligence',
78 'Topic :: Software Development :: Libraries :: Python Modules'
79 ],
80 install_requires=REQUIRES,
81 test_suite='tests',
82 keywords=[
83 'bot',
84 'bot-framework',
85 'opsdroid',
86 'botkit',
87 'python3',
88 'asyncio',
89 'chatops',
90 'devops',
91 'nlu'
92 ],
93 setup_requires=['Babel'],
94 cmdclass=versioneer.get_cmdclass({'sdist': Sdist,
95 'build_py': BuildPy,
96 'develop': Develop}),
97 entry_points={
98 'console_scripts': [
99 'opsdroid = opsdroid.__main__:main'
100 ]
101 },
102 )
103
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -55,6 +55,7 @@
author_email='[email protected]',
description='An open source ChatOps bot framework.',
long_description=README,
+ long_description_content_type='text/markdown',
packages=PACKAGES,
include_package_data=True,
zip_safe=False,
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -55,6 +55,7 @@\n author_email='[email protected]',\n description='An open source ChatOps bot framework.',\n long_description=README,\n+ long_description_content_type='text/markdown',\n packages=PACKAGES,\n include_package_data=True,\n zip_safe=False,\n", "issue": "PyPI deployments are failing\nLooks like PyPI deployments are failing. `v0.15.1` and `v0.15.2` haven't gone out.\r\n\r\n```\r\nHTTPError: 400 Client Error: The description failed to render in the default format of reStructuredText. See https://pypi.org/help/#description-content-type for more information. for url: https://upload.pypi.org/legacy/\r\n```\nPyPI deployments are failing\nLooks like PyPI deployments are failing. `v0.15.1` and `v0.15.2` haven't gone out.\r\n\r\n```\r\nHTTPError: 400 Client Error: The description failed to render in the default format of reStructuredText. See https://pypi.org/help/#description-content-type for more information. for url: https://upload.pypi.org/legacy/\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python3\nimport os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.build_py import build_py\nfrom setuptools.command.sdist import sdist\nfrom setuptools.command.develop import develop\nimport versioneer\n\nPACKAGE_NAME = 'opsdroid'\nHERE = os.path.abspath(os.path.dirname(__file__))\nREADME = open(os.path.join(HERE, 'README.md'), encoding=\"utf8\").read()\n\nPACKAGES = find_packages(exclude=['tests', 'tests.*', 'modules',\n 'modules.*', 'docs', 'docs.*'])\n\n\n# For now we simply define the install_requires based on the contents\n# of requirements.txt. In the future, install_requires may become much\n# looser than the (automatically) resolved requirements.txt.\nwith open(os.path.join(HERE, 'requirements.txt'), 'r') as fh:\n REQUIRES = [line.strip() for line in fh]\n\n\nclass Develop(develop):\n \"\"\"Custom `develop` command to always build mo files on install -e.\"\"\"\n\n def run(self):\n self.run_command('compile_catalog')\n develop.run(self) # old style class\n\n\nclass BuildPy(build_py):\n \"\"\"Custom `build_py` command to always build mo files for wheels.\"\"\"\n\n def run(self):\n self.run_command('compile_catalog')\n build_py.run(self) # old style class\n\n\nclass Sdist(sdist):\n \"\"\"Custom `sdist` command to ensure that mo files are always created.\"\"\"\n\n def run(self):\n self.run_command('compile_catalog')\n sdist.run(self) # old style class\n\n\nsetup(\n name=PACKAGE_NAME,\n version=versioneer.get_version(),\n license='Apache License 2.0',\n url='https://opsdroid.github.io/',\n download_url='https://github.com/opsdroid/opsdroid/releases',\n author='Jacob Tomlinson',\n author_email='[email protected]',\n description='An open source ChatOps bot framework.',\n long_description=README,\n packages=PACKAGES,\n include_package_data=True,\n zip_safe=False,\n platforms='any',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Framework :: AsyncIO',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Intended Audience :: Information Technology',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Communications :: Chat',\n 'Topic :: 
Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development :: Libraries :: Python Modules'\n ],\n install_requires=REQUIRES,\n test_suite='tests',\n keywords=[\n 'bot',\n 'bot-framework',\n 'opsdroid',\n 'botkit',\n 'python3',\n 'asyncio',\n 'chatops',\n 'devops',\n 'nlu'\n ],\n setup_requires=['Babel'],\n cmdclass=versioneer.get_cmdclass({'sdist': Sdist,\n 'build_py': BuildPy,\n 'develop': Develop}),\n entry_points={\n 'console_scripts': [\n 'opsdroid = opsdroid.__main__:main'\n ]\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python3\nimport os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.build_py import build_py\nfrom setuptools.command.sdist import sdist\nfrom setuptools.command.develop import develop\nimport versioneer\n\nPACKAGE_NAME = 'opsdroid'\nHERE = os.path.abspath(os.path.dirname(__file__))\nREADME = open(os.path.join(HERE, 'README.md'), encoding=\"utf8\").read()\n\nPACKAGES = find_packages(exclude=['tests', 'tests.*', 'modules',\n 'modules.*', 'docs', 'docs.*'])\n\n\n# For now we simply define the install_requires based on the contents\n# of requirements.txt. In the future, install_requires may become much\n# looser than the (automatically) resolved requirements.txt.\nwith open(os.path.join(HERE, 'requirements.txt'), 'r') as fh:\n REQUIRES = [line.strip() for line in fh]\n\n\nclass Develop(develop):\n \"\"\"Custom `develop` command to always build mo files on install -e.\"\"\"\n\n def run(self):\n self.run_command('compile_catalog')\n develop.run(self) # old style class\n\n\nclass BuildPy(build_py):\n \"\"\"Custom `build_py` command to always build mo files for wheels.\"\"\"\n\n def run(self):\n self.run_command('compile_catalog')\n build_py.run(self) # old style class\n\n\nclass Sdist(sdist):\n \"\"\"Custom `sdist` command to ensure that mo files are always created.\"\"\"\n\n def run(self):\n self.run_command('compile_catalog')\n sdist.run(self) # old style class\n\n\nsetup(\n name=PACKAGE_NAME,\n version=versioneer.get_version(),\n license='Apache License 2.0',\n url='https://opsdroid.github.io/',\n download_url='https://github.com/opsdroid/opsdroid/releases',\n author='Jacob Tomlinson',\n author_email='[email protected]',\n description='An open source ChatOps bot framework.',\n long_description=README,\n long_description_content_type='text/markdown',\n packages=PACKAGES,\n include_package_data=True,\n zip_safe=False,\n platforms='any',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Framework :: AsyncIO',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Intended Audience :: Information Technology',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Communications :: Chat',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development :: Libraries :: Python Modules'\n ],\n install_requires=REQUIRES,\n test_suite='tests',\n keywords=[\n 'bot',\n 'bot-framework',\n 'opsdroid',\n 'botkit',\n 'python3',\n 'asyncio',\n 'chatops',\n 'devops',\n 'nlu'\n ],\n setup_requires=['Babel'],\n cmdclass=versioneer.get_cmdclass({'sdist': Sdist,\n 'build_py': BuildPy,\n 'develop': Develop}),\n entry_points={\n 'console_scripts': [\n 
'opsdroid = opsdroid.__main__:main'\n ]\n },\n)\n", "path": "setup.py"}]} | 1,394 | 89 |
gh_patches_debug_26907 | rasdani/github-patches | git_diff | google__turbinia-696 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Generate version data from tags and commits
Today we have a hard-coded value in `turbinia/__init__.py`, but it would be nice to generate the version number from the current TAG for releases and from the git commit id when there is no TAG (i.e. when running from master or a different branch).
--- END ISSUE ---
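
As background, one widely used pattern for this is `setuptools_scm`, which derives the version from the most recent git tag and appends commit information on untagged builds. The following is only a sketch with a placeholder project name, not necessarily the fix this repository chose:
```python
# setup.py sketch: let setuptools_scm derive the version from git.
from setuptools import setup

setup(
    name='example-project',             # placeholder
    use_scm_version=True,               # tag -> release version, commit -> dev version
    setup_requires=['setuptools_scm'],
)
```
At runtime, the installed distribution's version can be read back, with a fallback for uninstalled checkouts:
```python
# package __init__.py sketch.
from pkg_resources import get_distribution, DistributionNotFound

try:
    __version__ = get_distribution(__name__).version
except DistributionNotFound:
    # Not installed (e.g. running straight from a git checkout).
    __version__ = 'unknown'
```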
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright 2017 Google Inc.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License");
7 # you may not use this file except in compliance with the License.
8 # You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS,
14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 # See the License for the specific language governing permissions and
16 # limitations under the License.
17 """This is the setup file for the project."""
18
19 # yapf: disable
20
21 from __future__ import unicode_literals
22
23 import sys
24
25 from setuptools import find_packages
26 from setuptools import setup
27
28
29 # make sure turbinia is in path
30 sys.path.insert(0, '.')
31
32 import turbinia # pylint: disable=wrong-import-position
33
34 turbinia_description = (
35 'Turbinia is an open-source framework for deploying, managing, and running'
36 'forensic workloads on cloud platforms. It is intended to automate running '
37 'of common forensic processing tools (i.e. Plaso, TSK, strings, etc) to '
38 'help with processing evidence in the Cloud, scaling the processing of '
39 'large amounts of evidence, and decreasing response time by parallelizing'
40 'processing where possible.')
41
42 requirements = []
43 with open('requirements.txt','r') as f:
44 requirements = f.read().splitlines()
45 setup(
46 name='turbinia',
47 version=turbinia.__version__,
48 description='Automation and Scaling of Digital Forensics Tools',
49 long_description=turbinia_description,
50 license='Apache License, Version 2.0',
51 url='http://turbinia.plumbing/',
52 maintainer='Turbinia development team',
53 maintainer_email='[email protected]',
54 classifiers=[
55 'Development Status :: 4 - Beta',
56 'Environment :: Console',
57 'Operating System :: OS Independent',
58 'Programming Language :: Python',
59 ],
60 packages=find_packages(),
61 include_package_data=True,
62 zip_safe=False,
63 entry_points={'console_scripts': ['turbiniactl=turbinia.turbiniactl:main']},
64 install_requires=requirements,
65 extras_require={
66 'dev': ['mock', 'nose', 'yapf', 'celery~=4.1', 'coverage'],
67 'local': ['celery~=4.1', 'kombu~=4.1', 'redis~=3.0'],
68 'worker': ['docker-explorer>=20191104', 'plaso>=20200430', 'pyhindsight>=20200607']
69 }
70 )
71
```
Path: `turbinia/__init__.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright 2015 Google Inc.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """Main Turbinia application."""
16
17 __version__ = '20190819'
18
19 import logging
20 log = logging.getLogger('turbinia')
21
22
23 def log_and_report(message, trace):
24 """Log an error and if enabled, send to GCP Error Reporting API.
25
26 Args:
27 message(str): The user defined message to log.
28 trace(str): The error traceback message to log.
29 """
30 from turbinia import config
31
32 log.error(message)
33 log.error(trace)
34 # If GCP Error Reporting is enabled.
35 config.LoadConfig()
36 if config.STACKDRIVER_TRACEBACK:
37 # Only load google_cloud if needed
38 from turbinia.lib import google_cloud
39 client = google_cloud.setup_stackdriver_traceback(config.TURBINIA_PROJECT)
40 client.report_exception()
41
42
43 class TurbiniaException(Exception):
44 """Turbinia Exception class."""
45 pass
46
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -44,7 +44,6 @@
requirements = f.read().splitlines()
setup(
name='turbinia',
- version=turbinia.__version__,
description='Automation and Scaling of Digital Forensics Tools',
long_description=turbinia_description,
license='Apache License, Version 2.0',
@@ -66,5 +65,7 @@
'dev': ['mock', 'nose', 'yapf', 'celery~=4.1', 'coverage'],
'local': ['celery~=4.1', 'kombu~=4.1', 'redis~=3.0'],
'worker': ['docker-explorer>=20191104', 'plaso>=20200430', 'pyhindsight>=20200607']
- }
+ },
+ use_scm_version=True,
+ setup_requires=['setuptools_scm']
)
diff --git a/turbinia/__init__.py b/turbinia/__init__.py
--- a/turbinia/__init__.py
+++ b/turbinia/__init__.py
@@ -14,11 +14,15 @@
# limitations under the License.
"""Main Turbinia application."""
-__version__ = '20190819'
-
import logging
log = logging.getLogger('turbinia')
+from pkg_resources import get_distribution, DistributionNotFound
+try:
+ __version__ = get_distribution(__name__).version
+except DistributionNotFound:
+ __version__ = "unknown"
+
def log_and_report(message, trace):
"""Log an error and if enabled, send to GCP Error Reporting API.
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -44,7 +44,6 @@\n requirements = f.read().splitlines()\n setup(\n name='turbinia',\n- version=turbinia.__version__,\n description='Automation and Scaling of Digital Forensics Tools',\n long_description=turbinia_description,\n license='Apache License, Version 2.0',\n@@ -66,5 +65,7 @@\n 'dev': ['mock', 'nose', 'yapf', 'celery~=4.1', 'coverage'],\n 'local': ['celery~=4.1', 'kombu~=4.1', 'redis~=3.0'],\n 'worker': ['docker-explorer>=20191104', 'plaso>=20200430', 'pyhindsight>=20200607']\n- }\n+ },\n+ use_scm_version=True,\n+ setup_requires=['setuptools_scm']\n )\ndiff --git a/turbinia/__init__.py b/turbinia/__init__.py\n--- a/turbinia/__init__.py\n+++ b/turbinia/__init__.py\n@@ -14,11 +14,15 @@\n # limitations under the License.\n \"\"\"Main Turbinia application.\"\"\"\n \n-__version__ = '20190819'\n-\n import logging\n log = logging.getLogger('turbinia')\n \n+from pkg_resources import get_distribution, DistributionNotFound\n+try:\n+ __version__ = get_distribution(__name__).version\n+except DistributionNotFound:\n+ __version__ = \"unknown\"\n+\n \n def log_and_report(message, trace):\n \"\"\"Log an error and if enabled, send to GCP Error Reporting API.\n", "issue": "Generate version data from tags and commits\nToday we have a hard-coded value in `turbinia/__init__.py`, but it would be nice to generate the version number from the current TAG for releases and from the git commit id when there is no TAG (ie. when running from master or a different branch).\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"This is the setup file for the project.\"\"\"\n\n# yapf: disable\n\nfrom __future__ import unicode_literals\n\nimport sys\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\n# make sure turbinia is in path\nsys.path.insert(0, '.')\n\nimport turbinia # pylint: disable=wrong-import-position\n\nturbinia_description = (\n 'Turbinia is an open-source framework for deploying, managing, and running'\n 'forensic workloads on cloud platforms. It is intended to automate running '\n 'of common forensic processing tools (i.e. 
Plaso, TSK, strings, etc) to '\n 'help with processing evidence in the Cloud, scaling the processing of '\n 'large amounts of evidence, and decreasing response time by parallelizing'\n 'processing where possible.')\n\nrequirements = []\nwith open('requirements.txt','r') as f:\n requirements = f.read().splitlines()\nsetup(\n name='turbinia',\n version=turbinia.__version__,\n description='Automation and Scaling of Digital Forensics Tools',\n long_description=turbinia_description,\n license='Apache License, Version 2.0',\n url='http://turbinia.plumbing/',\n maintainer='Turbinia development team',\n maintainer_email='[email protected]',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n ],\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n entry_points={'console_scripts': ['turbiniactl=turbinia.turbiniactl:main']},\n install_requires=requirements,\n extras_require={\n 'dev': ['mock', 'nose', 'yapf', 'celery~=4.1', 'coverage'],\n 'local': ['celery~=4.1', 'kombu~=4.1', 'redis~=3.0'],\n 'worker': ['docker-explorer>=20191104', 'plaso>=20200430', 'pyhindsight>=20200607']\n }\n)\n", "path": "setup.py"}, {"content": "# -*- coding: utf-8 -*-\n# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Main Turbinia application.\"\"\"\n\n__version__ = '20190819'\n\nimport logging\nlog = logging.getLogger('turbinia')\n\n\ndef log_and_report(message, trace):\n \"\"\"Log an error and if enabled, send to GCP Error Reporting API.\n\n Args:\n message(str): The user defined message to log.\n trace(str): The error traceback message to log.\n \"\"\"\n from turbinia import config\n\n log.error(message)\n log.error(trace)\n # If GCP Error Reporting is enabled.\n config.LoadConfig()\n if config.STACKDRIVER_TRACEBACK:\n # Only load google_cloud if needed\n from turbinia.lib import google_cloud\n client = google_cloud.setup_stackdriver_traceback(config.TURBINIA_PROJECT)\n client.report_exception()\n\n\nclass TurbiniaException(Exception):\n \"\"\"Turbinia Exception class.\"\"\"\n pass\n", "path": "turbinia/__init__.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"This is the setup file for the project.\"\"\"\n\n# yapf: disable\n\nfrom __future__ import unicode_literals\n\nimport sys\n\nfrom setuptools import find_packages\nfrom setuptools 
import setup\n\n\n# make sure turbinia is in path\nsys.path.insert(0, '.')\n\nimport turbinia # pylint: disable=wrong-import-position\n\nturbinia_description = (\n 'Turbinia is an open-source framework for deploying, managing, and running'\n 'forensic workloads on cloud platforms. It is intended to automate running '\n 'of common forensic processing tools (i.e. Plaso, TSK, strings, etc) to '\n 'help with processing evidence in the Cloud, scaling the processing of '\n 'large amounts of evidence, and decreasing response time by parallelizing'\n 'processing where possible.')\n\nrequirements = []\nwith open('requirements.txt','r') as f:\n requirements = f.read().splitlines()\nsetup(\n name='turbinia',\n description='Automation and Scaling of Digital Forensics Tools',\n long_description=turbinia_description,\n license='Apache License, Version 2.0',\n url='http://turbinia.plumbing/',\n maintainer='Turbinia development team',\n maintainer_email='[email protected]',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n ],\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n entry_points={'console_scripts': ['turbiniactl=turbinia.turbiniactl:main']},\n install_requires=requirements,\n extras_require={\n 'dev': ['mock', 'nose', 'yapf', 'celery~=4.1', 'coverage'],\n 'local': ['celery~=4.1', 'kombu~=4.1', 'redis~=3.0'],\n 'worker': ['docker-explorer>=20191104', 'plaso>=20200430', 'pyhindsight>=20200607']\n },\n use_scm_version=True,\n setup_requires=['setuptools_scm']\n)\n", "path": "setup.py"}, {"content": "# -*- coding: utf-8 -*-\n# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Main Turbinia application.\"\"\"\n\nimport logging\nlog = logging.getLogger('turbinia')\n\nfrom pkg_resources import get_distribution, DistributionNotFound\ntry:\n __version__ = get_distribution(__name__).version\nexcept DistributionNotFound:\n __version__ = \"unknown\"\n\n\ndef log_and_report(message, trace):\n \"\"\"Log an error and if enabled, send to GCP Error Reporting API.\n\n Args:\n message(str): The user defined message to log.\n trace(str): The error traceback message to log.\n \"\"\"\n from turbinia import config\n\n log.error(message)\n log.error(trace)\n # If GCP Error Reporting is enabled.\n config.LoadConfig()\n if config.STACKDRIVER_TRACEBACK:\n # Only load google_cloud if needed\n from turbinia.lib import google_cloud\n client = google_cloud.setup_stackdriver_traceback(config.TURBINIA_PROJECT)\n client.report_exception()\n\n\nclass TurbiniaException(Exception):\n \"\"\"Turbinia Exception class.\"\"\"\n pass\n", "path": "turbinia/__init__.py"}]} | 1,513 | 400 |
gh_patches_debug_2116 | rasdani/github-patches | git_diff | comic__grand-challenge.org-3383 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Viewer configuration does not show linking options
**Describe the bug**
The view and edit pages for viewer configurations no longer show options to set the linking configuration.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to https://grand-challenge.org/viewer-configurations/demo-rse/
2. Scroll down to 'Linking Configuration'
The options displayed are duplicates of the 'Plugin and Tools' section.
--- END ISSUE ---
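
For context on the mechanism: in django-crispy-forms, each `Fieldset` takes a legend followed by the names of the fields it renders, so a section showing the wrong options usually means the wrong field tuple was unpacked into it. A minimal sketch with illustrative, made-up field names:
```python
# Sketch only; the field names are stand-ins, not the project's lists.
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Fieldset, Layout
from django.forms import Form

PLUGIN_FIELDS = ("show_image_info_plugin", "show_display_plugin")
LINKED_FIELDS = ("link_images", "link_panning")


class ExampleConfigForm(Form):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.helper = FormHelper(self)
        self.helper.layout = Layout(
            Fieldset("Plugins and Tools", *PLUGIN_FIELDS),
            # Unpacking PLUGIN_FIELDS again here would render the
            # plugin options a second time under this legend.
            Fieldset("Linking Configuration", *LINKED_FIELDS),
        )
```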
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/grandchallenge/workstation_configs/forms.py`
Content:
```
1 from crispy_forms.helper import FormHelper
2 from crispy_forms.layout import Fieldset, Layout, Submit
3 from django.forms import ModelForm
4 from django_select2.forms import Select2MultipleWidget
5
6 from grandchallenge.core.forms import SaveFormInitMixin
7 from grandchallenge.core.widgets import ColorEditorWidget, JSONEditorWidget
8 from grandchallenge.workstation_configs.models import (
9 KEY_BINDINGS_SCHEMA,
10 OVERLAY_SEGMENTS_SCHEMA,
11 WorkstationConfig,
12 )
13
14 GENERAL_FIELDS = (
15 "title",
16 "description",
17 "image_context",
18 "window_presets",
19 "default_window_preset",
20 "default_slab_thickness_mm",
21 "default_slab_render_method",
22 "default_orientation",
23 "default_image_interpolation",
24 "default_limit_view_area_to_image_volume",
25 "default_overlay_alpha",
26 "ghosting_slice_depth",
27 "overlay_luts",
28 "default_overlay_lut",
29 "default_overlay_interpolation",
30 "overlay_segments",
31 "key_bindings",
32 "default_zoom_scale",
33 "default_brush_size",
34 "default_annotation_color",
35 "default_annotation_line_width",
36 "auto_jump_center_of_gravity",
37 "point_bounding_box_size_mm",
38 )
39 PLUGIN_FIELDS = (
40 "show_image_info_plugin",
41 "show_display_plugin",
42 "show_image_switcher_plugin",
43 "show_algorithm_output_plugin",
44 "show_overlay_plugin",
45 "show_annotation_statistics_plugin",
46 "show_swivel_tool",
47 "show_invert_tool",
48 "show_flip_tool",
49 "show_window_level_tool",
50 "show_reset_tool",
51 "show_overlay_selection_tool",
52 "show_lut_selection_tool",
53 "show_annotation_counter_tool",
54 "enable_contrast_enhancement",
55 )
56 LINKED_FIELDS = (
57 "link_images",
58 "link_panning",
59 "link_zooming",
60 "link_slicing",
61 "link_orienting",
62 "link_windowing",
63 "link_inverting",
64 "link_flipping",
65 )
66
67
68 class WorkstationConfigForm(SaveFormInitMixin, ModelForm):
69 def __init__(self, *args, read_only=False, **kwargs):
70 super().__init__(*args, **kwargs)
71
72 self.helper = FormHelper(self)
73 self.helper.layout = Layout(
74 Fieldset("", *GENERAL_FIELDS),
75 Fieldset(
76 "Plugins and Tools",
77 *PLUGIN_FIELDS,
78 css_class="border rounded px-2 my-4",
79 ),
80 Fieldset(
81 "Linking Configuration",
82 *PLUGIN_FIELDS,
83 css_class="border rounded px-2 my-4",
84 ),
85 )
86
87 if read_only:
88 for field in self.fields:
89 self.fields[field].disabled = True
90 else:
91 self.helper.layout.append(Submit("save", "Save"))
92
93 class Meta:
94 model = WorkstationConfig
95 fields = (
96 *GENERAL_FIELDS,
97 *PLUGIN_FIELDS,
98 *LINKED_FIELDS,
99 )
100
101 widgets = {
102 "overlay_segments": JSONEditorWidget(
103 schema=OVERLAY_SEGMENTS_SCHEMA
104 ),
105 "key_bindings": JSONEditorWidget(schema=KEY_BINDINGS_SCHEMA),
106 "default_annotation_color": ColorEditorWidget(format="hex"),
107 "window_presets": Select2MultipleWidget,
108 "overlay_luts": Select2MultipleWidget,
109 }
110 help_texts = {
111 "overlay_segments": (
112 model._meta.get_field("overlay_segments").help_text
113 + ". If an categorical overlay is shown, it is possible to show toggles "
114 "to change the visibility of the different overlay categories. To do "
115 "so, configure the categories that should be displayed. Data from the"
116 " algorithm's output.json can be added as an extra label to each "
117 "toggle using jinja templating. "
118 'For example: [{ "voxel_value": 0, "name": "Level 0", "visible": '
119 'false, "metric_template": "{{metrics.volumes[0]}} mm³"},]'
120 ),
121 "key_bindings": model._meta.get_field("key_bindings").help_text
122 + ". A copy and paste JSON can be obtained from the viewer.",
123 }
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/grandchallenge/workstation_configs/forms.py b/app/grandchallenge/workstation_configs/forms.py
--- a/app/grandchallenge/workstation_configs/forms.py
+++ b/app/grandchallenge/workstation_configs/forms.py
@@ -79,7 +79,7 @@
),
Fieldset(
"Linking Configuration",
- *PLUGIN_FIELDS,
+ *LINKED_FIELDS,
css_class="border rounded px-2 my-4",
),
)
| {"golden_diff": "diff --git a/app/grandchallenge/workstation_configs/forms.py b/app/grandchallenge/workstation_configs/forms.py\n--- a/app/grandchallenge/workstation_configs/forms.py\n+++ b/app/grandchallenge/workstation_configs/forms.py\n@@ -79,7 +79,7 @@\n ),\n Fieldset(\n \"Linking Configuration\",\n- *PLUGIN_FIELDS,\n+ *LINKED_FIELDS,\n css_class=\"border rounded px-2 my-4\",\n ),\n )\n", "issue": "Viewer configuration does not show linking options\n**Describe the bug**\r\nThe view and edit pages for viewer configurations no longer show options to set the linking configuration.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to https://grand-challenge.org/viewer-configurations/demo-rse/\r\n2. Scroll down to 'Linking Configuration'\r\nThe options displayed are duplicates of the 'Plugin and Tools' section.\r\n\r\n**Expected behavior**\r\nA clear and concise description of what you expected to happen.\r\n\r\n**Screenshots**\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: [e.g. iOS]\r\n - Browser [e.g. chrome, safari]\r\n - Version [e.g. 22]\r\n\r\n**Smartphone (please complete the following information):**\r\n - Device: [e.g. iPhone6]\r\n - OS: [e.g. iOS8.1]\r\n - Browser [e.g. stock browser, safari]\r\n - Version [e.g. 22]\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "from crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Fieldset, Layout, Submit\nfrom django.forms import ModelForm\nfrom django_select2.forms import Select2MultipleWidget\n\nfrom grandchallenge.core.forms import SaveFormInitMixin\nfrom grandchallenge.core.widgets import ColorEditorWidget, JSONEditorWidget\nfrom grandchallenge.workstation_configs.models import (\n KEY_BINDINGS_SCHEMA,\n OVERLAY_SEGMENTS_SCHEMA,\n WorkstationConfig,\n)\n\nGENERAL_FIELDS = (\n \"title\",\n \"description\",\n \"image_context\",\n \"window_presets\",\n \"default_window_preset\",\n \"default_slab_thickness_mm\",\n \"default_slab_render_method\",\n \"default_orientation\",\n \"default_image_interpolation\",\n \"default_limit_view_area_to_image_volume\",\n \"default_overlay_alpha\",\n \"ghosting_slice_depth\",\n \"overlay_luts\",\n \"default_overlay_lut\",\n \"default_overlay_interpolation\",\n \"overlay_segments\",\n \"key_bindings\",\n \"default_zoom_scale\",\n \"default_brush_size\",\n \"default_annotation_color\",\n \"default_annotation_line_width\",\n \"auto_jump_center_of_gravity\",\n \"point_bounding_box_size_mm\",\n)\nPLUGIN_FIELDS = (\n \"show_image_info_plugin\",\n \"show_display_plugin\",\n \"show_image_switcher_plugin\",\n \"show_algorithm_output_plugin\",\n \"show_overlay_plugin\",\n \"show_annotation_statistics_plugin\",\n \"show_swivel_tool\",\n \"show_invert_tool\",\n \"show_flip_tool\",\n \"show_window_level_tool\",\n \"show_reset_tool\",\n \"show_overlay_selection_tool\",\n \"show_lut_selection_tool\",\n \"show_annotation_counter_tool\",\n \"enable_contrast_enhancement\",\n)\nLINKED_FIELDS = (\n \"link_images\",\n \"link_panning\",\n \"link_zooming\",\n \"link_slicing\",\n \"link_orienting\",\n \"link_windowing\",\n \"link_inverting\",\n \"link_flipping\",\n)\n\n\nclass WorkstationConfigForm(SaveFormInitMixin, ModelForm):\n def __init__(self, *args, read_only=False, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.helper = FormHelper(self)\n self.helper.layout = Layout(\n Fieldset(\"\", *GENERAL_FIELDS),\n Fieldset(\n 
\"Plugins and Tools\",\n *PLUGIN_FIELDS,\n css_class=\"border rounded px-2 my-4\",\n ),\n Fieldset(\n \"Linking Configuration\",\n *PLUGIN_FIELDS,\n css_class=\"border rounded px-2 my-4\",\n ),\n )\n\n if read_only:\n for field in self.fields:\n self.fields[field].disabled = True\n else:\n self.helper.layout.append(Submit(\"save\", \"Save\"))\n\n class Meta:\n model = WorkstationConfig\n fields = (\n *GENERAL_FIELDS,\n *PLUGIN_FIELDS,\n *LINKED_FIELDS,\n )\n\n widgets = {\n \"overlay_segments\": JSONEditorWidget(\n schema=OVERLAY_SEGMENTS_SCHEMA\n ),\n \"key_bindings\": JSONEditorWidget(schema=KEY_BINDINGS_SCHEMA),\n \"default_annotation_color\": ColorEditorWidget(format=\"hex\"),\n \"window_presets\": Select2MultipleWidget,\n \"overlay_luts\": Select2MultipleWidget,\n }\n help_texts = {\n \"overlay_segments\": (\n model._meta.get_field(\"overlay_segments\").help_text\n + \". If an categorical overlay is shown, it is possible to show toggles \"\n \"to change the visibility of the different overlay categories. To do \"\n \"so, configure the categories that should be displayed. Data from the\"\n \" algorithm's output.json can be added as an extra label to each \"\n \"toggle using jinja templating. \"\n 'For example: [{ \"voxel_value\": 0, \"name\": \"Level 0\", \"visible\": '\n 'false, \"metric_template\": \"{{metrics.volumes[0]}} mm\u00b3\"},]'\n ),\n \"key_bindings\": model._meta.get_field(\"key_bindings\").help_text\n + \". A copy and paste JSON can be obtained from the viewer.\",\n }\n", "path": "app/grandchallenge/workstation_configs/forms.py"}], "after_files": [{"content": "from crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Fieldset, Layout, Submit\nfrom django.forms import ModelForm\nfrom django_select2.forms import Select2MultipleWidget\n\nfrom grandchallenge.core.forms import SaveFormInitMixin\nfrom grandchallenge.core.widgets import ColorEditorWidget, JSONEditorWidget\nfrom grandchallenge.workstation_configs.models import (\n KEY_BINDINGS_SCHEMA,\n OVERLAY_SEGMENTS_SCHEMA,\n WorkstationConfig,\n)\n\nGENERAL_FIELDS = (\n \"title\",\n \"description\",\n \"image_context\",\n \"window_presets\",\n \"default_window_preset\",\n \"default_slab_thickness_mm\",\n \"default_slab_render_method\",\n \"default_orientation\",\n \"default_image_interpolation\",\n \"default_limit_view_area_to_image_volume\",\n \"default_overlay_alpha\",\n \"ghosting_slice_depth\",\n \"overlay_luts\",\n \"default_overlay_lut\",\n \"default_overlay_interpolation\",\n \"overlay_segments\",\n \"key_bindings\",\n \"default_zoom_scale\",\n \"default_brush_size\",\n \"default_annotation_color\",\n \"default_annotation_line_width\",\n \"auto_jump_center_of_gravity\",\n \"point_bounding_box_size_mm\",\n)\nPLUGIN_FIELDS = (\n \"show_image_info_plugin\",\n \"show_display_plugin\",\n \"show_image_switcher_plugin\",\n \"show_algorithm_output_plugin\",\n \"show_overlay_plugin\",\n \"show_annotation_statistics_plugin\",\n \"show_swivel_tool\",\n \"show_invert_tool\",\n \"show_flip_tool\",\n \"show_window_level_tool\",\n \"show_reset_tool\",\n \"show_overlay_selection_tool\",\n \"show_lut_selection_tool\",\n \"show_annotation_counter_tool\",\n \"enable_contrast_enhancement\",\n)\nLINKED_FIELDS = (\n \"link_images\",\n \"link_panning\",\n \"link_zooming\",\n \"link_slicing\",\n \"link_orienting\",\n \"link_windowing\",\n \"link_inverting\",\n \"link_flipping\",\n)\n\n\nclass WorkstationConfigForm(SaveFormInitMixin, ModelForm):\n def __init__(self, *args, read_only=False, **kwargs):\n 
super().__init__(*args, **kwargs)\n\n self.helper = FormHelper(self)\n self.helper.layout = Layout(\n Fieldset(\"\", *GENERAL_FIELDS),\n Fieldset(\n \"Plugins and Tools\",\n *PLUGIN_FIELDS,\n css_class=\"border rounded px-2 my-4\",\n ),\n Fieldset(\n \"Linking Configuration\",\n *LINKED_FIELDS,\n css_class=\"border rounded px-2 my-4\",\n ),\n )\n\n if read_only:\n for field in self.fields:\n self.fields[field].disabled = True\n else:\n self.helper.layout.append(Submit(\"save\", \"Save\"))\n\n class Meta:\n model = WorkstationConfig\n fields = (\n *GENERAL_FIELDS,\n *PLUGIN_FIELDS,\n *LINKED_FIELDS,\n )\n\n widgets = {\n \"overlay_segments\": JSONEditorWidget(\n schema=OVERLAY_SEGMENTS_SCHEMA\n ),\n \"key_bindings\": JSONEditorWidget(schema=KEY_BINDINGS_SCHEMA),\n \"default_annotation_color\": ColorEditorWidget(format=\"hex\"),\n \"window_presets\": Select2MultipleWidget,\n \"overlay_luts\": Select2MultipleWidget,\n }\n help_texts = {\n \"overlay_segments\": (\n model._meta.get_field(\"overlay_segments\").help_text\n + \". If an categorical overlay is shown, it is possible to show toggles \"\n \"to change the visibility of the different overlay categories. To do \"\n \"so, configure the categories that should be displayed. Data from the\"\n \" algorithm's output.json can be added as an extra label to each \"\n \"toggle using jinja templating. \"\n 'For example: [{ \"voxel_value\": 0, \"name\": \"Level 0\", \"visible\": '\n 'false, \"metric_template\": \"{{metrics.volumes[0]}} mm\u00b3\"},]'\n ),\n \"key_bindings\": model._meta.get_field(\"key_bindings\").help_text\n + \". A copy and paste JSON can be obtained from the viewer.\",\n }\n", "path": "app/grandchallenge/workstation_configs/forms.py"}]} | 1,621 | 102 |
gh_patches_debug_36194 | rasdani/github-patches | git_diff | rlworkgroup__garage-625 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
FireReset is firing warnings in the CI
from recent test runs
```sh
UserWarning: WARN: <class 'garage.envs.wrappers.fire_reset.FireReset'> doesn't implement 'step' method, which is required for wrappers derived directly from Wrapper. Deprecated default implementation is used.
```
--- END ISSUE ---
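
For reference, the warning is raised by `gym` for `Wrapper` subclasses that override `reset` but rely on the deprecated default `step`. Below is a minimal sketch, assuming the classic `gym` API where `step` returns `(obs, reward, done, info)`, of a wrapper that defines both methods explicitly:
```python
import gym


class ForwardingWrapper(gym.Wrapper):
    """Illustrative wrapper; forwards reset and step unchanged."""

    def reset(self, **kwargs):
        # Custom reset logic would go here.
        return self.env.reset(**kwargs)

    def step(self, action):
        # Defining step explicitly silences the deprecation warning
        # about relying on Wrapper's default implementation.
        return self.env.step(action)
```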
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `garage/envs/wrappers/noop.py`
Content:
```
1 """Noop wrapper for gym.Env."""
2 import gym
3 import numpy as np
4
5
6 class Noop(gym.Wrapper):
7 """
8 Noop wrapper for gym.Env.
9
10 It samples initial states by taking random number of no-ops on reset.
11 No-op is assumed to be action 0.
12
13 Args:
14 env: The environment to be wrapped.
15 noop_max: Maximum number no-op to be performed on reset.
16 """
17
18 def __init__(self, env, noop_max=30):
19 super().__init__(env)
20 self._noop_max = noop_max
21 self._noop_action = 0
22 assert noop_max > 0, "noop_max should be larger than 0!"
23 assert env.unwrapped.get_action_meanings()[0] == 'NOOP', (
24 "No-op should be the 0-th action but it's not in {}!".format(env))
25
26 def reset(self, **kwargs):
27 """gym.Env reset function."""
28 obs = self.env.reset(**kwargs)
29 noops = np.random.randint(1, self._noop_max + 1)
30 for _ in range(noops):
31 obs, _, done, _ = self.env.step(self._noop_action)
32 if done:
33 obs = self.env.reset(**kwargs)
34 return obs
35
```
Path: `garage/envs/wrappers/fire_reset.py`
Content:
```
1 """Fire reset wrapper for gym.Env."""
2 import gym
3
4
5 class FireReset(gym.Wrapper):
6 """
7 Fire reset wrapper for gym.Env.
8
9 Take action "fire" on reset.
10
11 Args:
12 env: The environment to be wrapped.
13 """
14
15 def __init__(self, env):
16 super().__init__(env)
17 assert env.unwrapped.get_action_meanings()[1] == 'FIRE', (
18 "Only use fire reset wrapper for suitable environment!")
19 assert len(env.unwrapped.get_action_meanings()) >= 3, (
20 "Only use fire reset wrapper for suitable environment!")
21
22 def reset(self, **kwargs):
23 """gym.Env reset function."""
24 self.env.reset(**kwargs)
25 obs, _, done, _ = self.env.step(1)
26 if done:
27 obs = self.env.reset(**kwargs)
28 return obs
29
```
Path: `garage/envs/wrappers/clip_reward.py`
Content:
```
1 """Clip reward for gym.Env."""
2 import gym
3 import numpy as np
4
5
6 class ClipReward(gym.Wrapper):
7 """Clip the reward by its sign."""
8
9 def step(self, ac):
10 """gym.Env step function."""
11 obs, reward, done, info = self.env.step(ac)
12 return obs, np.sign(reward), done, info
13
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/garage/envs/wrappers/clip_reward.py b/garage/envs/wrappers/clip_reward.py
--- a/garage/envs/wrappers/clip_reward.py
+++ b/garage/envs/wrappers/clip_reward.py
@@ -10,3 +10,7 @@
"""gym.Env step function."""
obs, reward, done, info = self.env.step(ac)
return obs, np.sign(reward), done, info
+
+ def reset(self):
+ """gym.Env reset."""
+ return self.env.reset()
diff --git a/garage/envs/wrappers/fire_reset.py b/garage/envs/wrappers/fire_reset.py
--- a/garage/envs/wrappers/fire_reset.py
+++ b/garage/envs/wrappers/fire_reset.py
@@ -15,9 +15,9 @@
def __init__(self, env):
super().__init__(env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE', (
- "Only use fire reset wrapper for suitable environment!")
+ 'Only use fire reset wrapper for suitable environment!')
assert len(env.unwrapped.get_action_meanings()) >= 3, (
- "Only use fire reset wrapper for suitable environment!")
+ 'Only use fire reset wrapper for suitable environment!')
def reset(self, **kwargs):
"""gym.Env reset function."""
@@ -26,3 +26,7 @@
if done:
obs = self.env.reset(**kwargs)
return obs
+
+ def step(self, action):
+ """gym.Env step function."""
+ return self.env.step(action)
diff --git a/garage/envs/wrappers/noop.py b/garage/envs/wrappers/noop.py
--- a/garage/envs/wrappers/noop.py
+++ b/garage/envs/wrappers/noop.py
@@ -19,7 +19,7 @@
super().__init__(env)
self._noop_max = noop_max
self._noop_action = 0
- assert noop_max > 0, "noop_max should be larger than 0!"
+ assert noop_max > 0, 'noop_max should be larger than 0!'
assert env.unwrapped.get_action_meanings()[0] == 'NOOP', (
"No-op should be the 0-th action but it's not in {}!".format(env))
@@ -28,7 +28,11 @@
obs = self.env.reset(**kwargs)
noops = np.random.randint(1, self._noop_max + 1)
for _ in range(noops):
- obs, _, done, _ = self.env.step(self._noop_action)
+ obs, _, done, _ = self.step(self._noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
+
+ def step(self, action):
+ """gym.Env step function."""
+ return self.env.step(action)
| {"golden_diff": "diff --git a/garage/envs/wrappers/clip_reward.py b/garage/envs/wrappers/clip_reward.py\n--- a/garage/envs/wrappers/clip_reward.py\n+++ b/garage/envs/wrappers/clip_reward.py\n@@ -10,3 +10,7 @@\n \"\"\"gym.Env step function.\"\"\"\n obs, reward, done, info = self.env.step(ac)\n return obs, np.sign(reward), done, info\n+\n+ def reset(self):\n+ \"\"\"gym.Env reset.\"\"\"\n+ return self.env.reset()\ndiff --git a/garage/envs/wrappers/fire_reset.py b/garage/envs/wrappers/fire_reset.py\n--- a/garage/envs/wrappers/fire_reset.py\n+++ b/garage/envs/wrappers/fire_reset.py\n@@ -15,9 +15,9 @@\n def __init__(self, env):\n super().__init__(env)\n assert env.unwrapped.get_action_meanings()[1] == 'FIRE', (\n- \"Only use fire reset wrapper for suitable environment!\")\n+ 'Only use fire reset wrapper for suitable environment!')\n assert len(env.unwrapped.get_action_meanings()) >= 3, (\n- \"Only use fire reset wrapper for suitable environment!\")\n+ 'Only use fire reset wrapper for suitable environment!')\n \n def reset(self, **kwargs):\n \"\"\"gym.Env reset function.\"\"\"\n@@ -26,3 +26,7 @@\n if done:\n obs = self.env.reset(**kwargs)\n return obs\n+\n+ def step(self, action):\n+ \"\"\"gym.Env step function.\"\"\"\n+ return self.env.step(action)\ndiff --git a/garage/envs/wrappers/noop.py b/garage/envs/wrappers/noop.py\n--- a/garage/envs/wrappers/noop.py\n+++ b/garage/envs/wrappers/noop.py\n@@ -19,7 +19,7 @@\n super().__init__(env)\n self._noop_max = noop_max\n self._noop_action = 0\n- assert noop_max > 0, \"noop_max should be larger than 0!\"\n+ assert noop_max > 0, 'noop_max should be larger than 0!'\n assert env.unwrapped.get_action_meanings()[0] == 'NOOP', (\n \"No-op should be the 0-th action but it's not in {}!\".format(env))\n \n@@ -28,7 +28,11 @@\n obs = self.env.reset(**kwargs)\n noops = np.random.randint(1, self._noop_max + 1)\n for _ in range(noops):\n- obs, _, done, _ = self.env.step(self._noop_action)\n+ obs, _, done, _ = self.step(self._noop_action)\n if done:\n obs = self.env.reset(**kwargs)\n return obs\n+\n+ def step(self, action):\n+ \"\"\"gym.Env step function.\"\"\"\n+ return self.env.step(action)\n", "issue": "FireReset is firing warnings in the CI\nfrom recent tests runs\r\n```sh\r\nUserWarning: WARN: <class 'garage.envs.wrappers.fire_reset.FireReset'> doesn't implement 'step' method, which is required for wrappers derived directly from Wrapper. 
Deprecated default implementation is used.\r\n```\n", "before_files": [{"content": "\"\"\"Noop wrapper for gym.Env.\"\"\"\nimport gym\nimport numpy as np\n\n\nclass Noop(gym.Wrapper):\n \"\"\"\n Noop wrapper for gym.Env.\n\n It samples initial states by taking random number of no-ops on reset.\n No-op is assumed to be action 0.\n\n Args:\n env: The environment to be wrapped.\n noop_max: Maximum number no-op to be performed on reset.\n \"\"\"\n\n def __init__(self, env, noop_max=30):\n super().__init__(env)\n self._noop_max = noop_max\n self._noop_action = 0\n assert noop_max > 0, \"noop_max should be larger than 0!\"\n assert env.unwrapped.get_action_meanings()[0] == 'NOOP', (\n \"No-op should be the 0-th action but it's not in {}!\".format(env))\n\n def reset(self, **kwargs):\n \"\"\"gym.Env reset function.\"\"\"\n obs = self.env.reset(**kwargs)\n noops = np.random.randint(1, self._noop_max + 1)\n for _ in range(noops):\n obs, _, done, _ = self.env.step(self._noop_action)\n if done:\n obs = self.env.reset(**kwargs)\n return obs\n", "path": "garage/envs/wrappers/noop.py"}, {"content": "\"\"\"Fire reset wrapper for gym.Env.\"\"\"\nimport gym\n\n\nclass FireReset(gym.Wrapper):\n \"\"\"\n Fire reset wrapper for gym.Env.\n\n Take action \"fire\" on reset.\n\n Args:\n env: The environment to be wrapped.\n \"\"\"\n\n def __init__(self, env):\n super().__init__(env)\n assert env.unwrapped.get_action_meanings()[1] == 'FIRE', (\n \"Only use fire reset wrapper for suitable environment!\")\n assert len(env.unwrapped.get_action_meanings()) >= 3, (\n \"Only use fire reset wrapper for suitable environment!\")\n\n def reset(self, **kwargs):\n \"\"\"gym.Env reset function.\"\"\"\n self.env.reset(**kwargs)\n obs, _, done, _ = self.env.step(1)\n if done:\n obs = self.env.reset(**kwargs)\n return obs\n", "path": "garage/envs/wrappers/fire_reset.py"}, {"content": "\"\"\"Clip reward for gym.Env.\"\"\"\nimport gym\nimport numpy as np\n\n\nclass ClipReward(gym.Wrapper):\n \"\"\"Clip the reward by its sign.\"\"\"\n\n def step(self, ac):\n \"\"\"gym.Env step function.\"\"\"\n obs, reward, done, info = self.env.step(ac)\n return obs, np.sign(reward), done, info\n", "path": "garage/envs/wrappers/clip_reward.py"}], "after_files": [{"content": "\"\"\"Noop wrapper for gym.Env.\"\"\"\nimport gym\nimport numpy as np\n\n\nclass Noop(gym.Wrapper):\n \"\"\"\n Noop wrapper for gym.Env.\n\n It samples initial states by taking random number of no-ops on reset.\n No-op is assumed to be action 0.\n\n Args:\n env: The environment to be wrapped.\n noop_max: Maximum number no-op to be performed on reset.\n \"\"\"\n\n def __init__(self, env, noop_max=30):\n super().__init__(env)\n self._noop_max = noop_max\n self._noop_action = 0\n assert noop_max > 0, 'noop_max should be larger than 0!'\n assert env.unwrapped.get_action_meanings()[0] == 'NOOP', (\n \"No-op should be the 0-th action but it's not in {}!\".format(env))\n\n def reset(self, **kwargs):\n \"\"\"gym.Env reset function.\"\"\"\n obs = self.env.reset(**kwargs)\n noops = np.random.randint(1, self._noop_max + 1)\n for _ in range(noops):\n obs, _, done, _ = self.step(self._noop_action)\n if done:\n obs = self.env.reset(**kwargs)\n return obs\n\n def step(self, action):\n \"\"\"gym.Env step function.\"\"\"\n return self.env.step(action)\n", "path": "garage/envs/wrappers/noop.py"}, {"content": "\"\"\"Fire reset wrapper for gym.Env.\"\"\"\nimport gym\n\n\nclass FireReset(gym.Wrapper):\n \"\"\"\n Fire reset wrapper for gym.Env.\n\n Take action \"fire\" on reset.\n\n Args:\n env: The 
environment to be wrapped.\n \"\"\"\n\n def __init__(self, env):\n super().__init__(env)\n assert env.unwrapped.get_action_meanings()[1] == 'FIRE', (\n 'Only use fire reset wrapper for suitable environment!')\n assert len(env.unwrapped.get_action_meanings()) >= 3, (\n 'Only use fire reset wrapper for suitable environment!')\n\n def reset(self, **kwargs):\n \"\"\"gym.Env reset function.\"\"\"\n self.env.reset(**kwargs)\n obs, _, done, _ = self.env.step(1)\n if done:\n obs = self.env.reset(**kwargs)\n return obs\n\n def step(self, action):\n \"\"\"gym.Env step function.\"\"\"\n return self.env.step(action)\n", "path": "garage/envs/wrappers/fire_reset.py"}, {"content": "\"\"\"Clip reward for gym.Env.\"\"\"\nimport gym\nimport numpy as np\n\n\nclass ClipReward(gym.Wrapper):\n \"\"\"Clip the reward by its sign.\"\"\"\n\n def step(self, ac):\n \"\"\"gym.Env step function.\"\"\"\n obs, reward, done, info = self.env.step(ac)\n return obs, np.sign(reward), done, info\n\n def reset(self):\n \"\"\"gym.Env reset.\"\"\"\n return self.env.reset()\n", "path": "garage/envs/wrappers/clip_reward.py"}]} | 1,033 | 662 |
gh_patches_debug_660 | rasdani/github-patches | git_diff | pex-tool__pex-2153 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 2.1.137
On the docket:
+ [x] A locked requirement with mixed artifact types fails to lock. #2150
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.136"
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.136"
+__version__ = "2.1.137"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.136\"\n+__version__ = \"2.1.137\"\n", "issue": "Release 2.1.137\nOn the docket:\r\n+ [x] A locked requirement with mixed artifact types fails to lock. #2150\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.136\"\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.137\"\n", "path": "pex/version.py"}]} | 344 | 98 |
gh_patches_debug_19672 | rasdani/github-patches | git_diff | NVIDIA__apex-620 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
_amp_state determines whether running in distributed at import
It looks like the `_amp_state.py` module determines whether pytorch is running in distributed mode at the import level. The `distributed` only seems to be used in `maybe_print`. See code snippet:
https://github.com/NVIDIA/apex/blob/37cdaf4ad57ab4e7dd9ef13dbed7b29aa939d061/apex/amp/_amp_state.py#L38-L52
This causes a couple issues:
1. It will only support the `env://` initialization of torch distributed
2. It will fail if amp is imported before launching the distributed training
Neither of these is an issue for most, since most people launch via `torch.distributed.launch`. However, it can be an issue if you define your own distributed launch function or use `torch.multiprocessing.spawn`. I can't see a good reason to do it this way anyway, as it appears this variable is only used in the `maybe_print` function. I'll submit a pull request to fix this. Let me know if I'm missing something though.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apex/amp/_amp_state.py`
Content:
```
1 # This is a "header object" that allows different amp modules to communicate.
2 # I'm a C++ guy, not a python guy. I decided this approach because it seemed most C++-like.
3 # But apparently it's ok:
4 # http://effbot.org/pyfaq/how-do-i-share-global-variables-across-modules.htm
5 import os
6 import torch
7
8 TORCH_MAJOR = int(torch.__version__.split('.')[0])
9 TORCH_MINOR = int(torch.__version__.split('.')[1])
10
11 if TORCH_MAJOR == 0:
12 import collections.abc as container_abcs
13 else:
14 from torch._six import container_abcs
15
16
17 class AmpState(object):
18 def __init__(self):
19 self.hard_override=False
20 self.allow_incoming_model_not_fp32 = False
21 self.verbosity=1
22
23
24 # Attribute stash. Could also just stash things as global module attributes.
25 _amp_state = AmpState()
26
27
28 def warn_or_err(msg):
29 if _amp_state.hard_override:
30 print("Warning: " + msg)
31 else:
32 raise RuntimeError(msg)
33 # I'm not sure if allowing hard_override is a good idea.
34 # + " If you're sure you know what you're doing, supply " +
35 # "hard_override=True to amp.initialize.")
36
37
38 distributed = False
39 if 'WORLD_SIZE' in os.environ:
40 distributed = int(os.environ['WORLD_SIZE']) > 1
41
42
43 def maybe_print(msg, rank0=False):
44 if _amp_state.verbosity > 0:
45 if rank0:
46 if distributed:
47 if torch.distributed.get_rank() == 0:
48 print(msg)
49 else:
50 print(msg)
51 else:
52 print(msg)
53
54
55 # def iter_params(param_groups):
56 # for group in param_groups:
57 # for p in group['params']:
58 # yield p
59
60
61 def master_params(optimizer):
62 """
63 Generator expression that iterates over the params owned by ``optimizer``.
64
65 Args:
66 optimizer: An optimizer previously returned from ``amp.initialize``.
67 """
68 for group in optimizer.param_groups:
69 for p in group['params']:
70 yield p
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/apex/amp/_amp_state.py b/apex/amp/_amp_state.py
--- a/apex/amp/_amp_state.py
+++ b/apex/amp/_amp_state.py
@@ -1,5 +1,5 @@
# This is a "header object" that allows different amp modules to communicate.
-# I'm a C++ guy, not a python guy. I decided this approach because it seemed most C++-like.
+# I'm a C++ guy, not a python guy. I decided this approach because it seemed most C++-like.
# But apparently it's ok:
# http://effbot.org/pyfaq/how-do-i-share-global-variables-across-modules.htm
import os
@@ -35,12 +35,9 @@
# "hard_override=True to amp.initialize.")
-distributed = False
-if 'WORLD_SIZE' in os.environ:
- distributed = int(os.environ['WORLD_SIZE']) > 1
-
-
def maybe_print(msg, rank0=False):
+ distributed = torch.distributed.is_initialized() and \
+ torch.distributed.get_world_size() > 1
if _amp_state.verbosity > 0:
if rank0:
if distributed:
| {"golden_diff": "diff --git a/apex/amp/_amp_state.py b/apex/amp/_amp_state.py\n--- a/apex/amp/_amp_state.py\n+++ b/apex/amp/_amp_state.py\n@@ -1,5 +1,5 @@\n # This is a \"header object\" that allows different amp modules to communicate.\n-# I'm a C++ guy, not a python guy. I decided this approach because it seemed most C++-like. \n+# I'm a C++ guy, not a python guy. I decided this approach because it seemed most C++-like.\n # But apparently it's ok:\n # http://effbot.org/pyfaq/how-do-i-share-global-variables-across-modules.htm\n import os\n@@ -35,12 +35,9 @@\n # \"hard_override=True to amp.initialize.\")\n \n \n-distributed = False\n-if 'WORLD_SIZE' in os.environ:\n- distributed = int(os.environ['WORLD_SIZE']) > 1\n-\n-\n def maybe_print(msg, rank0=False):\n+ distributed = torch.distributed.is_initialized() and \\\n+ torch.distributed.get_world_size() > 1\n if _amp_state.verbosity > 0:\n if rank0:\n if distributed:\n", "issue": "_amp_state determines whether running in distributed at import\nIt looks like the `_amp_state.py` module determines whether pytorch is running in distributed mode at the import level. The `distributed` only seems to be used in `maybe_print`. See code snippet:\r\n\r\nhttps://github.com/NVIDIA/apex/blob/37cdaf4ad57ab4e7dd9ef13dbed7b29aa939d061/apex/amp/_amp_state.py#L38-L52\r\n\r\nThis causes a couple issues:\r\n\r\n1. It will only support the `env://` initialization of torch distributed\r\n2. It will fail if amp is imported before launching the distributed training\r\n\r\nNeither of these is an issue for most, since most people launch via `torch.distributed.launch`. However, it can be an issue if you define your own distributed launch function or use `torch.multiprocessing.spawn`. I can't see a good reason to do it this way anyway, as it appears this variable is only used in the `maybe_print` function. I'll submit a pull request to fix this. Let me know if I'm missing something though.\n", "before_files": [{"content": "# This is a \"header object\" that allows different amp modules to communicate.\n# I'm a C++ guy, not a python guy. I decided this approach because it seemed most C++-like. \n# But apparently it's ok:\n# http://effbot.org/pyfaq/how-do-i-share-global-variables-across-modules.htm\nimport os\nimport torch\n\nTORCH_MAJOR = int(torch.__version__.split('.')[0])\nTORCH_MINOR = int(torch.__version__.split('.')[1])\n\nif TORCH_MAJOR == 0:\n import collections.abc as container_abcs\nelse:\n from torch._six import container_abcs\n\n\nclass AmpState(object):\n def __init__(self):\n self.hard_override=False\n self.allow_incoming_model_not_fp32 = False\n self.verbosity=1\n\n\n# Attribute stash. 
Could also just stash things as global module attributes.\n_amp_state = AmpState()\n\n\ndef warn_or_err(msg):\n if _amp_state.hard_override:\n print(\"Warning: \" + msg)\n else:\n raise RuntimeError(msg)\n # I'm not sure if allowing hard_override is a good idea.\n # + \" If you're sure you know what you're doing, supply \" +\n # \"hard_override=True to amp.initialize.\")\n\n\ndistributed = False\nif 'WORLD_SIZE' in os.environ:\n distributed = int(os.environ['WORLD_SIZE']) > 1\n\n\ndef maybe_print(msg, rank0=False):\n if _amp_state.verbosity > 0:\n if rank0:\n if distributed:\n if torch.distributed.get_rank() == 0:\n print(msg)\n else:\n print(msg)\n else:\n print(msg)\n\n\n# def iter_params(param_groups):\n# for group in param_groups:\n# for p in group['params']:\n# yield p\n\n\ndef master_params(optimizer):\n \"\"\"\n Generator expression that iterates over the params owned by ``optimizer``.\n\n Args:\n optimizer: An optimizer previously returned from ``amp.initialize``.\n \"\"\"\n for group in optimizer.param_groups:\n for p in group['params']:\n yield p\n", "path": "apex/amp/_amp_state.py"}], "after_files": [{"content": "# This is a \"header object\" that allows different amp modules to communicate.\n# I'm a C++ guy, not a python guy. I decided this approach because it seemed most C++-like.\n# But apparently it's ok:\n# http://effbot.org/pyfaq/how-do-i-share-global-variables-across-modules.htm\nimport os\nimport torch\n\nTORCH_MAJOR = int(torch.__version__.split('.')[0])\nTORCH_MINOR = int(torch.__version__.split('.')[1])\n\nif TORCH_MAJOR == 0:\n import collections.abc as container_abcs\nelse:\n from torch._six import container_abcs\n\n\nclass AmpState(object):\n def __init__(self):\n self.hard_override=False\n self.allow_incoming_model_not_fp32 = False\n self.verbosity=1\n\n\n# Attribute stash. Could also just stash things as global module attributes.\n_amp_state = AmpState()\n\n\ndef warn_or_err(msg):\n if _amp_state.hard_override:\n print(\"Warning: \" + msg)\n else:\n raise RuntimeError(msg)\n # I'm not sure if allowing hard_override is a good idea.\n # + \" If you're sure you know what you're doing, supply \" +\n # \"hard_override=True to amp.initialize.\")\n\n\ndef maybe_print(msg, rank0=False):\n distributed = torch.distributed.is_initialized() and \\\n torch.distributed.get_world_size() > 1\n if _amp_state.verbosity > 0:\n if rank0:\n if distributed:\n if torch.distributed.get_rank() == 0:\n print(msg)\n else:\n print(msg)\n else:\n print(msg)\n\n\n# def iter_params(param_groups):\n# for group in param_groups:\n# for p in group['params']:\n# yield p\n\n\ndef master_params(optimizer):\n \"\"\"\n Generator expression that iterates over the params owned by ``optimizer``.\n\n Args:\n optimizer: An optimizer previously returned from ``amp.initialize``.\n \"\"\"\n for group in optimizer.param_groups:\n for p in group['params']:\n yield p\n", "path": "apex/amp/_amp_state.py"}]} | 1,101 | 268 |
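A minimal sketch of the call-time check the golden diff introduces; it assumes only that torch is importable, and the verbosity/rank gating of the real function is condensed for brevity:

```python
import torch


def maybe_print(msg, rank0=False):
    # Evaluated on every call, so it works regardless of how (or when)
    # torch.distributed was initialized -- env://, spawn, or a custom launcher.
    distributed = (
        torch.distributed.is_initialized()
        and torch.distributed.get_world_size() > 1
    )
    # Print unless we are a non-zero rank in a rank0-only, distributed run.
    if not rank0 or not distributed or torch.distributed.get_rank() == 0:
        print(msg)
```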
gh_patches_debug_24424 | rasdani/github-patches | git_diff | cisagov__manage.get.gov-2112 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Domain request table: Capture the "CISA region" a `domain request` is in
### Issue description
_As an analyst
I want to know which [CISA region](https://www.cisa.gov/about/regions) a request is in
so that if my research doesn't determine a request is authentic/eligible, I can know which region I need to reach out to_
Each US state/territory is in a "CISA region". When we need additional assistance in verification, we can reach out to regional contacts. Having regional designations in-registrar means we don't have to look it up, saving analyst time.
### Acceptance criteria
- [ ] For **non-federal requests**, the domain request detail page shows the CISA Region number based on the state abbreviation code of the organization address in the request. (See markup in "Additional Context")
- [ ] For **federal requests**, the domain request detail page shows "N/A" for the CISA region number, and no table lookup is necessary.
- [ ] The CISA region is not displayed on any user-facing views.
### Additional context
Please make this reusable by domain information as well. consider domain helper or other utility that can then take in the org address state/territory abbreviation and returns the number for the region. Regions and state abbreviations haven't changed in a long time and aren't expected to change in the future, they do not need to be in a table and a simple dictionary lookup (while ugly) is probably the simplest solution.
Based on the official two-letter state/territory abbreviation from the organization address, use the data in the following table to determine the region number:
| Region | Locales |
| ------- | ----- |
| 1 |Connecticut, Maine, Massachusetts, New Hampshire, Rhode Island, Vermont|
| 2 |New Jersey, New York, Puerto Rico, U.S. Virgin Islands|
|3| Delaware, District of Columbia, Maryland, Pennsylvania, Virginia, West Virginia|
|4| Alabama, Florida, Georgia, Kentucky, Mississippi, North Carolina, South Carolina, Tennessee|
|5| Illinois, Indiana, Michigan, Minnesota, Ohio, Wisconsin|
|6| Arkansas, Louisiana, New Mexico, Oklahoma, Texas|
|7|Iowa, Kansas, Missouri, Nebraska|
|8|Colorado, Montana, North Dakota, South Dakota, Utah, Wyoming|
|9|Arizona, California, Hawaii, Nevada, Guam, American Samoa, Commonwealth of the Northern Mariana Islands|
|10|Alaska, Idaho, Oregon, Washington|
Click Image to open Miro:
[](https://miro.com/app/board/uXjVKNvtde0=/?moveToWidget=3458764587423408611&cot=14)
### Links to other issues
Blocks #2095
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/registrar/templatetags/custom_filters.py`
Content:
```
1 import logging
2 from django import template
3 import re
4 from registrar.models.domain_request import DomainRequest
5
6 register = template.Library()
7 logger = logging.getLogger(__name__)
8
9
10 @register.filter(name="extract_value")
11 def extract_value(html_input):
12 match = re.search(r'value="([^"]*)"', html_input)
13 if match:
14 return match.group(1)
15 return ""
16
17
18 @register.filter
19 def extract_a_text(value):
20 # Use regex to extract the text within the <a> tag
21 pattern = r"<a\b[^>]*>(.*?)</a>"
22 match = re.search(pattern, value)
23 if match:
24 extracted_text = match.group(1)
25 else:
26 extracted_text = ""
27
28 return extracted_text
29
30
31 @register.filter
32 def find_index(haystack, needle):
33 try:
34 return haystack.index(needle)
35 except ValueError:
36 return -1
37
38
39 @register.filter
40 def slice_after(value, substring):
41 index = value.find(substring)
42 if index != -1:
43 result = value[index + len(substring) :]
44 return result
45 return value
46
47
48 @register.filter
49 def contains_checkbox(html_list):
50 for html_string in html_list:
51 if re.search(r'<input[^>]*type="checkbox"', html_string):
52 return True
53 return False
54
55
56 @register.filter
57 def get_organization_long_name(generic_org_type):
58 organization_choices_dict = dict(DomainRequest.OrganizationChoicesVerbose.choices)
59 long_form_type = organization_choices_dict[generic_org_type]
60 if long_form_type is None:
61 logger.error("Organization type error, triggered by a template's custom filter")
62 return "Error"
63
64 return long_form_type
65
66
67 @register.filter(name="has_permission")
68 def has_permission(user, permission):
69 return user.has_perm(permission)
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/registrar/templatetags/custom_filters.py b/src/registrar/templatetags/custom_filters.py
--- a/src/registrar/templatetags/custom_filters.py
+++ b/src/registrar/templatetags/custom_filters.py
@@ -67,3 +67,69 @@
@register.filter(name="has_permission")
def has_permission(user, permission):
return user.has_perm(permission)
+
+
[email protected]
+def get_region(state):
+ if state and isinstance(state, str):
+ regions = {
+ "CT": 1,
+ "ME": 1,
+ "MA": 1,
+ "NH": 1,
+ "RI": 1,
+ "VT": 1,
+ "NJ": 2,
+ "NY": 2,
+ "PR": 2,
+ "VI": 2,
+ "DE": 3,
+ "DC": 3,
+ "MD": 3,
+ "PA": 3,
+ "VA": 3,
+ "WV": 3,
+ "AL": 4,
+ "FL": 4,
+ "GA": 4,
+ "KY": 4,
+ "MS": 4,
+ "NC": 4,
+ "SC": 4,
+ "TN": 4,
+ "IL": 5,
+ "IN": 5,
+ "MI": 5,
+ "MN": 5,
+ "OH": 5,
+ "WI": 5,
+ "AR": 6,
+ "LA": 6,
+ "NM": 6,
+ "OK": 6,
+ "TX": 6,
+ "IA": 7,
+ "KS": 7,
+ "MO": 7,
+ "NE": 7,
+ "CO": 8,
+ "MT": 8,
+ "ND": 8,
+ "SD": 8,
+ "UT": 8,
+ "WY": 8,
+ "AZ": 9,
+ "CA": 9,
+ "HI": 9,
+ "NV": 9,
+ "GU": 9,
+ "AS": 9,
+ "MP": 9,
+ "AK": 10,
+ "ID": 10,
+ "OR": 10,
+ "WA": 10,
+ }
+ return regions.get(state.upper(), "N/A")
+ else:
+ return None
| {"golden_diff": "diff --git a/src/registrar/templatetags/custom_filters.py b/src/registrar/templatetags/custom_filters.py\n--- a/src/registrar/templatetags/custom_filters.py\n+++ b/src/registrar/templatetags/custom_filters.py\n@@ -67,3 +67,69 @@\n @register.filter(name=\"has_permission\")\n def has_permission(user, permission):\n return user.has_perm(permission)\n+\n+\[email protected]\n+def get_region(state):\n+ if state and isinstance(state, str):\n+ regions = {\n+ \"CT\": 1,\n+ \"ME\": 1,\n+ \"MA\": 1,\n+ \"NH\": 1,\n+ \"RI\": 1,\n+ \"VT\": 1,\n+ \"NJ\": 2,\n+ \"NY\": 2,\n+ \"PR\": 2,\n+ \"VI\": 2,\n+ \"DE\": 3,\n+ \"DC\": 3,\n+ \"MD\": 3,\n+ \"PA\": 3,\n+ \"VA\": 3,\n+ \"WV\": 3,\n+ \"AL\": 4,\n+ \"FL\": 4,\n+ \"GA\": 4,\n+ \"KY\": 4,\n+ \"MS\": 4,\n+ \"NC\": 4,\n+ \"SC\": 4,\n+ \"TN\": 4,\n+ \"IL\": 5,\n+ \"IN\": 5,\n+ \"MI\": 5,\n+ \"MN\": 5,\n+ \"OH\": 5,\n+ \"WI\": 5,\n+ \"AR\": 6,\n+ \"LA\": 6,\n+ \"NM\": 6,\n+ \"OK\": 6,\n+ \"TX\": 6,\n+ \"IA\": 7,\n+ \"KS\": 7,\n+ \"MO\": 7,\n+ \"NE\": 7,\n+ \"CO\": 8,\n+ \"MT\": 8,\n+ \"ND\": 8,\n+ \"SD\": 8,\n+ \"UT\": 8,\n+ \"WY\": 8,\n+ \"AZ\": 9,\n+ \"CA\": 9,\n+ \"HI\": 9,\n+ \"NV\": 9,\n+ \"GU\": 9,\n+ \"AS\": 9,\n+ \"MP\": 9,\n+ \"AK\": 10,\n+ \"ID\": 10,\n+ \"OR\": 10,\n+ \"WA\": 10,\n+ }\n+ return regions.get(state.upper(), \"N/A\")\n+ else:\n+ return None\n", "issue": "Domain request table: Capture the \"CISA region\" a `domain request` is in\n### Issue description\n\n_As an analyst\nI want to know which [CISA region](https://www.cisa.gov/about/regions) a request is in \nso that if my research doesn't determine a request is authentic/eligible, I can know which region I need to reach out to_\n\nEach US state/territory is in a \"CISA region\". When we need additional assistance in verification, we can reach out to regional contacts. Having regional designations in-registrar means we don't have to look it up, saving analyst time.\n\n### Acceptance criteria\n\n- [ ] For **non-federal requests**, the domain request detail page shows the CISA Region number based on the state abbreviation code of the organization address in the request. (See markup in \"Additional Context\")\n- [ ] For **federal requests**, the domain request detail page shows \"N/A\" for the CISA region number, and no table lookup is necessary.\n- [ ] The CISA region is not displayed on any user-facing views.\n\n### Additional context\nPlease make this reusable by domain information as well. consider domain helper or other utility that can then take in the org address state/territory abbreviation and returns the number for the region. Regions and state abbreviations haven't changed in a long time and aren't expected to change in the future, they do not need to be in a table and a simple dictionary lookup (while ugly) is probably the simplest solution.\n\nBased on the official two-letter state/territory abbreviation from the organization address, use the data in the following table to determine the region number:\n\n| Region | Locales |\n| ------- | ----- |\n| 1 |Connecticut, Maine, Massachusetts, New Hampshire, Rhode Island, Vermont|\n| 2 |New Jersey, New York, Puerto Rico, U.S. 
Virgin Islands|\n|3| Delaware, District of Columbia, Maryland, Pennsylvania, Virginia, West Virginia|\n|4| Alabama, Florida, Georgia, Kentucky, Mississippi, North Carolina, South Carolina, Tennessee|\n|5| Illinois, Indiana, Michigan, Minnesota, Ohio, Wisconsin|\n|6| Arkansas, Louisiana, New Mexico, Oklahoma, Texas|\n|7|Iowa, Kansas, Missouri, Nebraska|\n|8|Colorado, Montana, North Dakota, South Dakota, Utah, Wyoming|\n|9|Arizona, California, Hawaii, Nevada, Guam, American Samoa, Commonwealth of the Northern Mariana Islands|\n|10|Alaska, Idaho, Oregon, Washington|\n\nClick Image to open Miro:\n[](https://miro.com/app/board/uXjVKNvtde0=/?moveToWidget=3458764587423408611&cot=14)\n\n### Links to other issues\n\nBlocks #2095 \n", "before_files": [{"content": "import logging\nfrom django import template\nimport re\nfrom registrar.models.domain_request import DomainRequest\n\nregister = template.Library()\nlogger = logging.getLogger(__name__)\n\n\[email protected](name=\"extract_value\")\ndef extract_value(html_input):\n match = re.search(r'value=\"([^\"]*)\"', html_input)\n if match:\n return match.group(1)\n return \"\"\n\n\[email protected]\ndef extract_a_text(value):\n # Use regex to extract the text within the <a> tag\n pattern = r\"<a\\b[^>]*>(.*?)</a>\"\n match = re.search(pattern, value)\n if match:\n extracted_text = match.group(1)\n else:\n extracted_text = \"\"\n\n return extracted_text\n\n\[email protected]\ndef find_index(haystack, needle):\n try:\n return haystack.index(needle)\n except ValueError:\n return -1\n\n\[email protected]\ndef slice_after(value, substring):\n index = value.find(substring)\n if index != -1:\n result = value[index + len(substring) :]\n return result\n return value\n\n\[email protected]\ndef contains_checkbox(html_list):\n for html_string in html_list:\n if re.search(r'<input[^>]*type=\"checkbox\"', html_string):\n return True\n return False\n\n\[email protected]\ndef get_organization_long_name(generic_org_type):\n organization_choices_dict = dict(DomainRequest.OrganizationChoicesVerbose.choices)\n long_form_type = organization_choices_dict[generic_org_type]\n if long_form_type is None:\n logger.error(\"Organization type error, triggered by a template's custom filter\")\n return \"Error\"\n\n return long_form_type\n\n\[email protected](name=\"has_permission\")\ndef has_permission(user, permission):\n return user.has_perm(permission)\n", "path": "src/registrar/templatetags/custom_filters.py"}], "after_files": [{"content": "import logging\nfrom django import template\nimport re\nfrom registrar.models.domain_request import DomainRequest\n\nregister = template.Library()\nlogger = logging.getLogger(__name__)\n\n\[email protected](name=\"extract_value\")\ndef extract_value(html_input):\n match = re.search(r'value=\"([^\"]*)\"', html_input)\n if match:\n return match.group(1)\n return \"\"\n\n\[email protected]\ndef extract_a_text(value):\n # Use regex to extract the text within the <a> tag\n pattern = r\"<a\\b[^>]*>(.*?)</a>\"\n match = re.search(pattern, value)\n if match:\n extracted_text = match.group(1)\n else:\n extracted_text = \"\"\n\n return extracted_text\n\n\[email protected]\ndef find_index(haystack, needle):\n try:\n return haystack.index(needle)\n except ValueError:\n return -1\n\n\[email protected]\ndef slice_after(value, substring):\n index = value.find(substring)\n if index != -1:\n result = value[index + len(substring) :]\n return result\n return value\n\n\[email protected]\ndef contains_checkbox(html_list):\n for html_string in html_list:\n if 
re.search(r'<input[^>]*type=\"checkbox\"', html_string):\n return True\n return False\n\n\[email protected]\ndef get_organization_long_name(generic_org_type):\n organization_choices_dict = dict(DomainRequest.OrganizationChoicesVerbose.choices)\n long_form_type = organization_choices_dict[generic_org_type]\n if long_form_type is None:\n logger.error(\"Organization type error, triggered by a template's custom filter\")\n return \"Error\"\n\n return long_form_type\n\n\[email protected](name=\"has_permission\")\ndef has_permission(user, permission):\n return user.has_perm(permission)\n\n\[email protected]\ndef get_region(state):\n if state and isinstance(state, str):\n regions = {\n \"CT\": 1,\n \"ME\": 1,\n \"MA\": 1,\n \"NH\": 1,\n \"RI\": 1,\n \"VT\": 1,\n \"NJ\": 2,\n \"NY\": 2,\n \"PR\": 2,\n \"VI\": 2,\n \"DE\": 3,\n \"DC\": 3,\n \"MD\": 3,\n \"PA\": 3,\n \"VA\": 3,\n \"WV\": 3,\n \"AL\": 4,\n \"FL\": 4,\n \"GA\": 4,\n \"KY\": 4,\n \"MS\": 4,\n \"NC\": 4,\n \"SC\": 4,\n \"TN\": 4,\n \"IL\": 5,\n \"IN\": 5,\n \"MI\": 5,\n \"MN\": 5,\n \"OH\": 5,\n \"WI\": 5,\n \"AR\": 6,\n \"LA\": 6,\n \"NM\": 6,\n \"OK\": 6,\n \"TX\": 6,\n \"IA\": 7,\n \"KS\": 7,\n \"MO\": 7,\n \"NE\": 7,\n \"CO\": 8,\n \"MT\": 8,\n \"ND\": 8,\n \"SD\": 8,\n \"UT\": 8,\n \"WY\": 8,\n \"AZ\": 9,\n \"CA\": 9,\n \"HI\": 9,\n \"NV\": 9,\n \"GU\": 9,\n \"AS\": 9,\n \"MP\": 9,\n \"AK\": 10,\n \"ID\": 10,\n \"OR\": 10,\n \"WA\": 10,\n }\n return regions.get(state.upper(), \"N/A\")\n else:\n return None\n", "path": "src/registrar/templatetags/custom_filters.py"}]} | 1,430 | 597 |
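A hedged illustration of how the `get_region` filter added in the diff behaves when called directly; in a template it would appear as `{{ state|get_region }}`:

```python
from registrar.templatetags.custom_filters import get_region

assert get_region("tx") == 6       # lookup is case-insensitive
assert get_region("PR") == 2       # territories are mapped as well
assert get_region("XX") == "N/A"   # unknown abbreviations fall back to "N/A"
assert get_region(None) is None    # non-string input returns None
```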
gh_patches_debug_2096 | rasdani/github-patches | git_diff | liqd__a4-product-1097 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
archived projects accessible via activity feed
At https://www.beteiligung.in/liqd/ all projects are private but I can see the content of the projects if I click on the activity feed. Even if not signed in.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apps/organisations/views.py`
Content:
```
1 from django.contrib.messages.views import SuccessMessageMixin
2 from django.utils.translation import ugettext_lazy as _
3 from django.views import generic
4 from django.views.generic import DetailView
5
6 from adhocracy4.actions.models import Action
7 from adhocracy4.projects.models import Project
8 from adhocracy4.rules import mixins as rules_mixins
9 from apps.projects import query
10
11 from . import forms
12 from .models import Organisation
13
14
15 class OrganisationView(DetailView):
16 template_name = 'organisation_landing_page.html'
17 model = Organisation
18 slug_url_kwarg = 'organisation_slug'
19
20 def get_context_data(self, **kwargs):
21 context = super().get_context_data(**kwargs)
22
23 project_list = Project.objects\
24 .filter(organisation=self.object,
25 is_archived=False,
26 is_draft=False)
27 project_list = query.filter_viewable(
28 project_list, self.request.user
29 )
30 context['project_list'] = project_list
31
32 context['action_list'] = Action.objects\
33 .filter(project__organisation=self.object)\
34 .filter_public()\
35 .exclude_updates()[:4]
36
37 context['stats'] = {
38 'users': 1204,
39 'items': 3425,
40 'comments': 23234,
41 'ratings': 134234,
42 }
43
44 return context
45
46
47 class InformationView(DetailView):
48 template_name = 'organisation_information.html'
49 model = Organisation
50 slug_url_kwarg = 'organisation_slug'
51
52
53 class ImprintView(DetailView):
54 template_name = 'organisation_imprint.html'
55 model = Organisation
56 slug_url_kwarg = 'organisation_slug'
57
58
59 class OrganisationUpdateView(rules_mixins.PermissionRequiredMixin,
60 SuccessMessageMixin,
61 generic.UpdateView):
62 model = Organisation
63 form_class = forms.OrganisationForm
64 slug_url_kwarg = 'organisation_slug'
65 template_name = 'organisation_form.html'
66 success_message = _('Organisation successfully updated.')
67 permission_required = 'a4_candy_organisations.change_organisation'
68 menu_item = 'organisation'
69
70 def get_success_url(self):
71 return self.request.path
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/apps/organisations/views.py b/apps/organisations/views.py
--- a/apps/organisations/views.py
+++ b/apps/organisations/views.py
@@ -31,6 +31,7 @@
context['action_list'] = Action.objects\
.filter(project__organisation=self.object)\
+ .filter(project__is_archived=False) \
.filter_public()\
.exclude_updates()[:4]
| {"golden_diff": "diff --git a/apps/organisations/views.py b/apps/organisations/views.py\n--- a/apps/organisations/views.py\n+++ b/apps/organisations/views.py\n@@ -31,6 +31,7 @@\n \n context['action_list'] = Action.objects\\\n .filter(project__organisation=self.object)\\\n+ .filter(project__is_archived=False) \\\n .filter_public()\\\n .exclude_updates()[:4]\n", "issue": "archived projects accessible via activity feed\n At https://www.beteiligung.in/liqd/ all projects are private but I can see the content of the projects if I click on the activity feed. Even if not signed in.\n", "before_files": [{"content": "from django.contrib.messages.views import SuccessMessageMixin\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views import generic\nfrom django.views.generic import DetailView\n\nfrom adhocracy4.actions.models import Action\nfrom adhocracy4.projects.models import Project\nfrom adhocracy4.rules import mixins as rules_mixins\nfrom apps.projects import query\n\nfrom . import forms\nfrom .models import Organisation\n\n\nclass OrganisationView(DetailView):\n template_name = 'organisation_landing_page.html'\n model = Organisation\n slug_url_kwarg = 'organisation_slug'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n project_list = Project.objects\\\n .filter(organisation=self.object,\n is_archived=False,\n is_draft=False)\n project_list = query.filter_viewable(\n project_list, self.request.user\n )\n context['project_list'] = project_list\n\n context['action_list'] = Action.objects\\\n .filter(project__organisation=self.object)\\\n .filter_public()\\\n .exclude_updates()[:4]\n\n context['stats'] = {\n 'users': 1204,\n 'items': 3425,\n 'comments': 23234,\n 'ratings': 134234,\n }\n\n return context\n\n\nclass InformationView(DetailView):\n template_name = 'organisation_information.html'\n model = Organisation\n slug_url_kwarg = 'organisation_slug'\n\n\nclass ImprintView(DetailView):\n template_name = 'organisation_imprint.html'\n model = Organisation\n slug_url_kwarg = 'organisation_slug'\n\n\nclass OrganisationUpdateView(rules_mixins.PermissionRequiredMixin,\n SuccessMessageMixin,\n generic.UpdateView):\n model = Organisation\n form_class = forms.OrganisationForm\n slug_url_kwarg = 'organisation_slug'\n template_name = 'organisation_form.html'\n success_message = _('Organisation successfully updated.')\n permission_required = 'a4_candy_organisations.change_organisation'\n menu_item = 'organisation'\n\n def get_success_url(self):\n return self.request.path\n", "path": "apps/organisations/views.py"}], "after_files": [{"content": "from django.contrib.messages.views import SuccessMessageMixin\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views import generic\nfrom django.views.generic import DetailView\n\nfrom adhocracy4.actions.models import Action\nfrom adhocracy4.projects.models import Project\nfrom adhocracy4.rules import mixins as rules_mixins\nfrom apps.projects import query\n\nfrom . 
import forms\nfrom .models import Organisation\n\n\nclass OrganisationView(DetailView):\n template_name = 'organisation_landing_page.html'\n model = Organisation\n slug_url_kwarg = 'organisation_slug'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n project_list = Project.objects\\\n .filter(organisation=self.object,\n is_archived=False,\n is_draft=False)\n project_list = query.filter_viewable(\n project_list, self.request.user\n )\n context['project_list'] = project_list\n\n context['action_list'] = Action.objects\\\n .filter(project__organisation=self.object)\\\n .filter(project__is_archived=False) \\\n .filter_public()\\\n .exclude_updates()[:4]\n\n context['stats'] = {\n 'users': 1204,\n 'items': 3425,\n 'comments': 23234,\n 'ratings': 134234,\n }\n\n return context\n\n\nclass InformationView(DetailView):\n template_name = 'organisation_information.html'\n model = Organisation\n slug_url_kwarg = 'organisation_slug'\n\n\nclass ImprintView(DetailView):\n template_name = 'organisation_imprint.html'\n model = Organisation\n slug_url_kwarg = 'organisation_slug'\n\n\nclass OrganisationUpdateView(rules_mixins.PermissionRequiredMixin,\n SuccessMessageMixin,\n generic.UpdateView):\n model = Organisation\n form_class = forms.OrganisationForm\n slug_url_kwarg = 'organisation_slug'\n template_name = 'organisation_form.html'\n success_message = _('Organisation successfully updated.')\n permission_required = 'a4_candy_organisations.change_organisation'\n menu_item = 'organisation'\n\n def get_success_url(self):\n return self.request.path\n", "path": "apps/organisations/views.py"}]} | 904 | 94 |
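A sketch of the corrected queryset from the diff above, wrapped in a hypothetical helper so it is self-contained; the helper name is illustrative only:

```python
from adhocracy4.actions.models import Action


def recent_public_actions(organisation, limit=4):
    # Archived projects are excluded before the public/update filters,
    # so their content can no longer leak through the activity feed.
    return (
        Action.objects
        .filter(project__organisation=organisation)
        .filter(project__is_archived=False)
        .filter_public()
        .exclude_updates()[:limit]
    )
```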
gh_patches_debug_33794 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-2634 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider motel6 is broken
During the global build at 2021-08-18-14-42-26, spider **motel6** failed with **0 features** and **2 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/logs/motel6.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/output/motel6.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/output/motel6.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/motel6.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import scrapy
3 import json
4 from locations.items import GeojsonPointItem
5
6 brand_lookup = {
7 "MS": "Motel 6",
8 "SS": "Studio 6",
9 "HS": "Hotel 6"
10 }
11
12
13 class Motel6Spider(scrapy.Spider):
14 name = "motel6"
15 allowed_domains = ["motel6.com"]
16 start_urls = (
17 'https://www.motel6.com/var/g6/hotel-summary/ms.infinity.1.json',
18 )
19
20 def parse(self, response):
21 idata = json.loads(response.body_as_unicode())
22 storeids = idata.keys()
23 URL = 'https://www.motel6.com/var/g6/hotel-information/en/{}.json'
24 for storeid in storeids:
25 try:
26 int(storeid)
27 except ValueError:
28 continue
29 try:
30 yield scrapy.Request(URL.format(storeid), callback=self.parse_hotel)
31 except ValueError:
32 continue
33
34 def parse_hotel(self, response):
35 mdata = json.loads(response.body_as_unicode())
36
37 properties = {
38 'ref': mdata["property_id"],
39 'name': mdata["name"],
40 'addr_full': mdata["address"],
41 'city': mdata["city"],
42 'postcode': mdata["zip"],
43 'lat': mdata["latitude"],
44 'lon': mdata["longitude"],
45 'phone': mdata["phone"],
46 'state': mdata["state"],
47 'website': mdata["microsite_url"],
48 'brand': brand_lookup[mdata["brand_id"]],
49 }
50
51 yield GeojsonPointItem(**properties)
52
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/motel6.py b/locations/spiders/motel6.py
--- a/locations/spiders/motel6.py
+++ b/locations/spiders/motel6.py
@@ -14,20 +14,21 @@
name = "motel6"
allowed_domains = ["motel6.com"]
start_urls = (
- 'https://www.motel6.com/var/g6/hotel-summary/ms.infinity.1.json',
+ 'https://www.motel6.com/content/g6-cache/property-summary.1.json',
)
def parse(self, response):
idata = json.loads(response.body_as_unicode())
- storeids = idata.keys()
- URL = 'https://www.motel6.com/var/g6/hotel-information/en/{}.json'
- for storeid in storeids:
+ url = 'https://www.motel6.com/bin/g6/propertydata.{}.json'
+
+ for storeid in idata.keys():
try:
int(storeid)
except ValueError:
continue
+
try:
- yield scrapy.Request(URL.format(storeid), callback=self.parse_hotel)
+ yield scrapy.Request(url.format(storeid), callback=self.parse_hotel)
except ValueError:
continue
@@ -35,17 +36,17 @@
mdata = json.loads(response.body_as_unicode())
properties = {
- 'ref': mdata["property_id"],
- 'name': mdata["name"],
- 'addr_full': mdata["address"],
- 'city': mdata["city"],
- 'postcode': mdata["zip"],
- 'lat': mdata["latitude"],
- 'lon': mdata["longitude"],
- 'phone': mdata["phone"],
- 'state': mdata["state"],
- 'website': mdata["microsite_url"],
- 'brand': brand_lookup[mdata["brand_id"]],
+ 'ref': mdata["property_id"],
+ 'name': mdata["name"],
+ 'addr_full': mdata["address"],
+ 'city': mdata["city"],
+ 'postcode': mdata["zip"],
+ 'lat': mdata["latitude"],
+ 'lon': mdata["longitude"],
+ 'phone': mdata["phone"],
+ 'state': mdata["state"],
+ 'website': mdata["microsite_url"],
+ 'brand': brand_lookup[mdata["brand_id"]],
}
yield GeojsonPointItem(**properties)
| {"golden_diff": "diff --git a/locations/spiders/motel6.py b/locations/spiders/motel6.py\n--- a/locations/spiders/motel6.py\n+++ b/locations/spiders/motel6.py\n@@ -14,20 +14,21 @@\n name = \"motel6\"\n allowed_domains = [\"motel6.com\"]\n start_urls = (\n- 'https://www.motel6.com/var/g6/hotel-summary/ms.infinity.1.json',\n+ 'https://www.motel6.com/content/g6-cache/property-summary.1.json',\n )\n \n def parse(self, response):\n idata = json.loads(response.body_as_unicode())\n- storeids = idata.keys()\n- URL = 'https://www.motel6.com/var/g6/hotel-information/en/{}.json'\n- for storeid in storeids:\n+ url = 'https://www.motel6.com/bin/g6/propertydata.{}.json'\n+\n+ for storeid in idata.keys():\n try:\n int(storeid)\n except ValueError:\n continue\n+\n try:\n- yield scrapy.Request(URL.format(storeid), callback=self.parse_hotel)\n+ yield scrapy.Request(url.format(storeid), callback=self.parse_hotel)\n except ValueError:\n continue\n \n@@ -35,17 +36,17 @@\n mdata = json.loads(response.body_as_unicode())\n \n properties = {\n- 'ref': mdata[\"property_id\"],\n- 'name': mdata[\"name\"],\n- 'addr_full': mdata[\"address\"],\n- 'city': mdata[\"city\"],\n- 'postcode': mdata[\"zip\"],\n- 'lat': mdata[\"latitude\"],\n- 'lon': mdata[\"longitude\"],\n- 'phone': mdata[\"phone\"],\n- 'state': mdata[\"state\"],\n- 'website': mdata[\"microsite_url\"],\n- 'brand': brand_lookup[mdata[\"brand_id\"]],\n+ 'ref': mdata[\"property_id\"],\n+ 'name': mdata[\"name\"],\n+ 'addr_full': mdata[\"address\"],\n+ 'city': mdata[\"city\"],\n+ 'postcode': mdata[\"zip\"],\n+ 'lat': mdata[\"latitude\"],\n+ 'lon': mdata[\"longitude\"],\n+ 'phone': mdata[\"phone\"],\n+ 'state': mdata[\"state\"],\n+ 'website': mdata[\"microsite_url\"],\n+ 'brand': brand_lookup[mdata[\"brand_id\"]],\n }\n \n yield GeojsonPointItem(**properties)\n", "issue": "Spider motel6 is broken\nDuring the global build at 2021-08-18-14-42-26, spider **motel6** failed with **0 features** and **2 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/logs/motel6.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/output/motel6.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/output/motel6.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nfrom locations.items import GeojsonPointItem\n\nbrand_lookup = {\n \"MS\": \"Motel 6\",\n \"SS\": \"Studio 6\",\n \"HS\": \"Hotel 6\"\n}\n\n\nclass Motel6Spider(scrapy.Spider):\n name = \"motel6\"\n allowed_domains = [\"motel6.com\"]\n start_urls = (\n 'https://www.motel6.com/var/g6/hotel-summary/ms.infinity.1.json',\n )\n\n def parse(self, response):\n idata = json.loads(response.body_as_unicode())\n storeids = idata.keys()\n URL = 'https://www.motel6.com/var/g6/hotel-information/en/{}.json'\n for storeid in storeids:\n try:\n int(storeid)\n except ValueError:\n continue\n try:\n yield scrapy.Request(URL.format(storeid), callback=self.parse_hotel)\n except ValueError:\n continue\n\n def parse_hotel(self, response):\n mdata = json.loads(response.body_as_unicode())\n\n properties = {\n 'ref': mdata[\"property_id\"],\n 'name': mdata[\"name\"],\n 'addr_full': mdata[\"address\"],\n 'city': mdata[\"city\"],\n 'postcode': mdata[\"zip\"],\n 'lat': mdata[\"latitude\"],\n 'lon': mdata[\"longitude\"],\n 'phone': mdata[\"phone\"],\n 'state': mdata[\"state\"],\n 'website': mdata[\"microsite_url\"],\n 'brand': brand_lookup[mdata[\"brand_id\"]],\n }\n\n yield 
GeojsonPointItem(**properties)\n", "path": "locations/spiders/motel6.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nfrom locations.items import GeojsonPointItem\n\nbrand_lookup = {\n \"MS\": \"Motel 6\",\n \"SS\": \"Studio 6\",\n \"HS\": \"Hotel 6\"\n}\n\n\nclass Motel6Spider(scrapy.Spider):\n name = \"motel6\"\n allowed_domains = [\"motel6.com\"]\n start_urls = (\n 'https://www.motel6.com/content/g6-cache/property-summary.1.json',\n )\n\n def parse(self, response):\n idata = json.loads(response.body_as_unicode())\n url = 'https://www.motel6.com/bin/g6/propertydata.{}.json'\n\n for storeid in idata.keys():\n try:\n int(storeid)\n except ValueError:\n continue\n\n try:\n yield scrapy.Request(url.format(storeid), callback=self.parse_hotel)\n except ValueError:\n continue\n\n def parse_hotel(self, response):\n mdata = json.loads(response.body_as_unicode())\n\n properties = {\n 'ref': mdata[\"property_id\"],\n 'name': mdata[\"name\"],\n 'addr_full': mdata[\"address\"],\n 'city': mdata[\"city\"],\n 'postcode': mdata[\"zip\"],\n 'lat': mdata[\"latitude\"],\n 'lon': mdata[\"longitude\"],\n 'phone': mdata[\"phone\"],\n 'state': mdata[\"state\"],\n 'website': mdata[\"microsite_url\"],\n 'brand': brand_lookup[mdata[\"brand_id\"]],\n }\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/motel6.py"}]} | 896 | 553 |
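A standalone sketch of the fixed crawl flow using requests instead of scrapy; both endpoints come from the golden diff, though the site may have changed since:

```python
import requests

SUMMARY_URL = "https://www.motel6.com/content/g6-cache/property-summary.1.json"
PROPERTY_URL = "https://www.motel6.com/bin/g6/propertydata.{}.json"

summary = requests.get(SUMMARY_URL, timeout=30).json()
for store_id in summary:
    if not store_id.isdigit():  # same intent as the spider's int() try/except
        continue
    hotel = requests.get(PROPERTY_URL.format(store_id), timeout=30).json()
    print(hotel["property_id"], hotel["name"], hotel["brand_id"])
```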
gh_patches_debug_11088 | rasdani/github-patches | git_diff | getsentry__sentry-python-773 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DjangoIntegration conflict with MiddlewareMixin & TemplateView
django version: 3.0
sentry-sdk version: 0.14.3
My project has a middleware using Django's `django.utils.deprecation.MiddlewareMixin`. Visiting a view which subclasses `django.views.generic.TemplateView` while Sentry is active results in the following exception:
```
AttributeError: 'function' object has no attribute '__self__'
File "django/core/handlers/exception.py", line 47, in inner
response = get_response(request)
File "django/core/handlers/base.py", line 196, in _get_response
middleware_method.__self__.__class__.__name__,
```
The following classes & url config should be sufficient to demonstrate the problem:
```
from django.utils.deprecation import MiddlewareMixin
from django.views.generic import TemplateView
# ...in myapp.middleware.py
class DemonstratesConflictMiddleware(MiddlewareMixin):
def process_template_response(self, request, response):
return response
# ...in myapp.views.py
class DemonstratesConflictView(TemplateView):
template_name = "index.html"
# ...in urls.py
import myapp.views
urlpatterns += [
path('/', myapp.views.DemonstratesConflictView.as_view(), name='throws-exception'),
]
# ... in settings.py
MIDDLEWARE += ['myapp.middleware.DemonstratesConflictMiddleware']
sentry_sdk.init(
dsn="OMITTED",
integrations=[DjangoIntegration()],
)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sentry_sdk/integrations/django/middleware.py`
Content:
```
1 """
2 Create spans from Django middleware invocations
3 """
4
5 from django import VERSION as DJANGO_VERSION
6
7 from sentry_sdk import Hub
8 from sentry_sdk._functools import wraps
9 from sentry_sdk._types import MYPY
10 from sentry_sdk.utils import (
11 ContextVar,
12 transaction_from_function,
13 capture_internal_exceptions,
14 )
15
16 if MYPY:
17 from typing import Any
18 from typing import Callable
19 from typing import TypeVar
20
21 F = TypeVar("F", bound=Callable[..., Any])
22
23 _import_string_should_wrap_middleware = ContextVar(
24 "import_string_should_wrap_middleware"
25 )
26
27 if DJANGO_VERSION < (1, 7):
28 import_string_name = "import_by_path"
29 else:
30 import_string_name = "import_string"
31
32
33 def patch_django_middlewares():
34 # type: () -> None
35 from django.core.handlers import base
36
37 old_import_string = getattr(base, import_string_name)
38
39 def sentry_patched_import_string(dotted_path):
40 # type: (str) -> Any
41 rv = old_import_string(dotted_path)
42
43 if _import_string_should_wrap_middleware.get(None):
44 rv = _wrap_middleware(rv, dotted_path)
45
46 return rv
47
48 setattr(base, import_string_name, sentry_patched_import_string)
49
50 old_load_middleware = base.BaseHandler.load_middleware
51
52 def sentry_patched_load_middleware(*args, **kwargs):
53 # type: (Any, Any) -> Any
54 _import_string_should_wrap_middleware.set(True)
55 try:
56 return old_load_middleware(*args, **kwargs)
57 finally:
58 _import_string_should_wrap_middleware.set(False)
59
60 base.BaseHandler.load_middleware = sentry_patched_load_middleware
61
62
63 def _wrap_middleware(middleware, middleware_name):
64 # type: (Any, str) -> Any
65 from sentry_sdk.integrations.django import DjangoIntegration
66
67 def _get_wrapped_method(old_method):
68 # type: (F) -> F
69 with capture_internal_exceptions():
70
71 def sentry_wrapped_method(*args, **kwargs):
72 # type: (*Any, **Any) -> Any
73 hub = Hub.current
74 integration = hub.get_integration(DjangoIntegration)
75 if integration is None or not integration.middleware_spans:
76 return old_method(*args, **kwargs)
77
78 function_name = transaction_from_function(old_method)
79
80 description = middleware_name
81 function_basename = getattr(old_method, "__name__", None)
82 if function_basename:
83 description = "{}.{}".format(description, function_basename)
84
85 with hub.start_span(
86 op="django.middleware", description=description
87 ) as span:
88 span.set_tag("django.function_name", function_name)
89 span.set_tag("django.middleware_name", middleware_name)
90 return old_method(*args, **kwargs)
91
92 try:
93 # fails for __call__ of function on Python 2 (see py2.7-django-1.11)
94 return wraps(old_method)(sentry_wrapped_method) # type: ignore
95 except Exception:
96 return sentry_wrapped_method # type: ignore
97
98 return old_method
99
100 class SentryWrappingMiddleware(object):
101 def __init__(self, *args, **kwargs):
102 # type: (*Any, **Any) -> None
103 self._inner = middleware(*args, **kwargs)
104 self._call_method = None
105
106 # We need correct behavior for `hasattr()`, which we can only determine
107 # when we have an instance of the middleware we're wrapping.
108 def __getattr__(self, method_name):
109 # type: (str) -> Any
110 if method_name not in (
111 "process_request",
112 "process_view",
113 "process_template_response",
114 "process_response",
115 "process_exception",
116 ):
117 raise AttributeError()
118
119 old_method = getattr(self._inner, method_name)
120 rv = _get_wrapped_method(old_method)
121 self.__dict__[method_name] = rv
122 return rv
123
124 def __call__(self, *args, **kwargs):
125 # type: (*Any, **Any) -> Any
126 f = self._call_method
127 if f is None:
128 self._call_method = f = _get_wrapped_method(self._inner.__call__)
129 return f(*args, **kwargs)
130
131 if hasattr(middleware, "__name__"):
132 SentryWrappingMiddleware.__name__ = middleware.__name__
133
134 return SentryWrappingMiddleware
135
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sentry_sdk/integrations/django/middleware.py b/sentry_sdk/integrations/django/middleware.py
--- a/sentry_sdk/integrations/django/middleware.py
+++ b/sentry_sdk/integrations/django/middleware.py
@@ -91,9 +91,14 @@
try:
# fails for __call__ of function on Python 2 (see py2.7-django-1.11)
- return wraps(old_method)(sentry_wrapped_method) # type: ignore
+ sentry_wrapped_method = wraps(old_method)(sentry_wrapped_method)
+
+ # Necessary for Django 3.1
+ sentry_wrapped_method.__self__ = old_method.__self__ # type: ignore
except Exception:
- return sentry_wrapped_method # type: ignore
+ pass
+
+ return sentry_wrapped_method # type: ignore
return old_method
| {"golden_diff": "diff --git a/sentry_sdk/integrations/django/middleware.py b/sentry_sdk/integrations/django/middleware.py\n--- a/sentry_sdk/integrations/django/middleware.py\n+++ b/sentry_sdk/integrations/django/middleware.py\n@@ -91,9 +91,14 @@\n \n try:\n # fails for __call__ of function on Python 2 (see py2.7-django-1.11)\n- return wraps(old_method)(sentry_wrapped_method) # type: ignore\n+ sentry_wrapped_method = wraps(old_method)(sentry_wrapped_method)\n+\n+ # Necessary for Django 3.1\n+ sentry_wrapped_method.__self__ = old_method.__self__ # type: ignore\n except Exception:\n- return sentry_wrapped_method # type: ignore\n+ pass\n+\n+ return sentry_wrapped_method # type: ignore\n \n return old_method\n", "issue": "DjangoIntegration conflict with MiddlewareMixin & TemplateView\ndjango version: 3.0\r\nsentry-sdk version: 0.14.3\r\n\r\nMy project has a middleware using Django's `django.utils.deprecation.MiddlewareMixin`. Visiting a view which subclasses `django.views.generic.TemplateView` while Sentry is active results in the following exception:\r\n```\r\nAttributeError: 'function' object has no attribute '__self__'\r\n File \"django/core/handlers/exception.py\", line 47, in inner\r\n response = get_response(request)\r\n File \"django/core/handlers/base.py\", line 196, in _get_response\r\n middleware_method.__self__.__class__.__name__,\r\n```\r\n\r\nThe following classes & url config should be sufficient to demonstrate the problem:\r\n\r\n```\r\nfrom django.utils.deprecation import MiddlewareMixin\r\nfrom django.views.generic import TemplateView\r\n\r\n# ...in myapp.middleware.py\r\nclass DemonstratesConflictMiddleware(MiddlewareMixin):\r\n def process_template_response(self, request, response):\r\n return response\r\n\r\n# ...in myapp.views.py\r\nclass DemonstratesConflictView(TemplateView): \r\n template_name = \"index.html\"\r\n\r\n\r\n# ...in urls.py\r\nimport myapp.views\r\nurlpatterns += [\r\n path('/', myapp.views.DemonstratesConflictView.as_view(), name='throws-exception'),\r\n]\r\n\r\n# ... 
in settings.py\r\n\r\nMIDDLEWARE += ['myapp.middleware.DemonstratesConflictMiddleware']\r\n\r\nsentry_sdk.init(\r\n dsn=\"OMITTED\",\r\n integrations=[DjangoIntegration()],\r\n)\r\n\r\n```\n", "before_files": [{"content": "\"\"\"\nCreate spans from Django middleware invocations\n\"\"\"\n\nfrom django import VERSION as DJANGO_VERSION\n\nfrom sentry_sdk import Hub\nfrom sentry_sdk._functools import wraps\nfrom sentry_sdk._types import MYPY\nfrom sentry_sdk.utils import (\n ContextVar,\n transaction_from_function,\n capture_internal_exceptions,\n)\n\nif MYPY:\n from typing import Any\n from typing import Callable\n from typing import TypeVar\n\n F = TypeVar(\"F\", bound=Callable[..., Any])\n\n_import_string_should_wrap_middleware = ContextVar(\n \"import_string_should_wrap_middleware\"\n)\n\nif DJANGO_VERSION < (1, 7):\n import_string_name = \"import_by_path\"\nelse:\n import_string_name = \"import_string\"\n\n\ndef patch_django_middlewares():\n # type: () -> None\n from django.core.handlers import base\n\n old_import_string = getattr(base, import_string_name)\n\n def sentry_patched_import_string(dotted_path):\n # type: (str) -> Any\n rv = old_import_string(dotted_path)\n\n if _import_string_should_wrap_middleware.get(None):\n rv = _wrap_middleware(rv, dotted_path)\n\n return rv\n\n setattr(base, import_string_name, sentry_patched_import_string)\n\n old_load_middleware = base.BaseHandler.load_middleware\n\n def sentry_patched_load_middleware(*args, **kwargs):\n # type: (Any, Any) -> Any\n _import_string_should_wrap_middleware.set(True)\n try:\n return old_load_middleware(*args, **kwargs)\n finally:\n _import_string_should_wrap_middleware.set(False)\n\n base.BaseHandler.load_middleware = sentry_patched_load_middleware\n\n\ndef _wrap_middleware(middleware, middleware_name):\n # type: (Any, str) -> Any\n from sentry_sdk.integrations.django import DjangoIntegration\n\n def _get_wrapped_method(old_method):\n # type: (F) -> F\n with capture_internal_exceptions():\n\n def sentry_wrapped_method(*args, **kwargs):\n # type: (*Any, **Any) -> Any\n hub = Hub.current\n integration = hub.get_integration(DjangoIntegration)\n if integration is None or not integration.middleware_spans:\n return old_method(*args, **kwargs)\n\n function_name = transaction_from_function(old_method)\n\n description = middleware_name\n function_basename = getattr(old_method, \"__name__\", None)\n if function_basename:\n description = \"{}.{}\".format(description, function_basename)\n\n with hub.start_span(\n op=\"django.middleware\", description=description\n ) as span:\n span.set_tag(\"django.function_name\", function_name)\n span.set_tag(\"django.middleware_name\", middleware_name)\n return old_method(*args, **kwargs)\n\n try:\n # fails for __call__ of function on Python 2 (see py2.7-django-1.11)\n return wraps(old_method)(sentry_wrapped_method) # type: ignore\n except Exception:\n return sentry_wrapped_method # type: ignore\n\n return old_method\n\n class SentryWrappingMiddleware(object):\n def __init__(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n self._inner = middleware(*args, **kwargs)\n self._call_method = None\n\n # We need correct behavior for `hasattr()`, which we can only determine\n # when we have an instance of the middleware we're wrapping.\n def __getattr__(self, method_name):\n # type: (str) -> Any\n if method_name not in (\n \"process_request\",\n \"process_view\",\n \"process_template_response\",\n \"process_response\",\n \"process_exception\",\n ):\n raise AttributeError()\n\n old_method = 
getattr(self._inner, method_name)\n rv = _get_wrapped_method(old_method)\n self.__dict__[method_name] = rv\n return rv\n\n def __call__(self, *args, **kwargs):\n # type: (*Any, **Any) -> Any\n f = self._call_method\n if f is None:\n self._call_method = f = _get_wrapped_method(self._inner.__call__)\n return f(*args, **kwargs)\n\n if hasattr(middleware, \"__name__\"):\n SentryWrappingMiddleware.__name__ = middleware.__name__\n\n return SentryWrappingMiddleware\n", "path": "sentry_sdk/integrations/django/middleware.py"}], "after_files": [{"content": "\"\"\"\nCreate spans from Django middleware invocations\n\"\"\"\n\nfrom django import VERSION as DJANGO_VERSION\n\nfrom sentry_sdk import Hub\nfrom sentry_sdk._functools import wraps\nfrom sentry_sdk._types import MYPY\nfrom sentry_sdk.utils import (\n ContextVar,\n transaction_from_function,\n capture_internal_exceptions,\n)\n\nif MYPY:\n from typing import Any\n from typing import Callable\n from typing import TypeVar\n\n F = TypeVar(\"F\", bound=Callable[..., Any])\n\n_import_string_should_wrap_middleware = ContextVar(\n \"import_string_should_wrap_middleware\"\n)\n\nif DJANGO_VERSION < (1, 7):\n import_string_name = \"import_by_path\"\nelse:\n import_string_name = \"import_string\"\n\n\ndef patch_django_middlewares():\n # type: () -> None\n from django.core.handlers import base\n\n old_import_string = getattr(base, import_string_name)\n\n def sentry_patched_import_string(dotted_path):\n # type: (str) -> Any\n rv = old_import_string(dotted_path)\n\n if _import_string_should_wrap_middleware.get(None):\n rv = _wrap_middleware(rv, dotted_path)\n\n return rv\n\n setattr(base, import_string_name, sentry_patched_import_string)\n\n old_load_middleware = base.BaseHandler.load_middleware\n\n def sentry_patched_load_middleware(*args, **kwargs):\n # type: (Any, Any) -> Any\n _import_string_should_wrap_middleware.set(True)\n try:\n return old_load_middleware(*args, **kwargs)\n finally:\n _import_string_should_wrap_middleware.set(False)\n\n base.BaseHandler.load_middleware = sentry_patched_load_middleware\n\n\ndef _wrap_middleware(middleware, middleware_name):\n # type: (Any, str) -> Any\n from sentry_sdk.integrations.django import DjangoIntegration\n\n def _get_wrapped_method(old_method):\n # type: (F) -> F\n with capture_internal_exceptions():\n\n def sentry_wrapped_method(*args, **kwargs):\n # type: (*Any, **Any) -> Any\n hub = Hub.current\n integration = hub.get_integration(DjangoIntegration)\n if integration is None or not integration.middleware_spans:\n return old_method(*args, **kwargs)\n\n function_name = transaction_from_function(old_method)\n\n description = middleware_name\n function_basename = getattr(old_method, \"__name__\", None)\n if function_basename:\n description = \"{}.{}\".format(description, function_basename)\n\n with hub.start_span(\n op=\"django.middleware\", description=description\n ) as span:\n span.set_tag(\"django.function_name\", function_name)\n span.set_tag(\"django.middleware_name\", middleware_name)\n return old_method(*args, **kwargs)\n\n try:\n # fails for __call__ of function on Python 2 (see py2.7-django-1.11)\n sentry_wrapped_method = wraps(old_method)(sentry_wrapped_method)\n\n # Necessary for Django 3.1\n sentry_wrapped_method.__self__ = old_method.__self__ # type: ignore\n except Exception:\n pass\n\n return sentry_wrapped_method # type: ignore\n\n return old_method\n\n class SentryWrappingMiddleware(object):\n def __init__(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n self._inner = middleware(*args, 
**kwargs)\n self._call_method = None\n\n # We need correct behavior for `hasattr()`, which we can only determine\n # when we have an instance of the middleware we're wrapping.\n def __getattr__(self, method_name):\n # type: (str) -> Any\n if method_name not in (\n \"process_request\",\n \"process_view\",\n \"process_template_response\",\n \"process_response\",\n \"process_exception\",\n ):\n raise AttributeError()\n\n old_method = getattr(self._inner, method_name)\n rv = _get_wrapped_method(old_method)\n self.__dict__[method_name] = rv\n return rv\n\n def __call__(self, *args, **kwargs):\n # type: (*Any, **Any) -> Any\n f = self._call_method\n if f is None:\n self._call_method = f = _get_wrapped_method(self._inner.__call__)\n return f(*args, **kwargs)\n\n if hasattr(middleware, \"__name__\"):\n SentryWrappingMiddleware.__name__ = middleware.__name__\n\n return SentryWrappingMiddleware\n", "path": "sentry_sdk/integrations/django/middleware.py"}]} | 1,863 | 213 |
gh_patches_debug_37989 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-1270 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
API should validate input for number columns
## Description
<!-- A clear and concise description of what the bug is. -->
Currently, the API accepts strings for values input to number-typed columns. In some cases, these strings carry locale-sensitive information, i.e., using specific decimal points and negation styles. This is a problem since confusion will arise whenever the client, service, and database have different locale settings (it's likely the client and DB will have different locale settings by default). Even worse, the locale settings in the database (assuming PostgreSQL) may be applied differently in different contexts.
## Expected behavior
<!-- A clear and concise description of what you expected to happen. -->
Columns which use a number type for storage at the DB layer should only accept numbers in one of two formats:
- an actual JSON number, or
- a string whose contents conform to the [JSON number spec](https://www.json.org/json-en.html), wrapped in double quotes.
The validation of this should be locale-independent, and should happen in the Mathesar web service rather than the database.
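
For illustration only (the helper name below is hypothetical, not Mathesar code), the whole grammar fits in a single regular expression transcribed from json.org, which is inherently locale-independent:

```python
import re

# JSON number grammar: optional sign, integer part without leading zeros,
# optional fraction, optional exponent. No locale-dependent separators.
JSON_NUMBER = re.compile(r"-?(0|[1-9][0-9]*)(\.[0-9]+)?([eE][+-]?[0-9]+)?")

def is_json_number_string(value: str) -> bool:
    """Locale-independent check that a string spells a JSON number."""
    return JSON_NUMBER.fullmatch(value) is not None

assert is_json_number_string("-12.5e3")
assert not is_json_number_string("1,5")  # comma decimal separator rejected
assert not is_json_number_string("007")  # leading zeros rejected
```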
## To Reproduce
<!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. -->
- Create a table with a number-typed column containing a decimal point (e.g., `FLOAT`).
- Send an API request with input for that column as a string, with a comma for a decimal point.
- You can do this easily from the browseable API, see `/api/db/v0/tables/<table_ID>/records/<record_ID>/`
- Observe the database-layer error.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mathesar/api/utils.py`
Content:
```
1 from rest_framework.exceptions import NotFound
2
3 from db.records.operations import group
4 from mathesar.models import Table
5
6 DATA_KEY = 'data'
7 METADATA_KEY = 'metadata'
8
9
10 def get_table_or_404(pk):
11 """
12 Get table if it exists, otherwise throws a DRF NotFound error.
13 Args:
14 pk: id of table
15 Returns:
16 table: return the table based on a specific id
17 """
18 try:
19 table = Table.objects.get(id=pk)
20 except Table.DoesNotExist:
21 raise NotFound
22 return table
23
24
25 def process_annotated_records(record_list, column_name_id_map):
26
27 RESULT_IDX = 'result_indices'
28
29 def _get_record_dict(record):
30 return record._asdict() if not isinstance(record, dict) else record
31
32 split_records = (
33 {DATA_KEY: record_dict}
34 for record_dict in (_get_record_dict(record) for record in record_list)
35 )
36
37 combined_records, groups = group.extract_group_metadata(
38 split_records, data_key=DATA_KEY, metadata_key=METADATA_KEY
39 )
40
41 processed_records, record_metadata = zip(
42 *tuple(tuple(d.values()) for d in combined_records)
43 )
44
45 def _replace_column_names_with_ids(group_metadata_item):
46 try:
47 processed_group_metadata_item = {
48 column_name_id_map[k]: v for k, v in group_metadata_item.items()
49 }
50 except AttributeError:
51 processed_group_metadata_item = group_metadata_item
52 return processed_group_metadata_item
53
54 if groups is not None:
55 groups_by_id = {
56 grp[group.GroupMetadataField.GROUP_ID.value]: {
57 k: _replace_column_names_with_ids(v) for k, v in grp.items()
58 if k != group.GroupMetadataField.GROUP_ID.value
59 } | {RESULT_IDX: []}
60 for grp in groups
61 }
62
63 for i, meta in enumerate(record_metadata):
64 groups_by_id[meta[group.GroupMetadataField.GROUP_ID.value]][RESULT_IDX].append(i)
65
66 output_groups = sorted(list(groups_by_id.values()), key=lambda x: x[RESULT_IDX][0])
67 else:
68 output_groups = None
69
70 return processed_records, output_groups
71
```
Path: `mathesar/api/serializers/records.py`
Content:
```
1 from psycopg2.errors import NotNullViolation
2 from rest_framework import serializers
3 from rest_framework import status
4 from sqlalchemy.exc import IntegrityError
5
6 import mathesar.api.exceptions.database_exceptions.exceptions as database_api_exceptions
7 from mathesar.api.exceptions.mixins import MathesarErrorMessageMixin
8
9
10 class RecordListParameterSerializer(MathesarErrorMessageMixin, serializers.Serializer):
11 filter = serializers.JSONField(required=False, default=None)
12 order_by = serializers.JSONField(required=False, default=[])
13 grouping = serializers.JSONField(required=False, default={})
14 duplicate_only = serializers.JSONField(required=False, default=None)
15
16
17 class RecordSerializer(MathesarErrorMessageMixin, serializers.BaseSerializer):
18 def update(self, instance, validated_data):
19 table = self.context['table']
20 record = table.update_record(instance['id'], validated_data)
21 return record
22
23 def create(self, validated_data):
24 table = self.context['table']
25 try:
26 record = table.create_record_or_records(validated_data)
27 except IntegrityError as e:
28 if type(e.orig) == NotNullViolation:
29 raise database_api_exceptions.NotNullViolationAPIException(
30 e,
31 status_code=status.HTTP_400_BAD_REQUEST,
32 table=table
33 )
34 else:
35 raise database_api_exceptions.MathesarAPIException(e, status_code=status.HTTP_400_BAD_REQUEST)
36 return record
37
38 def to_representation(self, instance):
39 records = instance._asdict() if not isinstance(instance, dict) else instance
40 columns_map = self.context['columns_map']
41 records = {columns_map[column_name]: column_value for column_name, column_value in records.items()}
42 return records
43
44 def to_internal_value(self, data):
45 columns_map = self.context['columns_map'].inverse
46 data = {columns_map[int(column_id)]: value for column_id, value in data.items()}
47 return data
48
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mathesar/api/serializers/records.py b/mathesar/api/serializers/records.py
--- a/mathesar/api/serializers/records.py
+++ b/mathesar/api/serializers/records.py
@@ -5,6 +5,8 @@
import mathesar.api.exceptions.database_exceptions.exceptions as database_api_exceptions
from mathesar.api.exceptions.mixins import MathesarErrorMessageMixin
+from mathesar.models import Column
+from mathesar.api.utils import follows_json_number_spec, is_number
class RecordListParameterSerializer(MathesarErrorMessageMixin, serializers.Serializer):
@@ -44,4 +46,17 @@
def to_internal_value(self, data):
columns_map = self.context['columns_map'].inverse
data = {columns_map[int(column_id)]: value for column_id, value in data.items()}
+ # If the data type of the column is number then the value must be an integer
+ # or a string which follows JSON number spec.
+ for column_name in data.keys():
+ column = Column.objects.get(id=columns_map.inverse[column_name])
+ column_type = column.type
+ value = data[column_name]
+ if is_number(column_type) and type(data[column_name]) is str and not follows_json_number_spec(value):
+ raise database_api_exceptions.MathesarAPIException(
+ IntegrityError,
+ status_code=status.HTTP_400_BAD_REQUEST,
+ message="Number strings should follow JSON number spec",
+ field=column_name
+ )
return data
diff --git a/mathesar/api/utils.py b/mathesar/api/utils.py
--- a/mathesar/api/utils.py
+++ b/mathesar/api/utils.py
@@ -1,7 +1,9 @@
from rest_framework.exceptions import NotFound
+import re
from db.records.operations import group
from mathesar.models import Table
+from mathesar.database.types import _get_type_map
DATA_KEY = 'data'
METADATA_KEY = 'metadata'
@@ -68,3 +70,39 @@
output_groups = None
return processed_records, output_groups
+
+
+def is_number(column_type):
+ """
+ Check if a column data type is a number
+ Args:
+ column_type: data type of column
+ """
+ for type in _get_type_map():
+ if type['name'] == 'Number':
+ if str(column_type).lower() in type['sa_type_names']:
+ return True
+ else:
+ return False
+
+
+def follows_json_number_spec(number):
+ """
+ Check if a string follows JSON number spec
+ Args:
+ number: number as string
+ """
+ patterns = [
+ r"^-?0$",
+ r"^-?0[\.][0-9]+$",
+ r"^-?0[eE][+-]?[0-9]*$",
+ r"^-?0[\.][0-9]+[eE][+-]?[0-9]+$",
+ r"^-?[1-9][0-9]*$",
+ r"^-?[1-9][0-9]*[\.][0-9]+$",
+ r"^-?[1-9][0-9]*[eE][+-]?[0-9]+$",
+ r"^-?[1-9][0-9]*[\.][0-9]+[eE][+-]?[0-9]+$",
+ ]
+ for pattern in patterns:
+ if re.search(pattern, number) is not None:
+ return True
+ return False
| {"golden_diff": "diff --git a/mathesar/api/serializers/records.py b/mathesar/api/serializers/records.py\n--- a/mathesar/api/serializers/records.py\n+++ b/mathesar/api/serializers/records.py\n@@ -5,6 +5,8 @@\n \n import mathesar.api.exceptions.database_exceptions.exceptions as database_api_exceptions\n from mathesar.api.exceptions.mixins import MathesarErrorMessageMixin\n+from mathesar.models import Column\n+from mathesar.api.utils import follows_json_number_spec, is_number\n \n \n class RecordListParameterSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n@@ -44,4 +46,17 @@\n def to_internal_value(self, data):\n columns_map = self.context['columns_map'].inverse\n data = {columns_map[int(column_id)]: value for column_id, value in data.items()}\n+ # If the data type of the column is number then the value must be an integer\n+ # or a string which follows JSON number spec.\n+ for column_name in data.keys():\n+ column = Column.objects.get(id=columns_map.inverse[column_name])\n+ column_type = column.type\n+ value = data[column_name]\n+ if is_number(column_type) and type(data[column_name]) is str and not follows_json_number_spec(value):\n+ raise database_api_exceptions.MathesarAPIException(\n+ IntegrityError,\n+ status_code=status.HTTP_400_BAD_REQUEST,\n+ message=\"Number strings should follow JSON number spec\",\n+ field=column_name\n+ )\n return data\ndiff --git a/mathesar/api/utils.py b/mathesar/api/utils.py\n--- a/mathesar/api/utils.py\n+++ b/mathesar/api/utils.py\n@@ -1,7 +1,9 @@\n from rest_framework.exceptions import NotFound\n+import re\n \n from db.records.operations import group\n from mathesar.models import Table\n+from mathesar.database.types import _get_type_map\n \n DATA_KEY = 'data'\n METADATA_KEY = 'metadata'\n@@ -68,3 +70,39 @@\n output_groups = None\n \n return processed_records, output_groups\n+\n+\n+def is_number(column_type):\n+ \"\"\"\n+ Check if a column data type is a number\n+ Args:\n+ column_type: data type of column\n+ \"\"\"\n+ for type in _get_type_map():\n+ if type['name'] == 'Number':\n+ if str(column_type).lower() in type['sa_type_names']:\n+ return True\n+ else:\n+ return False\n+\n+\n+def follows_json_number_spec(number):\n+ \"\"\"\n+ Check if a string follows JSON number spec\n+ Args:\n+ number: number as string\n+ \"\"\"\n+ patterns = [\n+ r\"^-?0$\",\n+ r\"^-?0[\\.][0-9]+$\",\n+ r\"^-?0[eE][+-]?[0-9]*$\",\n+ r\"^-?0[\\.][0-9]+[eE][+-]?[0-9]+$\",\n+ r\"^-?[1-9][0-9]*$\",\n+ r\"^-?[1-9][0-9]*[\\.][0-9]+$\",\n+ r\"^-?[1-9][0-9]*[eE][+-]?[0-9]+$\",\n+ r\"^-?[1-9][0-9]*[\\.][0-9]+[eE][+-]?[0-9]+$\",\n+ ]\n+ for pattern in patterns:\n+ if re.search(pattern, number) is not None:\n+ return True\n+ return False\n", "issue": "API should validate input for number columns\n## Description\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\nCurrently, the API accepts strings for values input to number-typed columns. In some cases, these strings carry locale-sensitive information, i.e., using specific decimal points and negation styles. This is a problem since confusion will arise whenever the client, service, and database have different locale settings (it's likely the client and DB will have different locale settings by default). Even worse, the locale settings in the database (assuming PostgreSQL) may be applied differently in different contexts.\r\n\r\n## Expected behavior\r\n<!-- A clear and concise description of what you expected to happen. 
-->\r\n\r\nColumns which use a number type for storage at the DB layer should only accept numbers in one of two formats:\r\n- an actual JSON number, or\r\n- A string conforming to the [JSON number spec](https://www.json.org/json-en.html), except wrapped in double-quotes.\r\n\r\nThe validation of this should be locale-independent, and should happen in the Mathesar web service rather than the database.\r\n\r\n## To Reproduce\r\n<!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. -->\r\n\r\n- Create a table with a number-typed column containing a decimal point (e.g., `FLOAT`).\r\n- Send an API request with input for that column as a string, with a comma for a decimal point. \r\n - You can do this easily from the browseable API, see `/api/db/v0/tables/<table_ID>/records/<record_ID>/`\r\n- Observe the database-layer error.\r\n\n", "before_files": [{"content": "from rest_framework.exceptions import NotFound\n\nfrom db.records.operations import group\nfrom mathesar.models import Table\n\nDATA_KEY = 'data'\nMETADATA_KEY = 'metadata'\n\n\ndef get_table_or_404(pk):\n \"\"\"\n Get table if it exists, otherwise throws a DRF NotFound error.\n Args:\n pk: id of table\n Returns:\n table: return the table based on a specific id\n \"\"\"\n try:\n table = Table.objects.get(id=pk)\n except Table.DoesNotExist:\n raise NotFound\n return table\n\n\ndef process_annotated_records(record_list, column_name_id_map):\n\n RESULT_IDX = 'result_indices'\n\n def _get_record_dict(record):\n return record._asdict() if not isinstance(record, dict) else record\n\n split_records = (\n {DATA_KEY: record_dict}\n for record_dict in (_get_record_dict(record) for record in record_list)\n )\n\n combined_records, groups = group.extract_group_metadata(\n split_records, data_key=DATA_KEY, metadata_key=METADATA_KEY\n )\n\n processed_records, record_metadata = zip(\n *tuple(tuple(d.values()) for d in combined_records)\n )\n\n def _replace_column_names_with_ids(group_metadata_item):\n try:\n processed_group_metadata_item = {\n column_name_id_map[k]: v for k, v in group_metadata_item.items()\n }\n except AttributeError:\n processed_group_metadata_item = group_metadata_item\n return processed_group_metadata_item\n\n if groups is not None:\n groups_by_id = {\n grp[group.GroupMetadataField.GROUP_ID.value]: {\n k: _replace_column_names_with_ids(v) for k, v in grp.items()\n if k != group.GroupMetadataField.GROUP_ID.value\n } | {RESULT_IDX: []}\n for grp in groups\n }\n\n for i, meta in enumerate(record_metadata):\n groups_by_id[meta[group.GroupMetadataField.GROUP_ID.value]][RESULT_IDX].append(i)\n\n output_groups = sorted(list(groups_by_id.values()), key=lambda x: x[RESULT_IDX][0])\n else:\n output_groups = None\n\n return processed_records, output_groups\n", "path": "mathesar/api/utils.py"}, {"content": "from psycopg2.errors import NotNullViolation\nfrom rest_framework import serializers\nfrom rest_framework import status\nfrom sqlalchemy.exc import IntegrityError\n\nimport mathesar.api.exceptions.database_exceptions.exceptions as database_api_exceptions\nfrom mathesar.api.exceptions.mixins import MathesarErrorMessageMixin\n\n\nclass RecordListParameterSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n filter = serializers.JSONField(required=False, default=None)\n order_by = serializers.JSONField(required=False, default=[])\n grouping = serializers.JSONField(required=False, default={})\n duplicate_only = 
serializers.JSONField(required=False, default=None)\n\n\nclass RecordSerializer(MathesarErrorMessageMixin, serializers.BaseSerializer):\n def update(self, instance, validated_data):\n table = self.context['table']\n record = table.update_record(instance['id'], validated_data)\n return record\n\n def create(self, validated_data):\n table = self.context['table']\n try:\n record = table.create_record_or_records(validated_data)\n except IntegrityError as e:\n if type(e.orig) == NotNullViolation:\n raise database_api_exceptions.NotNullViolationAPIException(\n e,\n status_code=status.HTTP_400_BAD_REQUEST,\n table=table\n )\n else:\n raise database_api_exceptions.MathesarAPIException(e, status_code=status.HTTP_400_BAD_REQUEST)\n return record\n\n def to_representation(self, instance):\n records = instance._asdict() if not isinstance(instance, dict) else instance\n columns_map = self.context['columns_map']\n records = {columns_map[column_name]: column_value for column_name, column_value in records.items()}\n return records\n\n def to_internal_value(self, data):\n columns_map = self.context['columns_map'].inverse\n data = {columns_map[int(column_id)]: value for column_id, value in data.items()}\n return data\n", "path": "mathesar/api/serializers/records.py"}], "after_files": [{"content": "from rest_framework.exceptions import NotFound\nimport re\n\nfrom db.records.operations import group\nfrom mathesar.models import Table\nfrom mathesar.database.types import _get_type_map\n\nDATA_KEY = 'data'\nMETADATA_KEY = 'metadata'\n\n\ndef get_table_or_404(pk):\n \"\"\"\n Get table if it exists, otherwise throws a DRF NotFound error.\n Args:\n pk: id of table\n Returns:\n table: return the table based on a specific id\n \"\"\"\n try:\n table = Table.objects.get(id=pk)\n except Table.DoesNotExist:\n raise NotFound\n return table\n\n\ndef process_annotated_records(record_list, column_name_id_map):\n\n RESULT_IDX = 'result_indices'\n\n def _get_record_dict(record):\n return record._asdict() if not isinstance(record, dict) else record\n\n split_records = (\n {DATA_KEY: record_dict}\n for record_dict in (_get_record_dict(record) for record in record_list)\n )\n\n combined_records, groups = group.extract_group_metadata(\n split_records, data_key=DATA_KEY, metadata_key=METADATA_KEY\n )\n\n processed_records, record_metadata = zip(\n *tuple(tuple(d.values()) for d in combined_records)\n )\n\n def _replace_column_names_with_ids(group_metadata_item):\n try:\n processed_group_metadata_item = {\n column_name_id_map[k]: v for k, v in group_metadata_item.items()\n }\n except AttributeError:\n processed_group_metadata_item = group_metadata_item\n return processed_group_metadata_item\n\n if groups is not None:\n groups_by_id = {\n grp[group.GroupMetadataField.GROUP_ID.value]: {\n k: _replace_column_names_with_ids(v) for k, v in grp.items()\n if k != group.GroupMetadataField.GROUP_ID.value\n } | {RESULT_IDX: []}\n for grp in groups\n }\n\n for i, meta in enumerate(record_metadata):\n groups_by_id[meta[group.GroupMetadataField.GROUP_ID.value]][RESULT_IDX].append(i)\n\n output_groups = sorted(list(groups_by_id.values()), key=lambda x: x[RESULT_IDX][0])\n else:\n output_groups = None\n\n return processed_records, output_groups\n\n\ndef is_number(column_type):\n \"\"\"\n Check if a column data type is a number\n Args:\n column_type: data type of column\n \"\"\"\n for type in _get_type_map():\n if type['name'] == 'Number':\n if str(column_type).lower() in type['sa_type_names']:\n return True\n else:\n return False\n\n\ndef 
follows_json_number_spec(number):\n \"\"\"\n Check if a string follows JSON number spec\n Args:\n number: number as string\n \"\"\"\n patterns = [\n r\"^-?0$\",\n r\"^-?0[\\.][0-9]+$\",\n r\"^-?0[eE][+-]?[0-9]*$\",\n r\"^-?0[\\.][0-9]+[eE][+-]?[0-9]+$\",\n r\"^-?[1-9][0-9]*$\",\n r\"^-?[1-9][0-9]*[\\.][0-9]+$\",\n r\"^-?[1-9][0-9]*[eE][+-]?[0-9]+$\",\n r\"^-?[1-9][0-9]*[\\.][0-9]+[eE][+-]?[0-9]+$\",\n ]\n for pattern in patterns:\n if re.search(pattern, number) is not None:\n return True\n return False\n", "path": "mathesar/api/utils.py"}, {"content": "from psycopg2.errors import NotNullViolation\nfrom rest_framework import serializers\nfrom rest_framework import status\nfrom sqlalchemy.exc import IntegrityError\n\nimport mathesar.api.exceptions.database_exceptions.exceptions as database_api_exceptions\nfrom mathesar.api.exceptions.mixins import MathesarErrorMessageMixin\nfrom mathesar.models import Column\nfrom mathesar.api.utils import follows_json_number_spec, is_number\n\n\nclass RecordListParameterSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n filter = serializers.JSONField(required=False, default=None)\n order_by = serializers.JSONField(required=False, default=[])\n grouping = serializers.JSONField(required=False, default={})\n duplicate_only = serializers.JSONField(required=False, default=None)\n\n\nclass RecordSerializer(MathesarErrorMessageMixin, serializers.BaseSerializer):\n def update(self, instance, validated_data):\n table = self.context['table']\n record = table.update_record(instance['id'], validated_data)\n return record\n\n def create(self, validated_data):\n table = self.context['table']\n try:\n record = table.create_record_or_records(validated_data)\n except IntegrityError as e:\n if type(e.orig) == NotNullViolation:\n raise database_api_exceptions.NotNullViolationAPIException(\n e,\n status_code=status.HTTP_400_BAD_REQUEST,\n table=table\n )\n else:\n raise database_api_exceptions.MathesarAPIException(e, status_code=status.HTTP_400_BAD_REQUEST)\n return record\n\n def to_representation(self, instance):\n records = instance._asdict() if not isinstance(instance, dict) else instance\n columns_map = self.context['columns_map']\n records = {columns_map[column_name]: column_value for column_name, column_value in records.items()}\n return records\n\n def to_internal_value(self, data):\n columns_map = self.context['columns_map'].inverse\n data = {columns_map[int(column_id)]: value for column_id, value in data.items()}\n # If the data type of the column is number then the value must be an integer\n # or a string which follows JSON number spec.\n for column_name in data.keys():\n column = Column.objects.get(id=columns_map.inverse[column_name])\n column_type = column.type\n value = data[column_name]\n if is_number(column_type) and type(data[column_name]) is str and not follows_json_number_spec(value):\n raise database_api_exceptions.MathesarAPIException(\n IntegrityError,\n status_code=status.HTTP_400_BAD_REQUEST,\n message=\"Number strings should follow JSON number spec\",\n field=column_name\n )\n return data\n", "path": "mathesar/api/serializers/records.py"}]} | 1,702 | 782 |
gh_patches_debug_31432 | rasdani/github-patches | git_diff | facebookresearch__CompilerGym-309 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CompilerGym cache directory defaults don't play nicely with shared access machines
## 🐛 Bug
The default locations of the CompilerGym caches are in shared folders. On multi-user machines this can cause permission errors, as the directory may be created by one user without write permissions for other users. I propose switching to user-specific defaults like so (see the sketch after this list):
- [x] `COMPILER_GYM_CACHE` -> `/tmp/compiler_gym-$user`
- [x] `COMPILER_GYM_TRANSIENT_CACHE` -> `/dev/shm/compiler_gym-$user`
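
A minimal sketch of the proposal, assuming POSIX paths; the exact separator and directory layout are illustrative rather than the final API:

```python
from getpass import getuser
from pathlib import Path

def default_cache_dir() -> Path:
    # Per-user fallback: one account's cache can no longer shadow another's.
    return Path(f"/tmp/compiler_gym-{getuser()}")

def default_transient_cache_dir() -> Path:
    # Prefer the RAM-backed filesystem when it exists, else reuse the cache.
    shm = Path("/dev/shm")
    return shm / f"compiler_gym-{getuser()}" if shm.is_dir() else default_cache_dir()
```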
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `compiler_gym/util/runfiles_path.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates.
2 #
3 # This source code is licensed under the MIT license found in the
4 # LICENSE file in the root directory of this source tree.
5 """Module for resolving a runfiles path."""
6 import getpass
7 import os
8 from pathlib import Path
9
10 # NOTE(cummins): Moving this file may require updating this relative path.
11 _PACKAGE_ROOT = Path(os.path.join(os.path.dirname(__file__), "../../")).resolve(
12 strict=True
13 )
14
15
16 def runfiles_path(relpath: str) -> Path:
17 """Resolve the path to a runfiles data path.
18
19 No checks are to made to ensure that the path, or the containing directory,
20 exist.
21
22 Use environment variable COMPILER_GYM_RUNFILES=/path/to/runfiles if running
23 outside of bazel.
24
25 :param relpath: The relative path within the runfiles tree.
26
27 :return: An absolute path.
28 """
29 # There are three ways of determining a runfiles path:
30 # 1. Set the COMPILER_GYM_RUNFILES environment variable.
31 # 2. Using the rules_python library that is provided by bazel. This will
32 # fail if not being executed within a bazel sandbox.
33 # 3. Computing the path relative to the location of this file. This is the
34 # fallback approach that is used for when the code has been installed
35 # by setuptools.
36 runfiles_path = os.environ.get("COMPILER_GYM_RUNFILES")
37 if runfiles_path:
38 return Path(runfiles_path) / relpath
39 else:
40 try:
41 from rules_python.python.runfiles import runfiles
42
43 return Path(
44 runfiles.Create().Rlocation(
45 "CompilerGym" if relpath == "." else f"CompilerGym/{relpath}"
46 )
47 )
48 except (ModuleNotFoundError, TypeError):
49 return _PACKAGE_ROOT / relpath
50
51
52 def site_data_path(relpath: str) -> Path:
53 """Return a path within the site data directory.
54
55 CompilerGym uses a directory to store persistent site data files in, such as benchmark datasets.
56 The default location is :code:`~/.local/share/compiler_gym`. Set the environment variable
57 :code:`$COMPILER_GYM_SITE_DATA` to override this default location.
58
59 No checks are to made to ensure that the path, or the containing directory,
60 exist.
61
62 :param relpath: The relative path within the site data tree.
63
64 :return: An absolute path.
65 """
66 # NOTE(cummins): This function has a matching implementation in the C++
67 # sources, compiler_gym::service::getSiteDataPath(). Any change to behavior
68 # here must be reflected in the C++ version.
69 forced = os.environ.get("COMPILER_GYM_SITE_DATA")
70 if forced:
71 return Path(forced) / relpath
72 elif os.environ.get("HOME"):
73 return Path("~/.local/share/compiler_gym").expanduser() / relpath
74 else:
75 return Path("/tmp/compiler_gym/site_data") / relpath
76
77
78 def cache_path(relpath: str) -> Path:
79 """Return a path within the cache directory.
80
81 CompilerGym uses a directory to cache files in, such as downloaded content.
82 The default location for this cache is :code:`~/.cache/compiler_gym`. Set
83 the environment variable :code:`$COMPILER_GYM_CACHE` to override this
84 default location.
85
86 No checks are to made to ensure that the path, or the containing directory,
87 exist.
88
89 :param relpath: The relative path within the cache tree.
90
91 :return: An absolute path.
92 """
93 forced = os.environ.get("COMPILER_GYM_CACHE")
94 if forced:
95 return Path(forced) / relpath
96 elif os.environ.get("HOME"):
97 return Path("~/.cache/compiler_gym").expanduser() / relpath
98 else:
99 return Path("/tmp/compiler_gym/cache") / relpath
100
101
102 def transient_cache_path(relpath: str) -> Path:
103 """Return a path within the transient cache directory.
104
105 The transient cache is a directory used to store files that do not need to
106 persist beyond the lifetime of the current process. When available, the
107 temporary filesystem :code:`/dev/shm` will be used. Else,
108 :meth:`cache_path() <compiler_gym.cache_path>` is used as a fallback. Set
109 the environment variable :code:`$COMPILER_GYM_TRANSIENT_CACHE` to override
110 the default location.
111
112 No checks are to made to ensure that the path, or the containing directory,
113 exist.
114
115 :param relpath: The relative path within the cache tree.
116
117 :return: An absolute path.
118 """
119 forced = os.environ.get("COMPILER_GYM_TRANSIENT_CACHE")
120 if forced:
121 return Path(forced) / relpath
122 elif Path("/dev/shm").is_dir():
123 return Path(f"/dev/shm/compiler_gym_{getpass.getuser()}") / relpath
124 else:
125 # Fallback to using the regular cache.
126 return cache_path(relpath)
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/compiler_gym/util/runfiles_path.py b/compiler_gym/util/runfiles_path.py
--- a/compiler_gym/util/runfiles_path.py
+++ b/compiler_gym/util/runfiles_path.py
@@ -3,8 +3,8 @@
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Module for resolving a runfiles path."""
-import getpass
import os
+from getpass import getuser
from pathlib import Path
# NOTE(cummins): Moving this file may require updating this relative path.
@@ -72,7 +72,7 @@
elif os.environ.get("HOME"):
return Path("~/.local/share/compiler_gym").expanduser() / relpath
else:
- return Path("/tmp/compiler_gym/site_data") / relpath
+ return Path(f"/tmp/compiler_gym_{getuser()}/site_data") / relpath
def cache_path(relpath: str) -> Path:
@@ -96,7 +96,7 @@
elif os.environ.get("HOME"):
return Path("~/.cache/compiler_gym").expanduser() / relpath
else:
- return Path("/tmp/compiler_gym/cache") / relpath
+ return Path(f"/tmp/compiler_gym_{getuser()}/cache") / relpath
def transient_cache_path(relpath: str) -> Path:
@@ -120,7 +120,7 @@
if forced:
return Path(forced) / relpath
elif Path("/dev/shm").is_dir():
- return Path(f"/dev/shm/compiler_gym_{getpass.getuser()}") / relpath
+ return Path(f"/dev/shm/compiler_gym_{getuser()}") / relpath
else:
# Fallback to using the regular cache.
return cache_path(relpath)
| {"golden_diff": "diff --git a/compiler_gym/util/runfiles_path.py b/compiler_gym/util/runfiles_path.py\n--- a/compiler_gym/util/runfiles_path.py\n+++ b/compiler_gym/util/runfiles_path.py\n@@ -3,8 +3,8 @@\n # This source code is licensed under the MIT license found in the\n # LICENSE file in the root directory of this source tree.\n \"\"\"Module for resolving a runfiles path.\"\"\"\n-import getpass\n import os\n+from getpass import getuser\n from pathlib import Path\n \n # NOTE(cummins): Moving this file may require updating this relative path.\n@@ -72,7 +72,7 @@\n elif os.environ.get(\"HOME\"):\n return Path(\"~/.local/share/compiler_gym\").expanduser() / relpath\n else:\n- return Path(\"/tmp/compiler_gym/site_data\") / relpath\n+ return Path(f\"/tmp/compiler_gym_{getuser()}/site_data\") / relpath\n \n \n def cache_path(relpath: str) -> Path:\n@@ -96,7 +96,7 @@\n elif os.environ.get(\"HOME\"):\n return Path(\"~/.cache/compiler_gym\").expanduser() / relpath\n else:\n- return Path(\"/tmp/compiler_gym/cache\") / relpath\n+ return Path(f\"/tmp/compiler_gym_{getuser()}/cache\") / relpath\n \n \n def transient_cache_path(relpath: str) -> Path:\n@@ -120,7 +120,7 @@\n if forced:\n return Path(forced) / relpath\n elif Path(\"/dev/shm\").is_dir():\n- return Path(f\"/dev/shm/compiler_gym_{getpass.getuser()}\") / relpath\n+ return Path(f\"/dev/shm/compiler_gym_{getuser()}\") / relpath\n else:\n # Fallback to using the regular cache.\n return cache_path(relpath)\n", "issue": "CompilerGym cache directory defaults don't doesn't play nicely with shared access machines\n## \ud83d\udc1b Bug\r\n\r\nThe default locations of the CompilerGym caches are in shared folders. On multi-user machines this can cause permission errors as the directory may be created by one user without write permissions to other users. I propose switching to user-specific defaults like so:\r\n\r\n- [x] `COMPILER_GYM_CACHE` -> `/tmp/compiler_gym-$user`\r\n- [x] `COMPILER_YM_TRANSIENT_CACHE` -> `/dev/shm/compiler_gym-$user`\r\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"Module for resolving a runfiles path.\"\"\"\nimport getpass\nimport os\nfrom pathlib import Path\n\n# NOTE(cummins): Moving this file may require updating this relative path.\n_PACKAGE_ROOT = Path(os.path.join(os.path.dirname(__file__), \"../../\")).resolve(\n strict=True\n)\n\n\ndef runfiles_path(relpath: str) -> Path:\n \"\"\"Resolve the path to a runfiles data path.\n\n No checks are to made to ensure that the path, or the containing directory,\n exist.\n\n Use environment variable COMPILER_GYM_RUNFILES=/path/to/runfiles if running\n outside of bazel.\n\n :param relpath: The relative path within the runfiles tree.\n\n :return: An absolute path.\n \"\"\"\n # There are three ways of determining a runfiles path:\n # 1. Set the COMPILER_GYM_RUNFILES environment variable.\n # 2. Using the rules_python library that is provided by bazel. This will\n # fail if not being executed within a bazel sandbox.\n # 3. Computing the path relative to the location of this file. 
This is the\n # fallback approach that is used for when the code has been installed\n # by setuptools.\n runfiles_path = os.environ.get(\"COMPILER_GYM_RUNFILES\")\n if runfiles_path:\n return Path(runfiles_path) / relpath\n else:\n try:\n from rules_python.python.runfiles import runfiles\n\n return Path(\n runfiles.Create().Rlocation(\n \"CompilerGym\" if relpath == \".\" else f\"CompilerGym/{relpath}\"\n )\n )\n except (ModuleNotFoundError, TypeError):\n return _PACKAGE_ROOT / relpath\n\n\ndef site_data_path(relpath: str) -> Path:\n \"\"\"Return a path within the site data directory.\n\n CompilerGym uses a directory to store persistent site data files in, such as benchmark datasets.\n The default location is :code:`~/.local/share/compiler_gym`. Set the environment variable\n :code:`$COMPILER_GYM_SITE_DATA` to override this default location.\n\n No checks are to made to ensure that the path, or the containing directory,\n exist.\n\n :param relpath: The relative path within the site data tree.\n\n :return: An absolute path.\n \"\"\"\n # NOTE(cummins): This function has a matching implementation in the C++\n # sources, compiler_gym::service::getSiteDataPath(). Any change to behavior\n # here must be reflected in the C++ version.\n forced = os.environ.get(\"COMPILER_GYM_SITE_DATA\")\n if forced:\n return Path(forced) / relpath\n elif os.environ.get(\"HOME\"):\n return Path(\"~/.local/share/compiler_gym\").expanduser() / relpath\n else:\n return Path(\"/tmp/compiler_gym/site_data\") / relpath\n\n\ndef cache_path(relpath: str) -> Path:\n \"\"\"Return a path within the cache directory.\n\n CompilerGym uses a directory to cache files in, such as downloaded content.\n The default location for this cache is :code:`~/.cache/compiler_gym`. Set\n the environment variable :code:`$COMPILER_GYM_CACHE` to override this\n default location.\n\n No checks are to made to ensure that the path, or the containing directory,\n exist.\n\n :param relpath: The relative path within the cache tree.\n\n :return: An absolute path.\n \"\"\"\n forced = os.environ.get(\"COMPILER_GYM_CACHE\")\n if forced:\n return Path(forced) / relpath\n elif os.environ.get(\"HOME\"):\n return Path(\"~/.cache/compiler_gym\").expanduser() / relpath\n else:\n return Path(\"/tmp/compiler_gym/cache\") / relpath\n\n\ndef transient_cache_path(relpath: str) -> Path:\n \"\"\"Return a path within the transient cache directory.\n\n The transient cache is a directory used to store files that do not need to\n persist beyond the lifetime of the current process. When available, the\n temporary filesystem :code:`/dev/shm` will be used. Else,\n :meth:`cache_path() <compiler_gym.cache_path>` is used as a fallback. Set\n the environment variable :code:`$COMPILER_GYM_TRANSIENT_CACHE` to override\n the default location.\n\n No checks are to made to ensure that the path, or the containing directory,\n exist.\n\n :param relpath: The relative path within the cache tree.\n\n :return: An absolute path.\n \"\"\"\n forced = os.environ.get(\"COMPILER_GYM_TRANSIENT_CACHE\")\n if forced:\n return Path(forced) / relpath\n elif Path(\"/dev/shm\").is_dir():\n return Path(f\"/dev/shm/compiler_gym_{getpass.getuser()}\") / relpath\n else:\n # Fallback to using the regular cache.\n return cache_path(relpath)\n", "path": "compiler_gym/util/runfiles_path.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"Module for resolving a runfiles path.\"\"\"\nimport os\nfrom getpass import getuser\nfrom pathlib import Path\n\n# NOTE(cummins): Moving this file may require updating this relative path.\n_PACKAGE_ROOT = Path(os.path.join(os.path.dirname(__file__), \"../../\")).resolve(\n strict=True\n)\n\n\ndef runfiles_path(relpath: str) -> Path:\n \"\"\"Resolve the path to a runfiles data path.\n\n No checks are to made to ensure that the path, or the containing directory,\n exist.\n\n Use environment variable COMPILER_GYM_RUNFILES=/path/to/runfiles if running\n outside of bazel.\n\n :param relpath: The relative path within the runfiles tree.\n\n :return: An absolute path.\n \"\"\"\n # There are three ways of determining a runfiles path:\n # 1. Set the COMPILER_GYM_RUNFILES environment variable.\n # 2. Using the rules_python library that is provided by bazel. This will\n # fail if not being executed within a bazel sandbox.\n # 3. Computing the path relative to the location of this file. This is the\n # fallback approach that is used for when the code has been installed\n # by setuptools.\n runfiles_path = os.environ.get(\"COMPILER_GYM_RUNFILES\")\n if runfiles_path:\n return Path(runfiles_path) / relpath\n else:\n try:\n from rules_python.python.runfiles import runfiles\n\n return Path(\n runfiles.Create().Rlocation(\n \"CompilerGym\" if relpath == \".\" else f\"CompilerGym/{relpath}\"\n )\n )\n except (ModuleNotFoundError, TypeError):\n return _PACKAGE_ROOT / relpath\n\n\ndef site_data_path(relpath: str) -> Path:\n \"\"\"Return a path within the site data directory.\n\n CompilerGym uses a directory to store persistent site data files in, such as benchmark datasets.\n The default location is :code:`~/.local/share/compiler_gym`. Set the environment variable\n :code:`$COMPILER_GYM_SITE_DATA` to override this default location.\n\n No checks are to made to ensure that the path, or the containing directory,\n exist.\n\n :param relpath: The relative path within the site data tree.\n\n :return: An absolute path.\n \"\"\"\n # NOTE(cummins): This function has a matching implementation in the C++\n # sources, compiler_gym::service::getSiteDataPath(). Any change to behavior\n # here must be reflected in the C++ version.\n forced = os.environ.get(\"COMPILER_GYM_SITE_DATA\")\n if forced:\n return Path(forced) / relpath\n elif os.environ.get(\"HOME\"):\n return Path(\"~/.local/share/compiler_gym\").expanduser() / relpath\n else:\n return Path(f\"/tmp/compiler_gym_{getuser()}/site_data\") / relpath\n\n\ndef cache_path(relpath: str) -> Path:\n \"\"\"Return a path within the cache directory.\n\n CompilerGym uses a directory to cache files in, such as downloaded content.\n The default location for this cache is :code:`~/.cache/compiler_gym`. 
Set\n the environment variable :code:`$COMPILER_GYM_CACHE` to override this\n default location.\n\n No checks are to made to ensure that the path, or the containing directory,\n exist.\n\n :param relpath: The relative path within the cache tree.\n\n :return: An absolute path.\n \"\"\"\n forced = os.environ.get(\"COMPILER_GYM_CACHE\")\n if forced:\n return Path(forced) / relpath\n elif os.environ.get(\"HOME\"):\n return Path(\"~/.cache/compiler_gym\").expanduser() / relpath\n else:\n return Path(f\"/tmp/compiler_gym_{getuser()}/cache\") / relpath\n\n\ndef transient_cache_path(relpath: str) -> Path:\n \"\"\"Return a path within the transient cache directory.\n\n The transient cache is a directory used to store files that do not need to\n persist beyond the lifetime of the current process. When available, the\n temporary filesystem :code:`/dev/shm` will be used. Else,\n :meth:`cache_path() <compiler_gym.cache_path>` is used as a fallback. Set\n the environment variable :code:`$COMPILER_GYM_TRANSIENT_CACHE` to override\n the default location.\n\n No checks are to made to ensure that the path, or the containing directory,\n exist.\n\n :param relpath: The relative path within the cache tree.\n\n :return: An absolute path.\n \"\"\"\n forced = os.environ.get(\"COMPILER_GYM_TRANSIENT_CACHE\")\n if forced:\n return Path(forced) / relpath\n elif Path(\"/dev/shm\").is_dir():\n return Path(f\"/dev/shm/compiler_gym_{getuser()}\") / relpath\n else:\n # Fallback to using the regular cache.\n return cache_path(relpath)\n", "path": "compiler_gym/util/runfiles_path.py"}]} | 1,767 | 408 |
gh_patches_debug_1369 | rasdani/github-patches | git_diff | Parsl__parsl-972 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix `ModuleNotFoundError: No module named 'monitoring'`
Looks like this bug was introduced with the recent merge of monitoring back into the parsl repo.
```
Traceback (most recent call last):
File "/Users/awoodard/software/miniconda3/bin/parsl-visualize", line 11, in <module>
load_entry_point('parsl==0.7.2', 'console_scripts', 'parsl-visualize')()
File "/Users/awoodard/software/miniconda3/lib/python3.7/site-packages/pkg_resources/__init__.py", line 484, in load_entry_point
return get_distribution(dist).load_entry_point(group, name)
File "/Users/awoodard/software/miniconda3/lib/python3.7/site-packages/pkg_resources/__init__.py", line 2707, in load_entry_point
return ep.load()
File "/Users/awoodard/software/miniconda3/lib/python3.7/site-packages/pkg_resources/__init__.py", line 2325, in load
return self.resolve()
File "/Users/awoodard/software/miniconda3/lib/python3.7/site-packages/pkg_resources/__init__.py", line 2331, in resolve
module = __import__(self.module_name, fromlist=['__name__'], level=0)
ModuleNotFoundError: No module named 'monitoring'
```
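
The traceback ends inside `pkg_resources`, which points at the console-script registration rather than library code: the script was wired to a top-level `monitoring` package that no longer exists after the merge. A hedged sketch of the corrected fragment (mirroring the patch further below; everything else in `setup()` is unchanged):

```python
# The entry point must be qualified under the installed `parsl` package;
# the bare `monitoring.visualization.app` module path is what fails to import.
entry_points = {
    'console_scripts': [
        'parsl-globus-auth=parsl.data_provider.globus:cli_run',
        'parsl-visualize=parsl.monitoring.visualization.app:cli_run',
    ]
}
```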
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import setup, find_packages
2
3 with open('parsl/version.py') as f:
4 exec(f.read())
5
6 with open('requirements.txt') as f:
7 install_requires = f.readlines()
8
9 extras_require = {
10 'monitoring' : [
11 'psutil',
12 'sqlalchemy',
13 'sqlalchemy_utils',
14 'pydot',
15 'networkx',
16 'Flask',
17 'flask_sqlalchemy',
18 'pandas',
19 'plotly',
20 'python-daemon'
21 ],
22 'aws' : ['boto3'],
23 'kubernetes' : ['kubernetes'],
24 'extreme_scale' : ['mpi4py'],
25 'docs' : ['nbsphinx', 'sphinx_rtd_theme'],
26 'google_cloud' : ['google-auth', 'google-api-python-client'],
27 'gssapi' : ['python-gssapi'],
28 }
29 extras_require['all'] = sum(extras_require.values(), [])
30
31 setup(
32 name='parsl',
33 version=VERSION,
34 description='Simple data dependent workflows in Python',
35 long_description='Simple parallel workflows system for Python',
36 url='https://github.com/Parsl/parsl',
37 author='The Parsl Team',
38 author_email='[email protected]',
39 license='Apache 2.0',
40 download_url='https://github.com/Parsl/parsl/archive/{}.tar.gz'.format(VERSION),
41 include_package_data=True,
42 packages=find_packages(),
43 install_requires=install_requires,
44 scripts = ['parsl/executors/high_throughput/process_worker_pool.py',
45 'parsl/executors/extreme_scale/mpi_worker_pool.py',
46 'parsl/executors/low_latency/lowlatency_worker.py',
47 ],
48 extras_require=extras_require,
49 classifiers=[
50 # Maturity
51 'Development Status :: 3 - Alpha',
52 # Intended audience
53 'Intended Audience :: Developers',
54 # Licence, must match with licence above
55 'License :: OSI Approved :: Apache Software License',
56 # Python versions supported
57 'Programming Language :: Python :: 3.5',
58 'Programming Language :: Python :: 3.6',
59 ],
60 keywords=['Workflows', 'Scientific computing'],
61 entry_points={'console_scripts':
62 [
63 'parsl-globus-auth=parsl.data_provider.globus:cli_run',
64 'parsl-visualize=monitoring.visualization.app:cli_run',
65 ]}
66 )
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -61,6 +61,6 @@
entry_points={'console_scripts':
[
'parsl-globus-auth=parsl.data_provider.globus:cli_run',
- 'parsl-visualize=monitoring.visualization.app:cli_run',
+ 'parsl-visualize=parsl.monitoring.visualization.app:cli_run',
]}
)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -61,6 +61,6 @@\n entry_points={'console_scripts':\n [\n 'parsl-globus-auth=parsl.data_provider.globus:cli_run',\n- 'parsl-visualize=monitoring.visualization.app:cli_run',\n+ 'parsl-visualize=parsl.monitoring.visualization.app:cli_run',\n ]}\n )\n", "issue": "Fix `ModuleNotFoundError: No module named 'monitoring'`\nLooks like this bug was introduced with the recent merge of monitoring back into the parsl repo.\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/Users/awoodard/software/miniconda3/bin/parsl-visualize\", line 11, in <module>\r\n load_entry_point('parsl==0.7.2', 'console_scripts', 'parsl-visualize')()\r\n File \"/Users/awoodard/software/miniconda3/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 484, in load_entry_point\r\n return get_distribution(dist).load_entry_point(group, name)\r\n File \"/Users/awoodard/software/miniconda3/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 2707, in load_entry_point\r\n return ep.load()\r\n File \"/Users/awoodard/software/miniconda3/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 2325, in load\r\n return self.resolve()\r\n File \"/Users/awoodard/software/miniconda3/lib/python3.7/site-packages/pkg_resources/__init__.py\", line 2331, in resolve\r\n module = __import__(self.module_name, fromlist=['__name__'], level=0)\r\nModuleNotFoundError: No module named 'monitoring'\r\n```\n", "before_files": [{"content": "from setuptools import setup, find_packages\n\nwith open('parsl/version.py') as f:\n exec(f.read())\n\nwith open('requirements.txt') as f:\n install_requires = f.readlines()\n\nextras_require = {\n 'monitoring' : [\n 'psutil',\n 'sqlalchemy',\n 'sqlalchemy_utils',\n 'pydot',\n 'networkx',\n 'Flask',\n 'flask_sqlalchemy',\n 'pandas',\n 'plotly',\n 'python-daemon'\n ],\n 'aws' : ['boto3'],\n 'kubernetes' : ['kubernetes'],\n 'extreme_scale' : ['mpi4py'],\n 'docs' : ['nbsphinx', 'sphinx_rtd_theme'],\n 'google_cloud' : ['google-auth', 'google-api-python-client'],\n 'gssapi' : ['python-gssapi'],\n}\nextras_require['all'] = sum(extras_require.values(), [])\n\nsetup(\n name='parsl',\n version=VERSION,\n description='Simple data dependent workflows in Python',\n long_description='Simple parallel workflows system for Python',\n url='https://github.com/Parsl/parsl',\n author='The Parsl Team',\n author_email='[email protected]',\n license='Apache 2.0',\n download_url='https://github.com/Parsl/parsl/archive/{}.tar.gz'.format(VERSION),\n include_package_data=True,\n packages=find_packages(),\n install_requires=install_requires,\n scripts = ['parsl/executors/high_throughput/process_worker_pool.py',\n 'parsl/executors/extreme_scale/mpi_worker_pool.py',\n 'parsl/executors/low_latency/lowlatency_worker.py',\n ],\n extras_require=extras_require,\n classifiers=[\n # Maturity\n 'Development Status :: 3 - Alpha',\n # Intended audience\n 'Intended Audience :: Developers',\n # Licence, must match with licence above\n 'License :: OSI Approved :: Apache Software License',\n # Python versions supported\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords=['Workflows', 'Scientific computing'],\n entry_points={'console_scripts':\n [\n 'parsl-globus-auth=parsl.data_provider.globus:cli_run',\n 'parsl-visualize=monitoring.visualization.app:cli_run',\n ]}\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup, find_packages\n\nwith open('parsl/version.py') 
as f:\n exec(f.read())\n\nwith open('requirements.txt') as f:\n install_requires = f.readlines()\n\nextras_require = {\n 'monitoring' : [\n 'psutil',\n 'sqlalchemy',\n 'sqlalchemy_utils',\n 'pydot',\n 'networkx',\n 'Flask',\n 'flask_sqlalchemy',\n 'pandas',\n 'plotly',\n 'python-daemon'\n ],\n 'aws' : ['boto3'],\n 'kubernetes' : ['kubernetes'],\n 'extreme_scale' : ['mpi4py'],\n 'docs' : ['nbsphinx', 'sphinx_rtd_theme'],\n 'google_cloud' : ['google-auth', 'google-api-python-client'],\n 'gssapi' : ['python-gssapi'],\n}\nextras_require['all'] = sum(extras_require.values(), [])\n\nsetup(\n name='parsl',\n version=VERSION,\n description='Simple data dependent workflows in Python',\n long_description='Simple parallel workflows system for Python',\n url='https://github.com/Parsl/parsl',\n author='The Parsl Team',\n author_email='[email protected]',\n license='Apache 2.0',\n download_url='https://github.com/Parsl/parsl/archive/{}.tar.gz'.format(VERSION),\n include_package_data=True,\n packages=find_packages(),\n install_requires=install_requires,\n scripts = ['parsl/executors/high_throughput/process_worker_pool.py',\n 'parsl/executors/extreme_scale/mpi_worker_pool.py',\n 'parsl/executors/low_latency/lowlatency_worker.py',\n ],\n extras_require=extras_require,\n classifiers=[\n # Maturity\n 'Development Status :: 3 - Alpha',\n # Intended audience\n 'Intended Audience :: Developers',\n # Licence, must match with licence above\n 'License :: OSI Approved :: Apache Software License',\n # Python versions supported\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords=['Workflows', 'Scientific computing'],\n entry_points={'console_scripts':\n [\n 'parsl-globus-auth=parsl.data_provider.globus:cli_run',\n 'parsl-visualize=parsl.monitoring.visualization.app:cli_run',\n ]}\n)\n", "path": "setup.py"}]} | 1,207 | 99 |
gh_patches_debug_13031 | rasdani/github-patches | git_diff | inventree__InvenTree-6284 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Platform UI refuses to log out
### Please verify that this bug has NOT been raised before.
- [X] I checked and didn't find a similar issue
### Describe the bug*
Discovered when I was setting up Platform UI for dev: trying to log out simply sends you to the Home page and tells you that you were already logged in.

### Steps to Reproduce
Not sure about the exact trigger here. It's still occurring to me as it did yesterday.
### Expected behaviour
Being able to log out
### Deployment Method
- [ ] Docker
- [ ] Bare metal
### Version Information
InvenTree - inventree.org
The Open-Source Inventory Management System
Installation paths:
Base /workspaces/InvenTree
Config /workspaces/InvenTree/dev/config.yaml
Media /workspaces/InvenTree/dev/media
Static /workspaces/InvenTree/dev/static
Versions:
Python 3.10.10
Django 3.2.23
InvenTree 0.13.0 dev
API 152
Node v20.9.0
Yarn 1.22.19
Commit hash:dabd95d
Commit date:2023-11-21
### Please verify if you can reproduce this bug on the demo site.
- [ ] I can reproduce this bug on the demo site.
### Relevant log output
```shell
Created new API token for user 'admin' (name='inventree-web-app')
[22/Nov/2023 17:23:56] "GET /api/user/token/?name=inventree-web-app HTTP/1.1" 200 114
[22/Nov/2023 17:23:56] "GET /api/user/me/ HTTP/1.1" 200 134
[22/Nov/2023 17:23:56] "GET /api/notifications/?read=false&limit=1 HTTP/1.1" 200 52
[22/Nov/2023 17:23:57] "GET /api/user/roles/ HTTP/1.1" 200 527
[22/Nov/2023 17:23:57] "GET /api/settings/global/ HTTP/1.1" 200 27344
Created new API token for user 'admin' (name='inventree-web-app')
[22/Nov/2023 17:23:57] "GET /api/user/token/?name=inventree-web-app HTTP/1.1" 200 114
Background worker check failed
Email backend not configured
InvenTree system health checks failed
[22/Nov/2023 17:23:57] "GET /api/ HTTP/1.1" 200 1145
[22/Nov/2023 17:23:57] "GET /api/user/me/ HTTP/1.1" 200 134
[22/Nov/2023 17:23:57] "GET /api/generic/status/ HTTP/1.1" 200 5851
[22/Nov/2023 17:23:57] "GET /api/user/roles/ HTTP/1.1" 200 527
Background worker check failed
Email backend not configured
InvenTree system health checks failed
[22/Nov/2023 17:23:58] "GET /api/settings/global/ HTTP/1.1" 200 27344
[22/Nov/2023 17:23:58] "GET /api/ HTTP/1.1" 200 1145
[22/Nov/2023 17:23:58] "GET /api/settings/user/ HTTP/1.1" 200 13878
[22/Nov/2023 17:23:58] "GET /api/generic/status/ HTTP/1.1" 200 5851
[22/Nov/2023 17:23:58] "GET /api/settings/user/ HTTP/1.1" 200 13878
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `InvenTree/web/urls.py`
Content:
```
1 """URLs for web app."""
2
3 from django.conf import settings
4 from django.shortcuts import redirect
5 from django.urls import include, path
6 from django.views.decorators.csrf import ensure_csrf_cookie
7 from django.views.generic import TemplateView
8
9
10 class RedirectAssetView(TemplateView):
11 """View to redirect to static asset."""
12
13 def get(self, request, *args, **kwargs):
14 """Redirect to static asset."""
15 return redirect(
16 f"{settings.STATIC_URL}web/assets/{kwargs['path']}", permanent=True
17 )
18
19
20 spa_view = ensure_csrf_cookie(TemplateView.as_view(template_name='web/index.html'))
21 assets_path = path('assets/<path:path>', RedirectAssetView.as_view())
22
23
24 urlpatterns = [
25 path(
26 f'{settings.FRONTEND_URL_BASE}/',
27 include([
28 assets_path,
29 path(
30 'set-password?uid=<uid>&token=<token>',
31 spa_view,
32 name='password_reset_confirm',
33 ),
34 path('', spa_view),
35 ]),
36 ),
37 assets_path,
38 path(settings.FRONTEND_URL_BASE, spa_view, name='platform'),
39 ]
40
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/InvenTree/web/urls.py b/InvenTree/web/urls.py
--- a/InvenTree/web/urls.py
+++ b/InvenTree/web/urls.py
@@ -2,7 +2,7 @@
from django.conf import settings
from django.shortcuts import redirect
-from django.urls import include, path
+from django.urls import include, path, re_path
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.generic import TemplateView
@@ -31,7 +31,7 @@
spa_view,
name='password_reset_confirm',
),
- path('', spa_view),
+ re_path('.*', spa_view),
]),
),
assets_path,
| {"golden_diff": "diff --git a/InvenTree/web/urls.py b/InvenTree/web/urls.py\n--- a/InvenTree/web/urls.py\n+++ b/InvenTree/web/urls.py\n@@ -2,7 +2,7 @@\n \n from django.conf import settings\n from django.shortcuts import redirect\n-from django.urls import include, path\n+from django.urls import include, path, re_path\n from django.views.decorators.csrf import ensure_csrf_cookie\n from django.views.generic import TemplateView\n \n@@ -31,7 +31,7 @@\n spa_view,\n name='password_reset_confirm',\n ),\n- path('', spa_view),\n+ re_path('.*', spa_view),\n ]),\n ),\n assets_path,\n", "issue": "Platform UI refuses to log out\n### Please verify that this bug has NOT been raised before.\n\n- [X] I checked and didn't find a similar issue\n\n### Describe the bug*\n\nDiscovered when I was setting up Platorm UI for dev, trying to log out simply sends you to the Home page and tells you that you were already logged in\r\n\r\n\n\n### Steps to Reproduce\n\nNot sure about the exact trigger here. It's still occuring to me as it did yesterday.\r\n\n\n### Expected behaviour\n\nBeing able to log out\n\n### Deployment Method\n\n- [ ] Docker\n- [ ] Bare metal\n\n### Version Information\n\nInvenTree - inventree.org\r\nThe Open-Source Inventory Management System\r\n\r\n\r\nInstallation paths:\r\nBase /workspaces/InvenTree\r\nConfig /workspaces/InvenTree/dev/config.yaml\r\nMedia /workspaces/InvenTree/dev/media\r\nStatic /workspaces/InvenTree/dev/static\r\n\r\nVersions:\r\nPython 3.10.10\r\nDjango 3.2.23\r\nInvenTree 0.13.0 dev\r\nAPI 152\r\nNode v20.9.0\r\nYarn 1.22.19\r\n\r\nCommit hash:dabd95d\r\nCommit date:2023-11-21\n\n### Please verify if you can reproduce this bug on the demo site.\n\n- [ ] I can reproduce this bug on the demo site.\n\n### Relevant log output\n\n```shell\nCreated new API token for user 'admin' (name='inventree-web-app')\r\n[22/Nov/2023 17:23:56] \"GET /api/user/token/?name=inventree-web-app HTTP/1.1\" 200 114\r\n[22/Nov/2023 17:23:56] \"GET /api/user/me/ HTTP/1.1\" 200 134\r\n[22/Nov/2023 17:23:56] \"GET /api/notifications/?read=false&limit=1 HTTP/1.1\" 200 52\r\n[22/Nov/2023 17:23:57] \"GET /api/user/roles/ HTTP/1.1\" 200 527\r\n[22/Nov/2023 17:23:57] \"GET /api/settings/global/ HTTP/1.1\" 200 27344\r\nCreated new API token for user 'admin' (name='inventree-web-app')\r\n[22/Nov/2023 17:23:57] \"GET /api/user/token/?name=inventree-web-app HTTP/1.1\" 200 114\r\nBackground worker check failed\r\nEmail backend not configured\r\nInvenTree system health checks failed\r\n[22/Nov/2023 17:23:57] \"GET /api/ HTTP/1.1\" 200 1145\r\n[22/Nov/2023 17:23:57] \"GET /api/user/me/ HTTP/1.1\" 200 134\r\n[22/Nov/2023 17:23:57] \"GET /api/generic/status/ HTTP/1.1\" 200 5851\r\n[22/Nov/2023 17:23:57] \"GET /api/user/roles/ HTTP/1.1\" 200 527\r\nBackground worker check failed\r\nEmail backend not configured\r\nInvenTree system health checks failed\r\n[22/Nov/2023 17:23:58] \"GET /api/settings/global/ HTTP/1.1\" 200 27344\r\n[22/Nov/2023 17:23:58] \"GET /api/ HTTP/1.1\" 200 1145\r\n[22/Nov/2023 17:23:58] \"GET /api/settings/user/ HTTP/1.1\" 200 13878\r\n[22/Nov/2023 17:23:58] \"GET /api/generic/status/ HTTP/1.1\" 200 5851\r\n[22/Nov/2023 17:23:58] \"GET /api/settings/user/ HTTP/1.1\" 200 13878\n```\n\n", "before_files": [{"content": "\"\"\"URLs for web app.\"\"\"\n\nfrom django.conf import settings\nfrom django.shortcuts import redirect\nfrom django.urls import include, path\nfrom django.views.decorators.csrf import ensure_csrf_cookie\nfrom django.views.generic import TemplateView\n\n\nclass 
RedirectAssetView(TemplateView):\n \"\"\"View to redirect to static asset.\"\"\"\n\n def get(self, request, *args, **kwargs):\n \"\"\"Redirect to static asset.\"\"\"\n return redirect(\n f\"{settings.STATIC_URL}web/assets/{kwargs['path']}\", permanent=True\n )\n\n\nspa_view = ensure_csrf_cookie(TemplateView.as_view(template_name='web/index.html'))\nassets_path = path('assets/<path:path>', RedirectAssetView.as_view())\n\n\nurlpatterns = [\n path(\n f'{settings.FRONTEND_URL_BASE}/',\n include([\n assets_path,\n path(\n 'set-password?uid=<uid>&token=<token>',\n spa_view,\n name='password_reset_confirm',\n ),\n path('', spa_view),\n ]),\n ),\n assets_path,\n path(settings.FRONTEND_URL_BASE, spa_view, name='platform'),\n]\n", "path": "InvenTree/web/urls.py"}], "after_files": [{"content": "\"\"\"URLs for web app.\"\"\"\n\nfrom django.conf import settings\nfrom django.shortcuts import redirect\nfrom django.urls import include, path, re_path\nfrom django.views.decorators.csrf import ensure_csrf_cookie\nfrom django.views.generic import TemplateView\n\n\nclass RedirectAssetView(TemplateView):\n \"\"\"View to redirect to static asset.\"\"\"\n\n def get(self, request, *args, **kwargs):\n \"\"\"Redirect to static asset.\"\"\"\n return redirect(\n f\"{settings.STATIC_URL}web/assets/{kwargs['path']}\", permanent=True\n )\n\n\nspa_view = ensure_csrf_cookie(TemplateView.as_view(template_name='web/index.html'))\nassets_path = path('assets/<path:path>', RedirectAssetView.as_view())\n\n\nurlpatterns = [\n path(\n f'{settings.FRONTEND_URL_BASE}/',\n include([\n assets_path,\n path(\n 'set-password?uid=<uid>&token=<token>',\n spa_view,\n name='password_reset_confirm',\n ),\n re_path('.*', spa_view),\n ]),\n ),\n assets_path,\n path(settings.FRONTEND_URL_BASE, spa_view, name='platform'),\n]\n", "path": "InvenTree/web/urls.py"}]} | 1,678 | 155 |
gh_patches_debug_24482 | rasdani/github-patches | git_diff | sunpy__sunpy-3515 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error in documentation for "Finding bright regions with ndimage" example.
<!-- These comments are hidden when you submit the issue, so you do not need to remove them!
Please be sure to check out our contributing guidelines: https://github.com/sunpy/sunpy/blob/master/CONTRIBUTING.rst
Please be sure to check out our code of conduct:
https://github.com/sunpy/sunpy/blob/master/CODE_OF_CONDUCT.rst -->
<!-- Please have a search on our GitHub repository to see if a similar issue has already been posted.
If a similar issue is closed, have a quick look to see if you are satisfied by the resolution.
If not please go ahead and open an issue! -->
### Description
<!-- Provide a general description of the bug. -->
There seems to be an error in the documentation for the "Finding bright regions with ndimage" example.
In the part where a mask is made, the surrounding text states: "We choose the criterion that the data should be at least 5% of the maximum value." However, if you look at the code immediately below, the threshold is based on 10% of the maximum value:
`mask = aiamap.data < aiamap.max() * 0.10`
### Expected behavior
<!-- What did you expect to happen. -->
Documentation needs to be modified to reflect that the threshold is 10% of the maximum value.
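
For illustration, a minimal sketch of the two self-consistent options (assuming `aiamap` is the sample AIA 193 map already loaded in the example):

```python
# Option A (what the docs fix amounts to): keep the code, say 10% in the prose.
mask = aiamap.data < aiamap.max() * 0.10  # pixels below 10% of the max get masked

# Option B (not taken here): change the code to match the existing 5% prose.
# mask = aiamap.data < aiamap.max() * 0.05
```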
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/map/image_bright_regions_gallery_example.py`
Content:
```
1 # coding: utf-8
2 """
3 ===================================
4 Finding bright regions with ndimage
5 ===================================
6
7 How you can find the brightest regions in an AIA image and
8 count the approximate number of regions of interest using ndimage.
9 """
10 # sphinx_gallery_thumbnail_number = 2
11
12 from scipy import ndimage
13 import matplotlib.pyplot as plt
14
15 import sunpy.map
16 from sunpy.data.sample import AIA_193_IMAGE
17
18 ###############################################################################
19 # We start with the sample data
20 aiamap_mask = sunpy.map.Map(AIA_193_IMAGE)
21 aiamap = sunpy.map.Map(AIA_193_IMAGE)
22
23 ##############################################################################
24 # First we make a mask, which tells us which regions are bright. We
25 # choose the criterion that the data should be at least 5% of the maximum
26 # value. Pixels with intensity values greater than this are included in the
27 # mask, while all other pixels are excluded.
28 mask = aiamap.data < aiamap.max() * 0.10
29
30 ##############################################################################
31 # Mask is a `boolean` array. It can be used to modify the original map object
32 # without modifying the data. Once this mask attribute is set, we can plot the
33 # image again.
34 aiamap_mask.mask = mask
35 plt.figure()
36 aiamap.plot()
37 plt.colorbar()
38 plt.show()
39
40 ##############################################################################
41 # Only the brightest pixels remain in the image.
42 # However, these areas are artificially broken up into small regions.
43 # We can solve this by applying some smoothing to the image data.
44 # Here we apply a 2D Gaussian smoothing function to the data.
45 data2 = ndimage.gaussian_filter(aiamap.data * ~mask, 14)
46
47 ##############################################################################
48 # The issue with the filtering is that it create pixels where the values are
49 # small (<100), so when we go on later to label this array,
50 # we get one large region which encompasses the entire array.
51 # If you want to see, just remove this line.
52 data2[data2 < 100] = 0
53
54 ##############################################################################
55 # Now we will make a second SunPy map with this smoothed data.
56 aiamap2 = sunpy.map.Map(data2, aiamap.meta)
57
58 ##############################################################################
59 # The function `label` from the `scipy.ndimage` module, counts the number of
60 # contiguous regions in an image.
61 labels, n = ndimage.label(aiamap2.data)
62
63 ##############################################################################
64 # Finally, we plot the smoothed bright image data, along with the estimate of
65 # the number of distinct regions. We can see that approximately 6 distinct hot
66 # regions are present above the 5% of the maximum level.
67 plt.figure()
68 ax = plt.subplot(projection=aiamap)
69 aiamap.plot()
70 plt.contour(labels)
71 plt.figtext(0.3, 0.2, f'Number of regions = {n}', color='white')
72 plt.show()
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/map/image_bright_regions_gallery_example.py b/examples/map/image_bright_regions_gallery_example.py
--- a/examples/map/image_bright_regions_gallery_example.py
+++ b/examples/map/image_bright_regions_gallery_example.py
@@ -22,7 +22,7 @@
##############################################################################
# First we make a mask, which tells us which regions are bright. We
-# choose the criterion that the data should be at least 5% of the maximum
+# choose the criterion that the data should be at least 10% of the maximum
# value. Pixels with intensity values greater than this are included in the
# mask, while all other pixels are excluded.
mask = aiamap.data < aiamap.max() * 0.10
@@ -63,7 +63,7 @@
##############################################################################
# Finally, we plot the smoothed bright image data, along with the estimate of
# the number of distinct regions. We can see that approximately 6 distinct hot
-# regions are present above the 5% of the maximum level.
+# regions are present above the 10% of the maximum level.
plt.figure()
ax = plt.subplot(projection=aiamap)
aiamap.plot()
| {"golden_diff": "diff --git a/examples/map/image_bright_regions_gallery_example.py b/examples/map/image_bright_regions_gallery_example.py\n--- a/examples/map/image_bright_regions_gallery_example.py\n+++ b/examples/map/image_bright_regions_gallery_example.py\n@@ -22,7 +22,7 @@\n \n ##############################################################################\n # First we make a mask, which tells us which regions are bright. We\n-# choose the criterion that the data should be at least 5% of the maximum\n+# choose the criterion that the data should be at least 10% of the maximum\n # value. Pixels with intensity values greater than this are included in the\n # mask, while all other pixels are excluded.\n mask = aiamap.data < aiamap.max() * 0.10\n@@ -63,7 +63,7 @@\n ##############################################################################\n # Finally, we plot the smoothed bright image data, along with the estimate of\n # the number of distinct regions. We can see that approximately 6 distinct hot\n-# regions are present above the 5% of the maximum level.\n+# regions are present above the 10% of the maximum level.\n plt.figure()\n ax = plt.subplot(projection=aiamap)\n aiamap.plot()\n", "issue": "Error in documentation for \"Finding bright regions with ndimage\" example.\n<!-- This comments are hidden when you submit the issue so you do not need to remove them!\r\nPlease be sure to check out our contributing guidelines: https://github.com/sunpy/sunpy/blob/master/CONTRIBUTING.rst\r\nPlease be sure to check out our code of conduct:\r\nhttps://github.com/sunpy/sunpy/blob/master/CODE_OF_CONDUCT.rst -->\r\n\r\n<!-- Please have a search on our GitHub repository to see if a similar issue has already been posted.\r\nIf a similar issue is closed, have a quick look to see if you are satisfied by the resolution.\r\nIf not please go ahead and open an issue! -->\r\n\r\n### Description\r\n<!-- Provide a general description of the bug. -->\r\nThere seems to be an error in the documentation for the \"Finding bright regions with ndimage\" example.\r\n\r\nIn the part where a mask is made, the surrounding text states: \" We choose the criterion that the data should be at least 5% of the maximum value. \" However, if you look at the code immediately below, the threshold is based off 10% the max value:\r\n`mask = aiamap.data < aiamap.max() * 0.10`\r\n\r\n### Expected behavior\r\n<!-- What did you expect to happen. -->\r\n\r\nDocumentation needs to be modified to reflect that the threshold is based off a 10% threshold. \n", "before_files": [{"content": "# coding: utf-8\n\"\"\"\n===================================\nFinding bright regions with ndimage\n===================================\n\nHow you can to find the brightest regions in an AIA image and\ncount the approximate number of regions of interest using ndimage.\n\"\"\"\n# sphinx_gallery_thumbnail_number = 2\n\nfrom scipy import ndimage\nimport matplotlib.pyplot as plt\n\nimport sunpy.map\nfrom sunpy.data.sample import AIA_193_IMAGE\n\n###############################################################################\n# We start with the sample data\naiamap_mask = sunpy.map.Map(AIA_193_IMAGE)\naiamap = sunpy.map.Map(AIA_193_IMAGE)\n\n##############################################################################\n# First we make a mask, which tells us which regions are bright. We\n# choose the criterion that the data should be at least 5% of the maximum\n# value. 
Pixels with intensity values greater than this are included in the\n# mask, while all other pixels are excluded.\nmask = aiamap.data < aiamap.max() * 0.10\n\n##############################################################################\n# Mask is a `boolean` array. It can be used to modify the original map object\n# without modifying the data. Once this mask attribute is set, we can plot the\n# image again.\naiamap_mask.mask = mask\nplt.figure()\naiamap.plot()\nplt.colorbar()\nplt.show()\n\n##############################################################################\n# Only the brightest pixels remain in the image.\n# However, these areas are artificially broken up into small regions.\n# We can solve this by applying some smoothing to the image data.\n# Here we apply a 2D Gaussian smoothing function to the data.\ndata2 = ndimage.gaussian_filter(aiamap.data * ~mask, 14)\n\n##############################################################################\n# The issue with the filtering is that it create pixels where the values are\n# small (<100), so when we go on later to label this array,\n# we get one large region which encompasses the entire array.\n# If you want to see, just remove this line.\ndata2[data2 < 100] = 0\n\n##############################################################################\n# Now we will make a second SunPy map with this smoothed data.\naiamap2 = sunpy.map.Map(data2, aiamap.meta)\n\n##############################################################################\n# The function `label` from the `scipy.ndimage` module, counts the number of\n# contiguous regions in an image.\nlabels, n = ndimage.label(aiamap2.data)\n\n##############################################################################\n# Finally, we plot the smoothed bright image data, along with the estimate of\n# the number of distinct regions. We can see that approximately 6 distinct hot\n# regions are present above the 5% of the maximum level.\nplt.figure()\nax = plt.subplot(projection=aiamap)\naiamap.plot()\nplt.contour(labels)\nplt.figtext(0.3, 0.2, f'Number of regions = {n}', color='white')\nplt.show()\n", "path": "examples/map/image_bright_regions_gallery_example.py"}], "after_files": [{"content": "# coding: utf-8\n\"\"\"\n===================================\nFinding bright regions with ndimage\n===================================\n\nHow you can to find the brightest regions in an AIA image and\ncount the approximate number of regions of interest using ndimage.\n\"\"\"\n# sphinx_gallery_thumbnail_number = 2\n\nfrom scipy import ndimage\nimport matplotlib.pyplot as plt\n\nimport sunpy.map\nfrom sunpy.data.sample import AIA_193_IMAGE\n\n###############################################################################\n# We start with the sample data\naiamap_mask = sunpy.map.Map(AIA_193_IMAGE)\naiamap = sunpy.map.Map(AIA_193_IMAGE)\n\n##############################################################################\n# First we make a mask, which tells us which regions are bright. We\n# choose the criterion that the data should be at least 10% of the maximum\n# value. Pixels with intensity values greater than this are included in the\n# mask, while all other pixels are excluded.\nmask = aiamap.data < aiamap.max() * 0.10\n\n##############################################################################\n# Mask is a `boolean` array. It can be used to modify the original map object\n# without modifying the data. 
Once this mask attribute is set, we can plot the\n# image again.\naiamap_mask.mask = mask\nplt.figure()\naiamap.plot()\nplt.colorbar()\nplt.show()\n\n##############################################################################\n# Only the brightest pixels remain in the image.\n# However, these areas are artificially broken up into small regions.\n# We can solve this by applying some smoothing to the image data.\n# Here we apply a 2D Gaussian smoothing function to the data.\ndata2 = ndimage.gaussian_filter(aiamap.data * ~mask, 14)\n\n##############################################################################\n# The issue with the filtering is that it create pixels where the values are\n# small (<100), so when we go on later to label this array,\n# we get one large region which encompasses the entire array.\n# If you want to see, just remove this line.\ndata2[data2 < 100] = 0\n\n##############################################################################\n# Now we will make a second SunPy map with this smoothed data.\naiamap2 = sunpy.map.Map(data2, aiamap.meta)\n\n##############################################################################\n# The function `label` from the `scipy.ndimage` module, counts the number of\n# contiguous regions in an image.\nlabels, n = ndimage.label(aiamap2.data)\n\n##############################################################################\n# Finally, we plot the smoothed bright image data, along with the estimate of\n# the number of distinct regions. We can see that approximately 6 distinct hot\n# regions are present above the 10% of the maximum level.\nplt.figure()\nax = plt.subplot(projection=aiamap)\naiamap.plot()\nplt.contour(labels)\nplt.figtext(0.3, 0.2, f'Number of regions = {n}', color='white')\nplt.show()\n", "path": "examples/map/image_bright_regions_gallery_example.py"}]} | 1,311 | 259 |
gh_patches_debug_7546 | rasdani/github-patches | git_diff | akvo__akvo-rsr-1594 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add content_owner to organisation REST API filters
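
For illustration, the kind of query this change should make possible (the host and the organisation id below are placeholders, not real values):

```python
import requests

# Hypothetical request once 'content_owner' is added to filter_fields:
resp = requests.get(
    'http://rsr.example.org/rest/v1/organisation/',
    params={'content_owner': 42, 'format': 'json'},
)
```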
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rest/views/organisation.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7 from django.conf import settings
8
9 from rest_framework.compat import etree, six
10 from rest_framework.exceptions import ParseError
11 from rest_framework.parsers import XMLParser, JSONParser
12
13 from akvo.rsr.models import Organisation, Country
14
15 from ..serializers import OrganisationSerializer
16 from ..viewsets import BaseRSRViewSet
17
18
19 class AkvoOrganisationParser(XMLParser):
20 def parse(self, stream, media_type=None, parser_context=None):
21 assert etree, 'XMLParser requires defusedxml to be installed'
22
23 parser_context = parser_context or {}
24 encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
25 parser = etree.DefusedXMLParser(encoding=encoding)
26 try:
27 tree = etree.parse(stream, parser=parser, forbid_dtd=True)
28 except (etree.ParseError, ValueError) as exc:
29 raise ParseError('XML parse error - %s' % six.text_type(exc))
30 return self.organisation_data_from_etree(tree.getroot())
31
32 def organisation_data_from_etree(self, tree):
33 def find_text(tree, str):
34 element = tree.find(str)
35 if element is None:
36 return ''
37 return element.text.strip() if element.text else ""
38
39 def location_data(location_tree):
40 if location_tree is None:
41 return []
42 iso_code = find_text(location_tree, 'iso_code').lower()
43 country, created = Country.objects.get_or_create(**Country.fields_from_iso_code(iso_code))
44 country = country.id
45 latitude = find_text(location_tree, 'latitude') or 0
46 longitude = find_text(location_tree, 'longitude') or 0
47 primary = True
48 return [dict(latitude=latitude, longitude=longitude, country=country, primary=primary)]
49
50 #id = find_text(tree, 'org_id')
51 long_name = find_text(tree, 'name')
52 name = long_name[:25]
53 description = find_text(tree, 'description')
54 url = find_text(tree, 'url')
55 iati_type = find_text(tree, 'iati_organisation_type')
56 new_organisation_type = int(iati_type) if iati_type else 22
57 organisation_type = Organisation.org_type_from_iati_type(new_organisation_type)
58 locations = location_data(tree.find('location/object'))
59 return dict(
60 name=name, long_name=long_name, description=description, url=url,
61 organisation_type=organisation_type, new_organisation_type=new_organisation_type,
62 locations=locations
63 )
64
65
66 class OrganisationViewSet(BaseRSRViewSet):
67 """
68 API endpoint that allows organisations to be viewed or edited.
69 """
70 queryset = Organisation.objects.all()
71 serializer_class = OrganisationSerializer
72 parser_classes = (AkvoOrganisationParser, JSONParser,)
73 filter_fields = ('name', 'long_name', 'iati_org_id', )
74
75 def get_queryset(self):
76 """ Enable filtering of Organisations on iati_org_id or name
77 """
78 queryset = super(OrganisationViewSet, self).get_queryset()
79 pk = self.request.QUERY_PARAMS.get('id', None)
80 if pk is not None:
81 try:
82 queryset = queryset.filter(pk=pk)
83 except ValueError:
84 pass
85 iati_org_id = self.request.QUERY_PARAMS.get('iati_org_id', None)
86 if iati_org_id is not None:
87 queryset = queryset.filter(iati_org_id=iati_org_id)
88 name = self.request.QUERY_PARAMS.get('name', None)
89 if name is not None:
90 queryset = queryset.filter(name=name)
91 long_name = self.request.QUERY_PARAMS.get('long_name', None)
92 if long_name is not None:
93 queryset = queryset.filter(long_name=long_name)
94 return queryset
95
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/akvo/rest/views/organisation.py b/akvo/rest/views/organisation.py
--- a/akvo/rest/views/organisation.py
+++ b/akvo/rest/views/organisation.py
@@ -70,7 +70,7 @@
queryset = Organisation.objects.all()
serializer_class = OrganisationSerializer
parser_classes = (AkvoOrganisationParser, JSONParser,)
- filter_fields = ('name', 'long_name', 'iati_org_id', )
+ filter_fields = ('name', 'long_name', 'iati_org_id', 'content_owner')
def get_queryset(self):
""" Enable filtering of Organisations on iati_org_id or name
| {"golden_diff": "diff --git a/akvo/rest/views/organisation.py b/akvo/rest/views/organisation.py\n--- a/akvo/rest/views/organisation.py\n+++ b/akvo/rest/views/organisation.py\n@@ -70,7 +70,7 @@\n queryset = Organisation.objects.all()\n serializer_class = OrganisationSerializer\n parser_classes = (AkvoOrganisationParser, JSONParser,)\n- filter_fields = ('name', 'long_name', 'iati_org_id', )\n+ filter_fields = ('name', 'long_name', 'iati_org_id', 'content_owner')\n \n def get_queryset(self):\n \"\"\" Enable filtering of Organisations on iati_org_id or name\n", "issue": "Add content_owner to organisation REST API filters\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom django.conf import settings\n\nfrom rest_framework.compat import etree, six\nfrom rest_framework.exceptions import ParseError\nfrom rest_framework.parsers import XMLParser, JSONParser\n\nfrom akvo.rsr.models import Organisation, Country\n\nfrom ..serializers import OrganisationSerializer\nfrom ..viewsets import BaseRSRViewSet\n\n\nclass AkvoOrganisationParser(XMLParser):\n def parse(self, stream, media_type=None, parser_context=None):\n assert etree, 'XMLParser requires defusedxml to be installed'\n\n parser_context = parser_context or {}\n encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)\n parser = etree.DefusedXMLParser(encoding=encoding)\n try:\n tree = etree.parse(stream, parser=parser, forbid_dtd=True)\n except (etree.ParseError, ValueError) as exc:\n raise ParseError('XML parse error - %s' % six.text_type(exc))\n return self.organisation_data_from_etree(tree.getroot())\n\n def organisation_data_from_etree(self, tree):\n def find_text(tree, str):\n element = tree.find(str)\n if element is None:\n return ''\n return element.text.strip() if element.text else \"\"\n\n def location_data(location_tree):\n if location_tree is None:\n return []\n iso_code = find_text(location_tree, 'iso_code').lower()\n country, created = Country.objects.get_or_create(**Country.fields_from_iso_code(iso_code))\n country = country.id\n latitude = find_text(location_tree, 'latitude') or 0\n longitude = find_text(location_tree, 'longitude') or 0\n primary = True\n return [dict(latitude=latitude, longitude=longitude, country=country, primary=primary)]\n\n #id = find_text(tree, 'org_id')\n long_name = find_text(tree, 'name')\n name = long_name[:25]\n description = find_text(tree, 'description')\n url = find_text(tree, 'url')\n iati_type = find_text(tree, 'iati_organisation_type')\n new_organisation_type = int(iati_type) if iati_type else 22\n organisation_type = Organisation.org_type_from_iati_type(new_organisation_type)\n locations = location_data(tree.find('location/object'))\n return dict(\n name=name, long_name=long_name, description=description, url=url,\n organisation_type=organisation_type, new_organisation_type=new_organisation_type,\n locations=locations\n )\n\n\nclass OrganisationViewSet(BaseRSRViewSet):\n \"\"\"\n API endpoint that allows organisations to be viewed or edited.\n \"\"\"\n queryset = Organisation.objects.all()\n serializer_class = OrganisationSerializer\n parser_classes = (AkvoOrganisationParser, JSONParser,)\n filter_fields = ('name', 'long_name', 'iati_org_id', )\n\n def get_queryset(self):\n \"\"\" Enable filtering of 
Organisations on iati_org_id or name\n \"\"\"\n queryset = super(OrganisationViewSet, self).get_queryset()\n pk = self.request.QUERY_PARAMS.get('id', None)\n if pk is not None:\n try:\n queryset = queryset.filter(pk=pk)\n except ValueError:\n pass\n iati_org_id = self.request.QUERY_PARAMS.get('iati_org_id', None)\n if iati_org_id is not None:\n queryset = queryset.filter(iati_org_id=iati_org_id)\n name = self.request.QUERY_PARAMS.get('name', None)\n if name is not None:\n queryset = queryset.filter(name=name)\n long_name = self.request.QUERY_PARAMS.get('long_name', None)\n if long_name is not None:\n queryset = queryset.filter(long_name=long_name)\n return queryset\n", "path": "akvo/rest/views/organisation.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom django.conf import settings\n\nfrom rest_framework.compat import etree, six\nfrom rest_framework.exceptions import ParseError\nfrom rest_framework.parsers import XMLParser, JSONParser\n\nfrom akvo.rsr.models import Organisation, Country\n\nfrom ..serializers import OrganisationSerializer\nfrom ..viewsets import BaseRSRViewSet\n\n\nclass AkvoOrganisationParser(XMLParser):\n def parse(self, stream, media_type=None, parser_context=None):\n assert etree, 'XMLParser requires defusedxml to be installed'\n\n parser_context = parser_context or {}\n encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)\n parser = etree.DefusedXMLParser(encoding=encoding)\n try:\n tree = etree.parse(stream, parser=parser, forbid_dtd=True)\n except (etree.ParseError, ValueError) as exc:\n raise ParseError('XML parse error - %s' % six.text_type(exc))\n return self.organisation_data_from_etree(tree.getroot())\n\n def organisation_data_from_etree(self, tree):\n def find_text(tree, str):\n element = tree.find(str)\n if element is None:\n return ''\n return element.text.strip() if element.text else \"\"\n\n def location_data(location_tree):\n if location_tree is None:\n return []\n iso_code = find_text(location_tree, 'iso_code').lower()\n country, created = Country.objects.get_or_create(**Country.fields_from_iso_code(iso_code))\n country = country.id\n latitude = find_text(location_tree, 'latitude') or 0\n longitude = find_text(location_tree, 'longitude') or 0\n primary = True\n return [dict(latitude=latitude, longitude=longitude, country=country, primary=primary)]\n\n #id = find_text(tree, 'org_id')\n long_name = find_text(tree, 'name')\n name = long_name[:25]\n description = find_text(tree, 'description')\n url = find_text(tree, 'url')\n iati_type = find_text(tree, 'iati_organisation_type')\n new_organisation_type = int(iati_type) if iati_type else 22\n organisation_type = Organisation.org_type_from_iati_type(new_organisation_type)\n locations = location_data(tree.find('location/object'))\n return dict(\n name=name, long_name=long_name, description=description, url=url,\n organisation_type=organisation_type, new_organisation_type=new_organisation_type,\n locations=locations\n )\n\n\nclass OrganisationViewSet(BaseRSRViewSet):\n \"\"\"\n API endpoint that allows organisations to be viewed or edited.\n \"\"\"\n queryset = Organisation.objects.all()\n serializer_class = OrganisationSerializer\n parser_classes = (AkvoOrganisationParser, JSONParser,)\n filter_fields = ('name', 
'long_name', 'iati_org_id', 'content_owner')\n\n def get_queryset(self):\n \"\"\" Enable filtering of Organisations on iati_org_id or name\n \"\"\"\n queryset = super(OrganisationViewSet, self).get_queryset()\n pk = self.request.QUERY_PARAMS.get('id', None)\n if pk is not None:\n try:\n queryset = queryset.filter(pk=pk)\n except ValueError:\n pass\n iati_org_id = self.request.QUERY_PARAMS.get('iati_org_id', None)\n if iati_org_id is not None:\n queryset = queryset.filter(iati_org_id=iati_org_id)\n name = self.request.QUERY_PARAMS.get('name', None)\n if name is not None:\n queryset = queryset.filter(name=name)\n long_name = self.request.QUERY_PARAMS.get('long_name', None)\n if long_name is not None:\n queryset = queryset.filter(long_name=long_name)\n return queryset\n", "path": "akvo/rest/views/organisation.py"}]} | 1,306 | 147 |
gh_patches_debug_13060 | rasdani/github-patches | git_diff | ytdl-org__youtube-dl-3089 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AttributeError in ard module
With the command given below, I get the error message below. I'm using version 2014.06.09.
`youtube-dl http://www.ardmediathek.de/tv/Klassiker-der-Weltliteratur/Max-Frisch/BR-alpha/Video\?documentId\=19067308\&bcastId\=14913194`
```
[ARD] 19067308: Downloading webpage
[ARD] 19067308: Downloading JSON metadata
Traceback (most recent call last):
File "/usr/bin/youtube-dl", line 9, in <module>
load_entry_point('youtube-dl==2014.06.09', 'console_scripts', 'youtube-dl')()
File "/usr/lib/python3.4/site-packages/youtube_dl/__init__.py", line 853, in main
_real_main(argv)
File "/usr/lib/python3.4/site-packages/youtube_dl/__init__.py", line 843, in _real_main
retcode = ydl.download(all_urls)
File "/usr/lib/python3.4/site-packages/youtube_dl/YoutubeDL.py", line 1050, in download
self.extract_info(url)
File "/usr/lib/python3.4/site-packages/youtube_dl/YoutubeDL.py", line 516, in extract_info
ie_result = ie.extract(url)
File "/usr/lib/python3.4/site-packages/youtube_dl/extractor/common.py", line 168, in extract
return self._real_extract(url)
File "/usr/lib/python3.4/site-packages/youtube_dl/extractor/ard.py", line 66, in _real_extract
determine_ext(format['url']), format['quality'])
File "/usr/lib/python3.4/site-packages/youtube_dl/utils.py", line 845, in determine_ext
guess = url.partition(u'?')[0].rpartition(u'.')[2]
AttributeError: 'list' object has no attribute 'partition'
```
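
For context, a minimal reproduction of the failure mode (the list value is an assumption based on what ARD's media JSON can return for `_stream`):

```python
# determine_ext() calls url.partition('?'), which only works on strings.
# When ARD returns a list of stream URLs, that list reaches it unchanged:
url = ['http://example.invalid/low.mp4', 'http://example.invalid/high.mp4']
url.partition('?')  # AttributeError: 'list' object has no attribute 'partition'
```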
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `youtube_dl/extractor/ard.py`
Content:
```
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 import re
5
6 from .common import InfoExtractor
7 from ..utils import (
8 determine_ext,
9 ExtractorError,
10 )
11
12
13 class ARDIE(InfoExtractor):
14 _VALID_URL = r'^https?://(?:(?:www\.)?ardmediathek\.de|mediathek\.daserste\.de)/(?:.*/)(?P<video_id>[^/\?]+)(?:\?.*)?'
15
16 _TEST = {
17 'url': 'http://www.ardmediathek.de/das-erste/guenther-jauch/edward-snowden-im-interview-held-oder-verraeter?documentId=19288786',
18 'file': '19288786.mp4',
19 'md5': '515bf47ce209fb3f5a61b7aad364634c',
20 'info_dict': {
21 'title': 'Edward Snowden im Interview - Held oder Verräter?',
22 'description': 'Edward Snowden hat alles aufs Spiel gesetzt, um die weltweite \xdcberwachung durch die Geheimdienste zu enttarnen. Nun stellt sich der ehemalige NSA-Mitarbeiter erstmals weltweit in einem TV-Interview den Fragen eines NDR-Journalisten. Die Sendung vom Sonntagabend.',
23 'thumbnail': 'http://www.ardmediathek.de/ard/servlet/contentblob/19/28/87/90/19288790/bild/2250037',
24 },
25 'skip': 'Blocked outside of Germany',
26 }
27
28 def _real_extract(self, url):
29 # determine video id from url
30 m = re.match(self._VALID_URL, url)
31
32 numid = re.search(r'documentId=([0-9]+)', url)
33 if numid:
34 video_id = numid.group(1)
35 else:
36 video_id = m.group('video_id')
37
38 webpage = self._download_webpage(url, video_id)
39
40 title = self._html_search_regex(
41 [r'<h1(?:\s+class="boxTopHeadline")?>(.*?)</h1>',
42 r'<meta name="dcterms.title" content="(.*?)"/>',
43 r'<h4 class="headline">(.*?)</h4>'],
44 webpage, 'title')
45 description = self._html_search_meta(
46 'dcterms.abstract', webpage, 'description')
47 thumbnail = self._og_search_thumbnail(webpage)
48
49
50 media_info = self._download_json(
51 'http://www.ardmediathek.de/play/media/%s' % video_id, video_id)
52 # The second element of the _mediaArray contains the standard http urls
53 streams = media_info['_mediaArray'][1]['_mediaStreamArray']
54 if not streams:
55 if '"fsk"' in webpage:
56 raise ExtractorError('This video is only available after 20:00')
57
58 formats = []
59 for s in streams:
60 format = {
61 'quality': s['_quality'],
62 'url': s['_stream'],
63 }
64
65 format['format_id'] = '%s-%s' % (
66 determine_ext(format['url']), format['quality'])
67
68 formats.append(format)
69
70 self._sort_formats(formats)
71
72 return {
73 'id': video_id,
74 'title': title,
75 'description': description,
76 'formats': formats,
77 'thumbnail': thumbnail,
78 }
79
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/youtube_dl/extractor/ard.py b/youtube_dl/extractor/ard.py
--- a/youtube_dl/extractor/ard.py
+++ b/youtube_dl/extractor/ard.py
@@ -56,7 +56,18 @@
raise ExtractorError('This video is only available after 20:00')
formats = []
+
for s in streams:
+ if type(s['_stream']) == list:
+ for index, url in enumerate(s['_stream'][::-1]):
+ quality = s['_quality'] + index
+ formats.append({
+ 'quality': quality,
+ 'url': url,
+ 'format_id': '%s-%s' % (determine_ext(url), quality)
+ })
+ continue
+
format = {
'quality': s['_quality'],
'url': s['_stream'],
| {"golden_diff": "diff --git a/youtube_dl/extractor/ard.py b/youtube_dl/extractor/ard.py\n--- a/youtube_dl/extractor/ard.py\n+++ b/youtube_dl/extractor/ard.py\n@@ -56,7 +56,18 @@\n raise ExtractorError('This video is only available after 20:00')\n \n formats = []\n+\n for s in streams:\n+ if type(s['_stream']) == list:\n+ for index, url in enumerate(s['_stream'][::-1]):\n+ quality = s['_quality'] + index\n+ formats.append({\n+ 'quality': quality,\n+ 'url': url,\n+ 'format_id': '%s-%s' % (determine_ext(url), quality)\n+ })\n+ continue\n+\n format = {\n 'quality': s['_quality'],\n 'url': s['_stream'],\n", "issue": "AttributeError in ard module\nWith the command given below, I get the error message below. I'm using version 2014.06.09.\n\n`youtube-dl http://www.ardmediathek.de/tv/Klassiker-der-Weltliteratur/Max-Frisch/BR-alpha/Video\\?documentId\\=19067308\\&bcastId\\=14913194`\n\n```\n[ARD] 19067308: Downloading webpage\n[ARD] 19067308: Downloading JSON metadata\nTraceback (most recent call last):\n File \"/usr/bin/youtube-dl\", line 9, in <module>\n load_entry_point('youtube-dl==2014.06.09', 'console_scripts', 'youtube-dl')()\n File \"/usr/lib/python3.4/site-packages/youtube_dl/__init__.py\", line 853, in main\n _real_main(argv)\n File \"/usr/lib/python3.4/site-packages/youtube_dl/__init__.py\", line 843, in _real_main\n retcode = ydl.download(all_urls)\n File \"/usr/lib/python3.4/site-packages/youtube_dl/YoutubeDL.py\", line 1050, in download\n self.extract_info(url)\n File \"/usr/lib/python3.4/site-packages/youtube_dl/YoutubeDL.py\", line 516, in extract_info\n ie_result = ie.extract(url)\n File \"/usr/lib/python3.4/site-packages/youtube_dl/extractor/common.py\", line 168, in extract\n return self._real_extract(url)\n File \"/usr/lib/python3.4/site-packages/youtube_dl/extractor/ard.py\", line 66, in _real_extract\n determine_ext(format['url']), format['quality'])\n File \"/usr/lib/python3.4/site-packages/youtube_dl/utils.py\", line 845, in determine_ext\n guess = url.partition(u'?')[0].rpartition(u'.')[2]\nAttributeError: 'list' object has no attribute 'partition'\n```\n\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport re\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n determine_ext,\n ExtractorError,\n)\n\n\nclass ARDIE(InfoExtractor):\n _VALID_URL = r'^https?://(?:(?:www\\.)?ardmediathek\\.de|mediathek\\.daserste\\.de)/(?:.*/)(?P<video_id>[^/\\?]+)(?:\\?.*)?'\n\n _TEST = {\n 'url': 'http://www.ardmediathek.de/das-erste/guenther-jauch/edward-snowden-im-interview-held-oder-verraeter?documentId=19288786',\n 'file': '19288786.mp4',\n 'md5': '515bf47ce209fb3f5a61b7aad364634c',\n 'info_dict': {\n 'title': 'Edward Snowden im Interview - Held oder Verr\u00e4ter?',\n 'description': 'Edward Snowden hat alles aufs Spiel gesetzt, um die weltweite \\xdcberwachung durch die Geheimdienste zu enttarnen. Nun stellt sich der ehemalige NSA-Mitarbeiter erstmals weltweit in einem TV-Interview den Fragen eines NDR-Journalisten. 
Die Sendung vom Sonntagabend.',\n 'thumbnail': 'http://www.ardmediathek.de/ard/servlet/contentblob/19/28/87/90/19288790/bild/2250037',\n },\n 'skip': 'Blocked outside of Germany',\n }\n\n def _real_extract(self, url):\n # determine video id from url\n m = re.match(self._VALID_URL, url)\n\n numid = re.search(r'documentId=([0-9]+)', url)\n if numid:\n video_id = numid.group(1)\n else:\n video_id = m.group('video_id')\n\n webpage = self._download_webpage(url, video_id)\n\n title = self._html_search_regex(\n [r'<h1(?:\\s+class=\"boxTopHeadline\")?>(.*?)</h1>',\n r'<meta name=\"dcterms.title\" content=\"(.*?)\"/>',\n r'<h4 class=\"headline\">(.*?)</h4>'],\n webpage, 'title')\n description = self._html_search_meta(\n 'dcterms.abstract', webpage, 'description')\n thumbnail = self._og_search_thumbnail(webpage)\n\n\n media_info = self._download_json(\n 'http://www.ardmediathek.de/play/media/%s' % video_id, video_id)\n # The second element of the _mediaArray contains the standard http urls\n streams = media_info['_mediaArray'][1]['_mediaStreamArray']\n if not streams:\n if '\"fsk\"' in webpage:\n raise ExtractorError('This video is only available after 20:00')\n\n formats = []\n for s in streams:\n format = {\n 'quality': s['_quality'],\n 'url': s['_stream'],\n }\n\n format['format_id'] = '%s-%s' % (\n determine_ext(format['url']), format['quality'])\n\n formats.append(format)\n\n self._sort_formats(formats)\n\n return {\n 'id': video_id,\n 'title': title,\n 'description': description,\n 'formats': formats,\n 'thumbnail': thumbnail,\n }\n", "path": "youtube_dl/extractor/ard.py"}], "after_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport re\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n determine_ext,\n ExtractorError,\n)\n\n\nclass ARDIE(InfoExtractor):\n _VALID_URL = r'^https?://(?:(?:www\\.)?ardmediathek\\.de|mediathek\\.daserste\\.de)/(?:.*/)(?P<video_id>[^/\\?]+)(?:\\?.*)?'\n\n _TEST = {\n 'url': 'http://www.ardmediathek.de/das-erste/guenther-jauch/edward-snowden-im-interview-held-oder-verraeter?documentId=19288786',\n 'file': '19288786.mp4',\n 'md5': '515bf47ce209fb3f5a61b7aad364634c',\n 'info_dict': {\n 'title': 'Edward Snowden im Interview - Held oder Verr\u00e4ter?',\n 'description': 'Edward Snowden hat alles aufs Spiel gesetzt, um die weltweite \\xdcberwachung durch die Geheimdienste zu enttarnen. Nun stellt sich der ehemalige NSA-Mitarbeiter erstmals weltweit in einem TV-Interview den Fragen eines NDR-Journalisten. 
Die Sendung vom Sonntagabend.',\n 'thumbnail': 'http://www.ardmediathek.de/ard/servlet/contentblob/19/28/87/90/19288790/bild/2250037',\n },\n 'skip': 'Blocked outside of Germany',\n }\n\n def _real_extract(self, url):\n # determine video id from url\n m = re.match(self._VALID_URL, url)\n\n numid = re.search(r'documentId=([0-9]+)', url)\n if numid:\n video_id = numid.group(1)\n else:\n video_id = m.group('video_id')\n\n webpage = self._download_webpage(url, video_id)\n\n title = self._html_search_regex(\n [r'<h1(?:\\s+class=\"boxTopHeadline\")?>(.*?)</h1>',\n r'<meta name=\"dcterms.title\" content=\"(.*?)\"/>',\n r'<h4 class=\"headline\">(.*?)</h4>'],\n webpage, 'title')\n description = self._html_search_meta(\n 'dcterms.abstract', webpage, 'description')\n thumbnail = self._og_search_thumbnail(webpage)\n\n\n media_info = self._download_json(\n 'http://www.ardmediathek.de/play/media/%s' % video_id, video_id)\n # The second element of the _mediaArray contains the standard http urls\n streams = media_info['_mediaArray'][1]['_mediaStreamArray']\n if not streams:\n if '\"fsk\"' in webpage:\n raise ExtractorError('This video is only available after 20:00')\n\n formats = []\n\n for s in streams:\n if type(s['_stream']) == list:\n for index, url in enumerate(s['_stream'][::-1]):\n quality = s['_quality'] + index\n formats.append({\n 'quality': quality,\n 'url': url,\n 'format_id': '%s-%s' % (determine_ext(url), quality)\n })\n continue\n\n format = {\n 'quality': s['_quality'],\n 'url': s['_stream'],\n }\n\n format['format_id'] = '%s-%s' % (\n determine_ext(format['url']), format['quality'])\n\n formats.append(format)\n\n self._sort_formats(formats)\n\n return {\n 'id': video_id,\n 'title': title,\n 'description': description,\n 'formats': formats,\n 'thumbnail': thumbnail,\n }\n", "path": "youtube_dl/extractor/ard.py"}]} | 1,675 | 196 |
gh_patches_debug_38422 | rasdani/github-patches | git_diff | encode__starlette-105 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Credentialed CORS standard requests should not respond with wildcard origins
See https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS#Credentialed_requests_and_wildcards
If a standard request is made that includes any cookie headers, then CORSMiddleware *ought* to respond strictly with the requested origin, rather than a wildcard.
This is potentially a bit fiddly, since we may also need to *set or add* `Vary: Origin` in those cases in order to ensure correct cacheability.
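
A minimal sketch of the intended behavior, assuming a dict-like `headers` for the outgoing response and `origin` taken from the incoming request headers:

```python
# For credentialed (cookie-carrying) simple requests, echo the requested
# origin instead of "*", and add Vary: Origin so caches keep per-origin copies.
headers["Access-Control-Allow-Origin"] = origin
vary = headers.get("Vary")
headers["Vary"] = f"{vary}, Origin" if vary else "Origin"
```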
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `starlette/middleware/cors.py`
Content:
```
1 from starlette.datastructures import Headers, MutableHeaders, URL
2 from starlette.responses import PlainTextResponse
3 from starlette.types import ASGIApp, ASGIInstance, Scope
4 import functools
5 import typing
6 import re
7
8
9 ALL_METHODS = ("DELETE", "GET", "OPTIONS", "PATCH", "POST", "PUT")
10
11
12 class CORSMiddleware:
13 def __init__(
14 self,
15 app: ASGIApp,
16 allow_origins: typing.Sequence[str] = (),
17 allow_methods: typing.Sequence[str] = ("GET",),
18 allow_headers: typing.Sequence[str] = (),
19 allow_credentials: bool = False,
20 allow_origin_regex: str = None,
21 expose_headers: typing.Sequence[str] = (),
22 max_age: int = 600,
23 ) -> None:
24
25 if "*" in allow_methods:
26 allow_methods = ALL_METHODS
27
28 compiled_allow_origin_regex = None
29 if allow_origin_regex is not None:
30 compiled_allow_origin_regex = re.compile(allow_origin_regex)
31
32 simple_headers = {}
33 if "*" in allow_origins:
34 simple_headers["Access-Control-Allow-Origin"] = "*"
35 if allow_credentials:
36 simple_headers["Access-Control-Allow-Credentials"] = "true"
37 if expose_headers:
38 simple_headers["Access-Control-Expose-Headers"] = ", ".join(expose_headers)
39
40 preflight_headers = {}
41 if "*" in allow_origins:
42 preflight_headers["Access-Control-Allow-Origin"] = "*"
43 else:
44 preflight_headers["Vary"] = "Origin"
45 preflight_headers.update(
46 {
47 "Access-Control-Allow-Methods": ", ".join(allow_methods),
48 "Access-Control-Max-Age": str(max_age),
49 }
50 )
51 if allow_headers and "*" not in allow_headers:
52 preflight_headers["Access-Control-Allow-Headers"] = ", ".join(allow_headers)
53 if allow_credentials:
54 preflight_headers["Access-Control-Allow-Credentials"] = "true"
55
56 self.app = app
57 self.allow_origins = allow_origins
58 self.allow_methods = allow_methods
59 self.allow_headers = allow_headers
60 self.allow_all_origins = "*" in allow_origins
61 self.allow_all_headers = "*" in allow_headers
62 self.allow_origin_regex = compiled_allow_origin_regex
63 self.simple_headers = simple_headers
64 self.preflight_headers = preflight_headers
65
66 def __call__(self, scope: Scope):
67 if scope["type"] == "http":
68 method = scope["method"]
69 headers = Headers(scope["headers"])
70 origin = headers.get("origin")
71
72 if origin is not None:
73 if method == "OPTIONS" and "access-control-request-method" in headers:
74 return self.preflight_response(request_headers=headers)
75 else:
76 return functools.partial(
77 self.simple_response, scope=scope, origin=origin
78 )
79
80 return self.app(scope)
81
82 def is_allowed_origin(self, origin):
83 if self.allow_all_origins:
84 return True
85
86 if self.allow_origin_regex is not None and self.allow_origin_regex.match(
87 origin
88 ):
89 return True
90
91 return origin in self.allow_origins
92
93 def preflight_response(self, request_headers):
94 requested_origin = request_headers["origin"]
95 requested_method = request_headers["access-control-request-method"]
96 requested_headers = request_headers.get("access-control-request-headers")
97 requested_cookie = "cookie" in request_headers
98
99 headers = dict(self.preflight_headers)
100 failures = []
101
102 if self.is_allowed_origin(origin=requested_origin):
103 if not self.allow_all_origins:
104 # If self.allow_all_origins is True, then the "Access-Control-Allow-Origin"
105 # header is already set to "*".
106 # If we only allow specific origins, then we have to mirror back
107 # the Origin header in the response.
108 headers["Access-Control-Allow-Origin"] = requested_origin
109 else:
110 failures.append("origin")
111
112 if requested_method not in self.allow_methods:
113 failures.append("method")
114
115 # If we allow all headers, then we have to mirror back any requested
116 # headers in the response.
117 if self.allow_all_headers and requested_headers is not None:
118 headers["Access-Control-Allow-Headers"] = requested_headers
119 elif requested_headers is not None:
120 for header in requested_headers.split(","):
121 if header.strip() not in self.allow_headers:
122 failures.append("headers")
123
124 # We don't strictly need to use 400 responses here, since its up to
125 # the browser to enforce the CORS policy, but its more informative
126 # if we do.
127 if failures:
128 failure_text = "Disallowed CORS " + ", ".join(failures)
129 return PlainTextResponse(failure_text, status_code=400, headers=headers)
130
131 return PlainTextResponse("OK", status_code=200, headers=headers)
132
133 async def simple_response(self, receive, send, scope=None, origin=None):
134 inner = self.app(scope)
135 send = functools.partial(self.send, send=send, origin=origin)
136 await inner(receive, send)
137
138 async def send(self, message, send=None, origin=None):
139 if message["type"] != "http.response.start":
140 await send(message)
141 return
142
143 message.setdefault("headers", [])
144 headers = MutableHeaders(message["headers"])
145
146 # If we only allow specific origins, then we have to mirror back
147 # the Origin header in the response.
148 if not self.allow_all_origins and self.is_allowed_origin(origin=origin):
149 headers["Access-Control-Allow-Origin"] = origin
150 headers.update(self.simple_headers)
151 await send(message)
152
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/starlette/middleware/cors.py b/starlette/middleware/cors.py
--- a/starlette/middleware/cors.py
+++ b/starlette/middleware/cors.py
@@ -32,6 +32,8 @@
simple_headers = {}
if "*" in allow_origins:
simple_headers["Access-Control-Allow-Origin"] = "*"
+ else:
+ simple_headers["Vary"] = "Origin"
if allow_credentials:
simple_headers["Access-Control-Allow-Credentials"] = "true"
if expose_headers:
@@ -74,7 +76,7 @@
return self.preflight_response(request_headers=headers)
else:
return functools.partial(
- self.simple_response, scope=scope, origin=origin
+ self.simple_response, scope=scope, request_headers=headers
)
return self.app(scope)
@@ -130,22 +132,31 @@
return PlainTextResponse("OK", status_code=200, headers=headers)
- async def simple_response(self, receive, send, scope=None, origin=None):
+ async def simple_response(self, receive, send, scope=None, request_headers=None):
inner = self.app(scope)
- send = functools.partial(self.send, send=send, origin=origin)
+ send = functools.partial(self.send, send=send, request_headers=request_headers)
await inner(receive, send)
- async def send(self, message, send=None, origin=None):
+ async def send(self, message, send=None, request_headers=None):
if message["type"] != "http.response.start":
await send(message)
return
message.setdefault("headers", [])
headers = MutableHeaders(message["headers"])
+ origin = request_headers["Origin"]
+ has_cookie = "cookie" in request_headers
+
+ # If request includes any cookie headers, then we must respond
+ # with the specific origin instead of '*'.
+ if self.allow_all_origins and has_cookie:
+ self.simple_headers["Access-Control-Allow-Origin"] = origin
# If we only allow specific origins, then we have to mirror back
# the Origin header in the response.
- if not self.allow_all_origins and self.is_allowed_origin(origin=origin):
+ elif not self.allow_all_origins and self.is_allowed_origin(origin=origin):
headers["Access-Control-Allow-Origin"] = origin
+ if "vary" in headers:
+ self.simple_headers["Vary"] = f"{headers.get('vary')}, Origin"
headers.update(self.simple_headers)
await send(message)
| {"golden_diff": "diff --git a/starlette/middleware/cors.py b/starlette/middleware/cors.py\n--- a/starlette/middleware/cors.py\n+++ b/starlette/middleware/cors.py\n@@ -32,6 +32,8 @@\n simple_headers = {}\n if \"*\" in allow_origins:\n simple_headers[\"Access-Control-Allow-Origin\"] = \"*\"\n+ else:\n+ simple_headers[\"Vary\"] = \"Origin\"\n if allow_credentials:\n simple_headers[\"Access-Control-Allow-Credentials\"] = \"true\"\n if expose_headers:\n@@ -74,7 +76,7 @@\n return self.preflight_response(request_headers=headers)\n else:\n return functools.partial(\n- self.simple_response, scope=scope, origin=origin\n+ self.simple_response, scope=scope, request_headers=headers\n )\n \n return self.app(scope)\n@@ -130,22 +132,31 @@\n \n return PlainTextResponse(\"OK\", status_code=200, headers=headers)\n \n- async def simple_response(self, receive, send, scope=None, origin=None):\n+ async def simple_response(self, receive, send, scope=None, request_headers=None):\n inner = self.app(scope)\n- send = functools.partial(self.send, send=send, origin=origin)\n+ send = functools.partial(self.send, send=send, request_headers=request_headers)\n await inner(receive, send)\n \n- async def send(self, message, send=None, origin=None):\n+ async def send(self, message, send=None, request_headers=None):\n if message[\"type\"] != \"http.response.start\":\n await send(message)\n return\n \n message.setdefault(\"headers\", [])\n headers = MutableHeaders(message[\"headers\"])\n+ origin = request_headers[\"Origin\"]\n+ has_cookie = \"cookie\" in request_headers\n+\n+ # If request includes any cookie headers, then we must respond\n+ # with the specific origin instead of '*'.\n+ if self.allow_all_origins and has_cookie:\n+ self.simple_headers[\"Access-Control-Allow-Origin\"] = origin\n \n # If we only allow specific origins, then we have to mirror back\n # the Origin header in the response.\n- if not self.allow_all_origins and self.is_allowed_origin(origin=origin):\n+ elif not self.allow_all_origins and self.is_allowed_origin(origin=origin):\n headers[\"Access-Control-Allow-Origin\"] = origin\n+ if \"vary\" in headers:\n+ self.simple_headers[\"Vary\"] = f\"{headers.get('vary')}, Origin\"\n headers.update(self.simple_headers)\n await send(message)\n", "issue": "Credentialed CORS standard requests should not respond with wildcard origins\nSee https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS#Credentialed_requests_and_wildcards \r\n\r\nIf a standard request is made, that includes any cookie headers, then CORSMiddleware *ought* to strictly respond with the requested origin, rather than a wildcard.\r\n\r\nThis is actually potentially a bit fiddly since we maybe also need to make sure to *set or add* Vary: Origin in those cases, in order to ensure correct cacheability.\n", "before_files": [{"content": "from starlette.datastructures import Headers, MutableHeaders, URL\nfrom starlette.responses import PlainTextResponse\nfrom starlette.types import ASGIApp, ASGIInstance, Scope\nimport functools\nimport typing\nimport re\n\n\nALL_METHODS = (\"DELETE\", \"GET\", \"OPTIONS\", \"PATCH\", \"POST\", \"PUT\")\n\n\nclass CORSMiddleware:\n def __init__(\n self,\n app: ASGIApp,\n allow_origins: typing.Sequence[str] = (),\n allow_methods: typing.Sequence[str] = (\"GET\",),\n allow_headers: typing.Sequence[str] = (),\n allow_credentials: bool = False,\n allow_origin_regex: str = None,\n expose_headers: typing.Sequence[str] = (),\n max_age: int = 600,\n ) -> None:\n\n if \"*\" in allow_methods:\n allow_methods = ALL_METHODS\n\n 
compiled_allow_origin_regex = None\n if allow_origin_regex is not None:\n compiled_allow_origin_regex = re.compile(allow_origin_regex)\n\n simple_headers = {}\n if \"*\" in allow_origins:\n simple_headers[\"Access-Control-Allow-Origin\"] = \"*\"\n if allow_credentials:\n simple_headers[\"Access-Control-Allow-Credentials\"] = \"true\"\n if expose_headers:\n simple_headers[\"Access-Control-Expose-Headers\"] = \", \".join(expose_headers)\n\n preflight_headers = {}\n if \"*\" in allow_origins:\n preflight_headers[\"Access-Control-Allow-Origin\"] = \"*\"\n else:\n preflight_headers[\"Vary\"] = \"Origin\"\n preflight_headers.update(\n {\n \"Access-Control-Allow-Methods\": \", \".join(allow_methods),\n \"Access-Control-Max-Age\": str(max_age),\n }\n )\n if allow_headers and \"*\" not in allow_headers:\n preflight_headers[\"Access-Control-Allow-Headers\"] = \", \".join(allow_headers)\n if allow_credentials:\n preflight_headers[\"Access-Control-Allow-Credentials\"] = \"true\"\n\n self.app = app\n self.allow_origins = allow_origins\n self.allow_methods = allow_methods\n self.allow_headers = allow_headers\n self.allow_all_origins = \"*\" in allow_origins\n self.allow_all_headers = \"*\" in allow_headers\n self.allow_origin_regex = compiled_allow_origin_regex\n self.simple_headers = simple_headers\n self.preflight_headers = preflight_headers\n\n def __call__(self, scope: Scope):\n if scope[\"type\"] == \"http\":\n method = scope[\"method\"]\n headers = Headers(scope[\"headers\"])\n origin = headers.get(\"origin\")\n\n if origin is not None:\n if method == \"OPTIONS\" and \"access-control-request-method\" in headers:\n return self.preflight_response(request_headers=headers)\n else:\n return functools.partial(\n self.simple_response, scope=scope, origin=origin\n )\n\n return self.app(scope)\n\n def is_allowed_origin(self, origin):\n if self.allow_all_origins:\n return True\n\n if self.allow_origin_regex is not None and self.allow_origin_regex.match(\n origin\n ):\n return True\n\n return origin in self.allow_origins\n\n def preflight_response(self, request_headers):\n requested_origin = request_headers[\"origin\"]\n requested_method = request_headers[\"access-control-request-method\"]\n requested_headers = request_headers.get(\"access-control-request-headers\")\n requested_cookie = \"cookie\" in request_headers\n\n headers = dict(self.preflight_headers)\n failures = []\n\n if self.is_allowed_origin(origin=requested_origin):\n if not self.allow_all_origins:\n # If self.allow_all_origins is True, then the \"Access-Control-Allow-Origin\"\n # header is already set to \"*\".\n # If we only allow specific origins, then we have to mirror back\n # the Origin header in the response.\n headers[\"Access-Control-Allow-Origin\"] = requested_origin\n else:\n failures.append(\"origin\")\n\n if requested_method not in self.allow_methods:\n failures.append(\"method\")\n\n # If we allow all headers, then we have to mirror back any requested\n # headers in the response.\n if self.allow_all_headers and requested_headers is not None:\n headers[\"Access-Control-Allow-Headers\"] = requested_headers\n elif requested_headers is not None:\n for header in requested_headers.split(\",\"):\n if header.strip() not in self.allow_headers:\n failures.append(\"headers\")\n\n # We don't strictly need to use 400 responses here, since its up to\n # the browser to enforce the CORS policy, but its more informative\n # if we do.\n if failures:\n failure_text = \"Disallowed CORS \" + \", \".join(failures)\n return 
PlainTextResponse(failure_text, status_code=400, headers=headers)\n\n return PlainTextResponse(\"OK\", status_code=200, headers=headers)\n\n async def simple_response(self, receive, send, scope=None, origin=None):\n inner = self.app(scope)\n send = functools.partial(self.send, send=send, origin=origin)\n await inner(receive, send)\n\n async def send(self, message, send=None, origin=None):\n if message[\"type\"] != \"http.response.start\":\n await send(message)\n return\n\n message.setdefault(\"headers\", [])\n headers = MutableHeaders(message[\"headers\"])\n\n # If we only allow specific origins, then we have to mirror back\n # the Origin header in the response.\n if not self.allow_all_origins and self.is_allowed_origin(origin=origin):\n headers[\"Access-Control-Allow-Origin\"] = origin\n headers.update(self.simple_headers)\n await send(message)\n", "path": "starlette/middleware/cors.py"}], "after_files": [{"content": "from starlette.datastructures import Headers, MutableHeaders, URL\nfrom starlette.responses import PlainTextResponse\nfrom starlette.types import ASGIApp, ASGIInstance, Scope\nimport functools\nimport typing\nimport re\n\n\nALL_METHODS = (\"DELETE\", \"GET\", \"OPTIONS\", \"PATCH\", \"POST\", \"PUT\")\n\n\nclass CORSMiddleware:\n def __init__(\n self,\n app: ASGIApp,\n allow_origins: typing.Sequence[str] = (),\n allow_methods: typing.Sequence[str] = (\"GET\",),\n allow_headers: typing.Sequence[str] = (),\n allow_credentials: bool = False,\n allow_origin_regex: str = None,\n expose_headers: typing.Sequence[str] = (),\n max_age: int = 600,\n ) -> None:\n\n if \"*\" in allow_methods:\n allow_methods = ALL_METHODS\n\n compiled_allow_origin_regex = None\n if allow_origin_regex is not None:\n compiled_allow_origin_regex = re.compile(allow_origin_regex)\n\n simple_headers = {}\n if \"*\" in allow_origins:\n simple_headers[\"Access-Control-Allow-Origin\"] = \"*\"\n else:\n simple_headers[\"Vary\"] = \"Origin\"\n if allow_credentials:\n simple_headers[\"Access-Control-Allow-Credentials\"] = \"true\"\n if expose_headers:\n simple_headers[\"Access-Control-Expose-Headers\"] = \", \".join(expose_headers)\n\n preflight_headers = {}\n if \"*\" in allow_origins:\n preflight_headers[\"Access-Control-Allow-Origin\"] = \"*\"\n else:\n preflight_headers[\"Vary\"] = \"Origin\"\n preflight_headers.update(\n {\n \"Access-Control-Allow-Methods\": \", \".join(allow_methods),\n \"Access-Control-Max-Age\": str(max_age),\n }\n )\n if allow_headers and \"*\" not in allow_headers:\n preflight_headers[\"Access-Control-Allow-Headers\"] = \", \".join(allow_headers)\n if allow_credentials:\n preflight_headers[\"Access-Control-Allow-Credentials\"] = \"true\"\n\n self.app = app\n self.allow_origins = allow_origins\n self.allow_methods = allow_methods\n self.allow_headers = allow_headers\n self.allow_all_origins = \"*\" in allow_origins\n self.allow_all_headers = \"*\" in allow_headers\n self.allow_origin_regex = compiled_allow_origin_regex\n self.simple_headers = simple_headers\n self.preflight_headers = preflight_headers\n\n def __call__(self, scope: Scope):\n if scope[\"type\"] == \"http\":\n method = scope[\"method\"]\n headers = Headers(scope[\"headers\"])\n origin = headers.get(\"origin\")\n\n if origin is not None:\n if method == \"OPTIONS\" and \"access-control-request-method\" in headers:\n return self.preflight_response(request_headers=headers)\n else:\n return functools.partial(\n self.simple_response, scope=scope, request_headers=headers\n )\n\n return self.app(scope)\n\n def is_allowed_origin(self, 
origin):\n if self.allow_all_origins:\n return True\n\n if self.allow_origin_regex is not None and self.allow_origin_regex.match(\n origin\n ):\n return True\n\n return origin in self.allow_origins\n\n def preflight_response(self, request_headers):\n requested_origin = request_headers[\"origin\"]\n requested_method = request_headers[\"access-control-request-method\"]\n requested_headers = request_headers.get(\"access-control-request-headers\")\n requested_cookie = \"cookie\" in request_headers\n\n headers = dict(self.preflight_headers)\n failures = []\n\n if self.is_allowed_origin(origin=requested_origin):\n if not self.allow_all_origins:\n # If self.allow_all_origins is True, then the \"Access-Control-Allow-Origin\"\n # header is already set to \"*\".\n # If we only allow specific origins, then we have to mirror back\n # the Origin header in the response.\n headers[\"Access-Control-Allow-Origin\"] = requested_origin\n else:\n failures.append(\"origin\")\n\n if requested_method not in self.allow_methods:\n failures.append(\"method\")\n\n # If we allow all headers, then we have to mirror back any requested\n # headers in the response.\n if self.allow_all_headers and requested_headers is not None:\n headers[\"Access-Control-Allow-Headers\"] = requested_headers\n elif requested_headers is not None:\n for header in requested_headers.split(\",\"):\n if header.strip() not in self.allow_headers:\n failures.append(\"headers\")\n\n # We don't strictly need to use 400 responses here, since its up to\n # the browser to enforce the CORS policy, but its more informative\n # if we do.\n if failures:\n failure_text = \"Disallowed CORS \" + \", \".join(failures)\n return PlainTextResponse(failure_text, status_code=400, headers=headers)\n\n return PlainTextResponse(\"OK\", status_code=200, headers=headers)\n\n async def simple_response(self, receive, send, scope=None, request_headers=None):\n inner = self.app(scope)\n send = functools.partial(self.send, send=send, request_headers=request_headers)\n await inner(receive, send)\n\n async def send(self, message, send=None, request_headers=None):\n if message[\"type\"] != \"http.response.start\":\n await send(message)\n return\n\n message.setdefault(\"headers\", [])\n headers = MutableHeaders(message[\"headers\"])\n origin = request_headers[\"Origin\"]\n has_cookie = \"cookie\" in request_headers\n\n # If request includes any cookie headers, then we must respond\n # with the specific origin instead of '*'.\n if self.allow_all_origins and has_cookie:\n self.simple_headers[\"Access-Control-Allow-Origin\"] = origin\n\n # If we only allow specific origins, then we have to mirror back\n # the Origin header in the response.\n elif not self.allow_all_origins and self.is_allowed_origin(origin=origin):\n headers[\"Access-Control-Allow-Origin\"] = origin\n if \"vary\" in headers:\n self.simple_headers[\"Vary\"] = f\"{headers.get('vary')}, Origin\"\n headers.update(self.simple_headers)\n await send(message)\n", "path": "starlette/middleware/cors.py"}]} | 1,927 | 562 |
gh_patches_debug_16535 | rasdani/github-patches | git_diff | bridgecrewio__checkov-4243 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CKV_AWS_118 Fails With MonitoringInterval Integer Value
**Describe the issue**
CKV_AWS_118 fails if the `MonitoringInterval` value is not wrapped in double quotes, even though the source code says it should allow both ints and strings.
**Examples**
```
RDSinstance:
Type: AWS::RDS::DBInstance
Properties:
DBClusterIdentifier: !Ref DBCluster
DBInstanceClass: !Ref DbType
DBInstanceIdentifier: !Sub ${AppName}-${EnvironmentName}
DBParameterGroupName: !Ref DbParameterGroup
DBSubnetGroupName: !Ref DBSubnetGroup
Engine: aurora-mysql
MonitoringInterval: 60
MonitoringRoleArn: !GetAtt RdsMonitoringRole.Arn
PubliclyAccessible: 'false'
```
**Version (please complete the following information):**
- Checkov Version 2.2.255 (CLI)
**Additional context**
The test failure happens with the CLI and also using a GitHub Action `bridgecrewio/checkov-action@master`
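For illustration, here is a minimal sketch of the suspected root cause; the values below are hypothetical, but the loop mirrors the pre-fix logic in `base_resource_value_check.py` shown further down:

```python
# Hypothetical values, for illustration only: the deep-key search returns the
# matched path elements with the attribute's value as the last list element.
match = ["Properties", "MonitoringInterval", 60]
expected_values = [1, 5, 10, 15, 30, 60]  # assumed accepted intervals

# Pre-fix loop: it rewrites *every* integer in the list, including the final
# element -- which is the value itself, not an array index.
for i in range(0, len(match)):
    if type(match[i]) == int:
        match[i] = f"[{match[i]}]"

value = match[-1]
print(value, value in expected_values)  # prints: [60] False -> check fails
```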

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/cloudformation/checks/resource/base_resource_value_check.py`
Content:
```
1 import re
2 from abc import abstractmethod
3 from collections.abc import Iterable
4 from typing import List, Any, Dict
5
6 from checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck
7 from checkov.cloudformation.context_parser import ContextParser
8 from checkov.common.parsers.node import StrNode, DictNode
9 from checkov.common.models.consts import ANY_VALUE
10 from checkov.common.models.enums import CheckResult, CheckCategories
11 from checkov.common.util.type_forcers import force_list
12 from checkov.common.util.var_utils import is_cloudformation_variable_dependent
13
14 VARIABLE_DEPENDANT_REGEX = re.compile(r"(?:Ref)\.[^\s]+")
15
16
17 class BaseResourceValueCheck(BaseResourceCheck):
18 def __init__(
19 self,
20 name: str,
21 id: str,
22 categories: "Iterable[CheckCategories]",
23 supported_resources: "Iterable[str]",
24 missing_block_result: CheckResult = CheckResult.FAILED,
25 ) -> None:
26 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
27 self.missing_block_result = missing_block_result
28
29 @staticmethod
30 def _filter_key_path(path: str) -> List[str]:
31 """
32 Filter an attribute path to contain only named attributes by dropping array indices from the path)
33 :param path: valid JSONPath of an attribute
34 :return: List of named attributes with respect to the input JSONPath order
35 """
36 regex = re.compile(r"^\[?\d+\]?$")
37 return [x for x in path.split("/") if not re.search(regex, x)]
38
39 @staticmethod
40 def _is_variable_dependant(value: Any) -> bool:
41 return is_cloudformation_variable_dependent(value)
42
43 @staticmethod
44 def _is_nesting_key(inspected_attributes: List[str], key: str) -> bool:
45 """
46 Resolves whether a key is a subset of the inspected nesting attributes
47 :param inspected_attributes: list of nesting attributes
48 :param key: JSONPath key of an attribute
49 :return: True/False
50 """
51 return any(x in key for x in inspected_attributes)
52
53 def scan_resource_conf(self, conf: Dict[StrNode, DictNode]) -> CheckResult:
54 inspected_key = self.get_inspected_key()
55 expected_values = self.get_expected_values()
56 path_elements = inspected_key.split("/")
57 matches = ContextParser.search_deep_keys(path_elements[-1], conf, [])
58 if len(matches) > 0:
59 for match in matches:
60 # CFN files are parsed differently from terraform, which causes the path search above to behave differently.
61 # The tesult is path parts with integer indexes, instead of strings like '[0]'. This logic replaces
62 # those, allowing inspected_keys in checks to use the same syntax.
63 for i in range(0, len(match)):
64 if type(match[i]) == int:
65 match[i] = f"[{match[i]}]"
66
67 if match[:-1] == path_elements:
68 # Inspected key exists
69 value = match[-1]
70 if ANY_VALUE in expected_values and value is not None and (not isinstance(value, str) or value):
71 # Key is found on the configuration - if it accepts any value, the check is PASSED
72 return CheckResult.PASSED
73 if isinstance(value, list) and len(value) == 1:
74 value = value[0]
75 if self._is_variable_dependant(value):
76 # If the tested attribute is variable-dependant, then result is PASSED
77 return CheckResult.PASSED
78 if value in expected_values:
79 return CheckResult.PASSED
80
81 # handle boolean case sensitivity (e.g., CFN accepts the string "true" as a boolean)
82 if isinstance(value, str) and value.lower() in ('true', 'false'):
83 value = value.lower() == 'true'
84 if value in expected_values:
85 return CheckResult.PASSED
86 return CheckResult.FAILED
87
88 return self.missing_block_result
89
90 @abstractmethod
91 def get_inspected_key(self) -> str:
92 """
93 :return: JSONPath syntax path of the checked attribute
94 """
95 raise NotImplementedError()
96
97 def get_expected_values(self) -> List[Any]:
98 """
99 Override the method with the list of acceptable values if the check has more than one possible expected value, given
100 the inspected key
101 :return: List of expected values, defaults to a list of the expected value
102 """
103 return [self.get_expected_value()]
104
105 def get_expected_value(self) -> Any:
106 """
107 Returns the default expected value, governed by provider best practices
108 """
109 return True
110
111 def get_evaluated_keys(self) -> List[str]:
112 return force_list(self.get_inspected_key())
113
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/cloudformation/checks/resource/base_resource_value_check.py b/checkov/cloudformation/checks/resource/base_resource_value_check.py
--- a/checkov/cloudformation/checks/resource/base_resource_value_check.py
+++ b/checkov/cloudformation/checks/resource/base_resource_value_check.py
@@ -60,7 +60,8 @@
# CFN files are parsed differently from terraform, which causes the path search above to behave differently.
# The tesult is path parts with integer indexes, instead of strings like '[0]'. This logic replaces
# those, allowing inspected_keys in checks to use the same syntax.
- for i in range(0, len(match)):
+ # The last value shouldn't be changed, because it could be indeed a valid number
+ for i in range(0, len(match) - 1):
if type(match[i]) == int:
match[i] = f"[{match[i]}]"
| {"golden_diff": "diff --git a/checkov/cloudformation/checks/resource/base_resource_value_check.py b/checkov/cloudformation/checks/resource/base_resource_value_check.py\n--- a/checkov/cloudformation/checks/resource/base_resource_value_check.py\n+++ b/checkov/cloudformation/checks/resource/base_resource_value_check.py\n@@ -60,7 +60,8 @@\n # CFN files are parsed differently from terraform, which causes the path search above to behave differently.\n # The tesult is path parts with integer indexes, instead of strings like '[0]'. This logic replaces\n # those, allowing inspected_keys in checks to use the same syntax.\n- for i in range(0, len(match)):\n+ # The last value shouldn't be changed, because it could be indeed a valid number\n+ for i in range(0, len(match) - 1):\n if type(match[i]) == int:\n match[i] = f\"[{match[i]}]\"\n", "issue": "CKV_AWS_118 Fails With MonitoringInterval Integer Value\n**Describe the issue**\r\nCKV_AWS_118 fails if the `MonitoringInterval` value is not wrapped in double quotes despite the fact that the source code says it should allow ints and strings.\r\n\r\n**Examples**\r\n```\r\nRDSinstance:\r\n Type: AWS::RDS::DBInstance\r\n Properties:\r\n DBClusterIdentifier: !Ref DBCluster\r\n DBInstanceClass: !Ref DbType\r\n DBInstanceIdentifier: !Sub ${AppName}-${EnvironmentName}\r\n DBParameterGroupName: !Ref DbParameterGroup\r\n DBSubnetGroupName: !Ref DBSubnetGroup\r\n Engine: aurora-mysql\r\n MonitoringInterval: 60\r\n MonitoringRoleArn: !GetAtt RdsMonitoringRole.Arn\r\n PubliclyAccessible: 'false'\r\n```\r\n\r\n**Version (please complete the following information):**\r\n - Checkov Version 2.2.255 (CLI)\r\n\r\n**Additional context**\r\nThe test failure happens with the CLI and also using a GItHub Action `bridgecrewio/checkov-action@master`\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "import re\nfrom abc import abstractmethod\nfrom collections.abc import Iterable\nfrom typing import List, Any, Dict\n\nfrom checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck\nfrom checkov.cloudformation.context_parser import ContextParser\nfrom checkov.common.parsers.node import StrNode, DictNode\nfrom checkov.common.models.consts import ANY_VALUE\nfrom checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.common.util.type_forcers import force_list\nfrom checkov.common.util.var_utils import is_cloudformation_variable_dependent\n\nVARIABLE_DEPENDANT_REGEX = re.compile(r\"(?:Ref)\\.[^\\s]+\")\n\n\nclass BaseResourceValueCheck(BaseResourceCheck):\n def __init__(\n self,\n name: str,\n id: str,\n categories: \"Iterable[CheckCategories]\",\n supported_resources: \"Iterable[str]\",\n missing_block_result: CheckResult = CheckResult.FAILED,\n ) -> None:\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n self.missing_block_result = missing_block_result\n\n @staticmethod\n def _filter_key_path(path: str) -> List[str]:\n \"\"\"\n Filter an attribute path to contain only named attributes by dropping array indices from the path)\n :param path: valid JSONPath of an attribute\n :return: List of named attributes with respect to the input JSONPath order\n \"\"\"\n regex = re.compile(r\"^\\[?\\d+\\]?$\")\n return [x for x in path.split(\"/\") if not re.search(regex, x)]\n\n @staticmethod\n def _is_variable_dependant(value: Any) -> bool:\n return is_cloudformation_variable_dependent(value)\n\n @staticmethod\n def _is_nesting_key(inspected_attributes: List[str], key: str) -> bool:\n \"\"\"\n 
Resolves whether a key is a subset of the inspected nesting attributes\n :param inspected_attributes: list of nesting attributes\n :param key: JSONPath key of an attribute\n :return: True/False\n \"\"\"\n return any(x in key for x in inspected_attributes)\n\n def scan_resource_conf(self, conf: Dict[StrNode, DictNode]) -> CheckResult:\n inspected_key = self.get_inspected_key()\n expected_values = self.get_expected_values()\n path_elements = inspected_key.split(\"/\")\n matches = ContextParser.search_deep_keys(path_elements[-1], conf, [])\n if len(matches) > 0:\n for match in matches:\n # CFN files are parsed differently from terraform, which causes the path search above to behave differently.\n # The tesult is path parts with integer indexes, instead of strings like '[0]'. This logic replaces\n # those, allowing inspected_keys in checks to use the same syntax.\n for i in range(0, len(match)):\n if type(match[i]) == int:\n match[i] = f\"[{match[i]}]\"\n\n if match[:-1] == path_elements:\n # Inspected key exists\n value = match[-1]\n if ANY_VALUE in expected_values and value is not None and (not isinstance(value, str) or value):\n # Key is found on the configuration - if it accepts any value, the check is PASSED\n return CheckResult.PASSED\n if isinstance(value, list) and len(value) == 1:\n value = value[0]\n if self._is_variable_dependant(value):\n # If the tested attribute is variable-dependant, then result is PASSED\n return CheckResult.PASSED\n if value in expected_values:\n return CheckResult.PASSED\n\n # handle boolean case sensitivity (e.g., CFN accepts the string \"true\" as a boolean)\n if isinstance(value, str) and value.lower() in ('true', 'false'):\n value = value.lower() == 'true'\n if value in expected_values:\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n return self.missing_block_result\n\n @abstractmethod\n def get_inspected_key(self) -> str:\n \"\"\"\n :return: JSONPath syntax path of the checked attribute\n \"\"\"\n raise NotImplementedError()\n\n def get_expected_values(self) -> List[Any]:\n \"\"\"\n Override the method with the list of acceptable values if the check has more than one possible expected value, given\n the inspected key\n :return: List of expected values, defaults to a list of the expected value\n \"\"\"\n return [self.get_expected_value()]\n\n def get_expected_value(self) -> Any:\n \"\"\"\n Returns the default expected value, governed by provider best practices\n \"\"\"\n return True\n\n def get_evaluated_keys(self) -> List[str]:\n return force_list(self.get_inspected_key())\n", "path": "checkov/cloudformation/checks/resource/base_resource_value_check.py"}], "after_files": [{"content": "import re\nfrom abc import abstractmethod\nfrom collections.abc import Iterable\nfrom typing import List, Any, Dict\n\nfrom checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck\nfrom checkov.cloudformation.context_parser import ContextParser\nfrom checkov.common.parsers.node import StrNode, DictNode\nfrom checkov.common.models.consts import ANY_VALUE\nfrom checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.common.util.type_forcers import force_list\nfrom checkov.common.util.var_utils import is_cloudformation_variable_dependent\n\nVARIABLE_DEPENDANT_REGEX = re.compile(r\"(?:Ref)\\.[^\\s]+\")\n\n\nclass BaseResourceValueCheck(BaseResourceCheck):\n def __init__(\n self,\n name: str,\n id: str,\n categories: \"Iterable[CheckCategories]\",\n supported_resources: \"Iterable[str]\",\n missing_block_result: 
CheckResult = CheckResult.FAILED,\n ) -> None:\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n self.missing_block_result = missing_block_result\n\n @staticmethod\n def _filter_key_path(path: str) -> List[str]:\n \"\"\"\n Filter an attribute path to contain only named attributes by dropping array indices from the path)\n :param path: valid JSONPath of an attribute\n :return: List of named attributes with respect to the input JSONPath order\n \"\"\"\n regex = re.compile(r\"^\\[?\\d+\\]?$\")\n return [x for x in path.split(\"/\") if not re.search(regex, x)]\n\n @staticmethod\n def _is_variable_dependant(value: Any) -> bool:\n return is_cloudformation_variable_dependent(value)\n\n @staticmethod\n def _is_nesting_key(inspected_attributes: List[str], key: str) -> bool:\n \"\"\"\n Resolves whether a key is a subset of the inspected nesting attributes\n :param inspected_attributes: list of nesting attributes\n :param key: JSONPath key of an attribute\n :return: True/False\n \"\"\"\n return any(x in key for x in inspected_attributes)\n\n def scan_resource_conf(self, conf: Dict[StrNode, DictNode]) -> CheckResult:\n inspected_key = self.get_inspected_key()\n expected_values = self.get_expected_values()\n path_elements = inspected_key.split(\"/\")\n matches = ContextParser.search_deep_keys(path_elements[-1], conf, [])\n if len(matches) > 0:\n for match in matches:\n # CFN files are parsed differently from terraform, which causes the path search above to behave differently.\n # The tesult is path parts with integer indexes, instead of strings like '[0]'. This logic replaces\n # those, allowing inspected_keys in checks to use the same syntax.\n # The last value shouldn't be changed, because it could be indeed a valid number\n for i in range(0, len(match) - 1):\n if type(match[i]) == int:\n match[i] = f\"[{match[i]}]\"\n\n if match[:-1] == path_elements:\n # Inspected key exists\n value = match[-1]\n if ANY_VALUE in expected_values and value is not None and (not isinstance(value, str) or value):\n # Key is found on the configuration - if it accepts any value, the check is PASSED\n return CheckResult.PASSED\n if isinstance(value, list) and len(value) == 1:\n value = value[0]\n if self._is_variable_dependant(value):\n # If the tested attribute is variable-dependant, then result is PASSED\n return CheckResult.PASSED\n if value in expected_values:\n return CheckResult.PASSED\n\n # handle boolean case sensitivity (e.g., CFN accepts the string \"true\" as a boolean)\n if isinstance(value, str) and value.lower() in ('true', 'false'):\n value = value.lower() == 'true'\n if value in expected_values:\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n return self.missing_block_result\n\n @abstractmethod\n def get_inspected_key(self) -> str:\n \"\"\"\n :return: JSONPath syntax path of the checked attribute\n \"\"\"\n raise NotImplementedError()\n\n def get_expected_values(self) -> List[Any]:\n \"\"\"\n Override the method with the list of acceptable values if the check has more than one possible expected value, given\n the inspected key\n :return: List of expected values, defaults to a list of the expected value\n \"\"\"\n return [self.get_expected_value()]\n\n def get_expected_value(self) -> Any:\n \"\"\"\n Returns the default expected value, governed by provider best practices\n \"\"\"\n return True\n\n def get_evaluated_keys(self) -> List[str]:\n return force_list(self.get_inspected_key())\n", "path": 
"checkov/cloudformation/checks/resource/base_resource_value_check.py"}]} | 1,841 | 202 |
gh_patches_debug_59532 | rasdani/github-patches | git_diff | mit-ll-responsible-ai__hydra-zen-97 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PEP 561 compatibility
Hi,
Would it be possible to make hydra-zen compliant with [PEP 561](https://www.python.org/dev/peps/pep-0561) by distributing a `py.typed` file with the package?
Currently I'm getting `Skipping analyzing "hydra_zen": found module but no type hints or library stubs` when I run mypy on a test file. Here are steps to reproduce this error:
```text
$ pip install hydra-zen mypy
...
Successfully installed PyYAML-5.4.1 antlr4-python3-runtime-4.8 hydra-core-1.1.1 hydra-zen-0.2.0 mypy-0.910 mypy-extensions-0.4.3 omegaconf-2.1.1 toml-0.10.2 typing-extensions-3.10.0.2
...
$ echo "from hydra_zen import builds" > tmp.py
$ mypy tmp.py
tmp.py:1: error: Skipping analyzing "hydra_zen": found module but no type hints or library stubs
tmp.py:1: note: See https://mypy.readthedocs.io/en/stable/running_mypy.html#missing-imports
Found 1 error in 1 file (checked 1 source file)
```
I believe that adding an empty `py.typed` file to the `src/hydra_zen` directory (and modifying `setup.py` so that the `py.typed` file is distributed with the `hydra-zen` package) would make it possible for type checkers following PEP 561 to discover the type hints in `src`.
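For reference, a hedged sketch of what such a change typically looks like: an empty marker file at `src/hydra_zen/py.typed`, plus a `package_data` entry in `setup.py` (illustrative only, not a tested patch):

```python
from setuptools import find_packages, setup

setup(
    name="hydra_zen",
    # ... other metadata arguments unchanged ...
    packages=find_packages(where="src", exclude=["tests", "tests.*"]),
    package_dir={"": "src"},
    # Ship the empty PEP 561 marker so type checkers pick up inline hints.
    package_data={"hydra_zen": ["py.typed"]},
)
```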
(I'd be happy to submit a PR to this effect.)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright (c) 2021 Massachusetts Institute of Technology
2 # SPDX-License-Identifier: MIT
3
4 from setuptools import find_packages, setup
5
6 import versioneer
7
8 DISTNAME = "hydra_zen"
9 LICENSE = "MIT"
10 AUTHOR = "Justin Goodwin, Ryan Soklaski"
11 AUTHOR_EMAIL = "[email protected]"
12 URL = "https://github.com/mit-ll-responsible-ai/hydra_zen"
13 CLASSIFIERS = [
14 "Development Status :: 4 - Beta",
15 "License :: OSI Approved :: MIT License",
16 "Operating System :: OS Independent",
17 "Intended Audience :: Science/Research",
18 "Programming Language :: Python :: 3.6",
19 "Programming Language :: Python :: 3.7",
20 "Programming Language :: Python :: 3.8",
21 "Programming Language :: Python :: 3.9",
22 "Topic :: Scientific/Engineering",
23 ]
24 KEYWORDS = "machine learning research configuration scalable reproducible"
25 INSTALL_REQUIRES = [
26 "hydra-core >= 1.1.0",
27 "typing-extensions >= 3.7.4.1",
28 ]
29 TESTS_REQUIRE = [
30 "pytest >= 3.8",
31 "hypothesis >= 5.32.0",
32 ]
33
34 DESCRIPTION = "Utilities for making hydra scale to ML workflows"
35 LONG_DESCRIPTION = """
36 hydra-zen helps you configure your project using the power of Hydra, while enjoying the Zen of Python!
37
38 hydra-zen eliminates the boilerplate code that you write to configure, orchestrate, and organize the results of large-scale projects, such as machine learning experiments. It does so by providing Hydra-compatible tools that dynamically generate "structured configurations" of your code, and enables Python-centric workflows for running configured instances of your code.
39
40 hydra-zen offers:
41
42 - Functions for automatically and dynamically generating structured configs that can be used to fully or partially instantiate objects in your application.
43 - The ability to launch Hydra jobs, complete with parameter sweeps and multi-run configurations, from within a notebook or any other Python environment.
44 - Incisive type annotations that provide enriched context about your project's configurations to IDEs, type checkers, and other tooling.
45 - Runtime validation of configurations to catch mistakes before your application launches.
46 - Equal support for both object-oriented libraries (e.g., torch.nn) and functional ones (e.g., jax and numpy).
47
48 These functions and capabilities can be used to great effect alongside PyTorch Lightning to design boilerplate-free machine learning projects!
49 """
50
51
52 setup(
53 name=DISTNAME,
54 version=versioneer.get_version(),
55 cmdclass=versioneer.get_cmdclass(),
56 license=LICENSE,
57 author=AUTHOR,
58 author_email=AUTHOR_EMAIL,
59 classifiers=CLASSIFIERS,
60 keywords=KEYWORDS,
61 description=DESCRIPTION,
62 long_description=LONG_DESCRIPTION,
63 install_requires=INSTALL_REQUIRES,
64 tests_require=TESTS_REQUIRE,
65 url=URL,
66 download_url="https://github.com/mit-ll-responsible-ai/hydra-zen/tarball/"
67 + versioneer.get_version(),
68 python_requires=">=3.6",
69 packages=find_packages(where="src", exclude=["tests", "tests.*"]),
70 package_dir={"": "src"},
71 )
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -68,4 +68,5 @@
python_requires=">=3.6",
packages=find_packages(where="src", exclude=["tests", "tests.*"]),
package_dir={"": "src"},
+ package_data={"hydra_zen": ["py.typed"]}
)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -68,4 +68,5 @@\n python_requires=\">=3.6\",\n packages=find_packages(where=\"src\", exclude=[\"tests\", \"tests.*\"]),\n package_dir={\"\": \"src\"},\n+ package_data={\"hydra_zen\": [\"py.typed\"]}\n )\n", "issue": "PEP 561 compatibility\nHi,\r\n\r\nWould it be possible to make hydra-zen compliant with [PEP 561](https://www.python.org/dev/peps/pep-0561) by distributing a `py.typed` file with the package?\r\n\r\nCurrently I'm getting `Skipping analyzing \"hydra_zen\": found module but no type hints or library stubs` when I run mypy on a test file. Here are steps to reproduce this error:\r\n```text\r\n$ pip install hydra-zen mypy\r\n...\r\nSuccessfully installed PyYAML-5.4.1 antlr4-python3-runtime-4.8 hydra-core-1.1.1 hydra-zen-0.2.0 mypy-0.910 mypy-extensions-0.4.3 omegaconf-2.1.1 toml-0.10.2 typing-extensions-3.10.0.2\r\n...\r\n$ echo \"from hydra_zen import builds\" > tmp.py\r\n$ mypy tmp.py\r\ntmp.py:1: error: Skipping analyzing \"hydra_zen\": found module but no type hints or library stubs\r\ntmp.py:1: note: See https://mypy.readthedocs.io/en/stable/running_mypy.html#missing-imports\r\nFound 1 error in 1 file (checked 1 source file)\r\n```\r\n\r\nI believe that adding an empty `py.typed` file to the `src/hydra_zen` directory (and modifying `setup.py` so that the `py.typed` file is distributed with the `hydra-zen` package) would make it possible for type checkers following PEP 561 to discover the type hints in `src`.\r\n(I'd be happy to submit a PR to this effect.)\n", "before_files": [{"content": "# Copyright (c) 2021 Massachusetts Institute of Technology\n# SPDX-License-Identifier: MIT\n\nfrom setuptools import find_packages, setup\n\nimport versioneer\n\nDISTNAME = \"hydra_zen\"\nLICENSE = \"MIT\"\nAUTHOR = \"Justin Goodwin, Ryan Soklaski\"\nAUTHOR_EMAIL = \"[email protected]\"\nURL = \"https://github.com/mit-ll-responsible-ai/hydra_zen\"\nCLASSIFIERS = [\n \"Development Status :: 4 - Beta\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Intended Audience :: Science/Research\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Scientific/Engineering\",\n]\nKEYWORDS = \"machine learning research configuration scalable reproducible\"\nINSTALL_REQUIRES = [\n \"hydra-core >= 1.1.0\",\n \"typing-extensions >= 3.7.4.1\",\n]\nTESTS_REQUIRE = [\n \"pytest >= 3.8\",\n \"hypothesis >= 5.32.0\",\n]\n\nDESCRIPTION = \"Utilities for making hydra scale to ML workflows\"\nLONG_DESCRIPTION = \"\"\"\nhydra-zen helps you configure your project using the power of Hydra, while enjoying the Zen of Python!\n\nhydra-zen eliminates the boilerplate code that you write to configure, orchestrate, and organize the results of large-scale projects, such as machine learning experiments. 
It does so by providing Hydra-compatible tools that dynamically generate \"structured configurations\" of your code, and enables Python-centric workflows for running configured instances of your code.\n\nhydra-zen offers:\n\n - Functions for automatically and dynamically generating structured configs that can be used to fully or partially instantiate objects in your application.\n - The ability to launch Hydra jobs, complete with parameter sweeps and multi-run configurations, from within a notebook or any other Python environment.\n - Incisive type annotations that provide enriched context about your project's configurations to IDEs, type checkers, and other tooling.\n - Runtime validation of configurations to catch mistakes before your application launches.\n - Equal support for both object-oriented libraries (e.g., torch.nn) and functional ones (e.g., jax and numpy).\n\nThese functions and capabilities can be used to great effect alongside PyTorch Lightning to design boilerplate-free machine learning projects!\n\"\"\"\n\n\nsetup(\n name=DISTNAME,\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n license=LICENSE,\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n classifiers=CLASSIFIERS,\n keywords=KEYWORDS,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n install_requires=INSTALL_REQUIRES,\n tests_require=TESTS_REQUIRE,\n url=URL,\n download_url=\"https://github.com/mit-ll-responsible-ai/hydra-zen/tarball/\"\n + versioneer.get_version(),\n python_requires=\">=3.6\",\n packages=find_packages(where=\"src\", exclude=[\"tests\", \"tests.*\"]),\n package_dir={\"\": \"src\"},\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright (c) 2021 Massachusetts Institute of Technology\n# SPDX-License-Identifier: MIT\n\nfrom setuptools import find_packages, setup\n\nimport versioneer\n\nDISTNAME = \"hydra_zen\"\nLICENSE = \"MIT\"\nAUTHOR = \"Justin Goodwin, Ryan Soklaski\"\nAUTHOR_EMAIL = \"[email protected]\"\nURL = \"https://github.com/mit-ll-responsible-ai/hydra_zen\"\nCLASSIFIERS = [\n \"Development Status :: 4 - Beta\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Intended Audience :: Science/Research\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Scientific/Engineering\",\n]\nKEYWORDS = \"machine learning research configuration scalable reproducible\"\nINSTALL_REQUIRES = [\n \"hydra-core >= 1.1.0\",\n \"typing-extensions >= 3.7.4.1\",\n]\nTESTS_REQUIRE = [\n \"pytest >= 3.8\",\n \"hypothesis >= 5.32.0\",\n]\n\nDESCRIPTION = \"Utilities for making hydra scale to ML workflows\"\nLONG_DESCRIPTION = \"\"\"\nhydra-zen helps you configure your project using the power of Hydra, while enjoying the Zen of Python!\n\nhydra-zen eliminates the boilerplate code that you write to configure, orchestrate, and organize the results of large-scale projects, such as machine learning experiments. 
It does so by providing Hydra-compatible tools that dynamically generate \"structured configurations\" of your code, and enables Python-centric workflows for running configured instances of your code.\n\nhydra-zen offers:\n\n - Functions for automatically and dynamically generating structured configs that can be used to fully or partially instantiate objects in your application.\n - The ability to launch Hydra jobs, complete with parameter sweeps and multi-run configurations, from within a notebook or any other Python environment.\n - Incisive type annotations that provide enriched context about your project's configurations to IDEs, type checkers, and other tooling.\n - Runtime validation of configurations to catch mistakes before your application launches.\n - Equal support for both object-oriented libraries (e.g., torch.nn) and functional ones (e.g., jax and numpy).\n\nThese functions and capabilities can be used to great effect alongside PyTorch Lightning to design boilerplate-free machine learning projects!\n\"\"\"\n\n\nsetup(\n name=DISTNAME,\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n license=LICENSE,\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n classifiers=CLASSIFIERS,\n keywords=KEYWORDS,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n install_requires=INSTALL_REQUIRES,\n tests_require=TESTS_REQUIRE,\n url=URL,\n download_url=\"https://github.com/mit-ll-responsible-ai/hydra-zen/tarball/\"\n + versioneer.get_version(),\n python_requires=\">=3.6\",\n packages=find_packages(where=\"src\", exclude=[\"tests\", \"tests.*\"]),\n package_dir={\"\": \"src\"},\n package_data={\"hydra_zen\": [\"py.typed\"]}\n)\n", "path": "setup.py"}]} | 1,473 | 81 |
gh_patches_debug_29551 | rasdani/github-patches | git_diff | doccano__doccano-1770 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Wrong progress in collaborative annotation ('Share annotations across all users')
How to reproduce the behaviour
---------
Progress is shown as individual progress instead of total progress when 'Share annotations across all users' is ticked in the project settings.
Your Environment
---------
<!-- Include details of your environment.-->
* Operating System: wsl2+ubuntu20.04
* Python Version Used: 3.8
* When you install doccano: 20220403
* How did you install doccano (Heroku button etc): source
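
A hedged sketch of the behaviour the reporter expects (model and manager names follow the project's apparent structure; this is an illustration, not the actual patch):

```python
from django.shortcuts import get_object_or_404

from examples.models import ExampleState
from projects.models import Project


def count_complete(project_id, examples, user):
    """Count 'done' examples, honouring the collaborative-annotation flag."""
    project = get_object_or_404(Project, pk=project_id)
    if project.collaborative_annotation:
        # Shared annotations: progress should be the project-wide total.
        return ExampleState.objects.count_done(examples)
    # Otherwise progress remains per-user.
    return ExampleState.objects.count_done(examples, user=user)
```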
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `backend/metrics/views.py`
Content:
```
1 import abc
2
3 from rest_framework import status
4 from rest_framework.permissions import IsAuthenticated
5 from rest_framework.response import Response
6 from rest_framework.views import APIView
7
8 from examples.models import Example, ExampleState
9 from label_types.models import CategoryType, LabelType, RelationType, SpanType
10 from labels.models import Category, Label, Relation, Span
11 from projects.models import Member
12 from projects.permissions import IsProjectAdmin, IsProjectStaffAndReadOnly
13
14
15 class ProgressAPI(APIView):
16 permission_classes = [IsAuthenticated & (IsProjectAdmin | IsProjectStaffAndReadOnly)]
17
18 def get(self, request, *args, **kwargs):
19 examples = Example.objects.filter(project=self.kwargs["project_id"]).values("id")
20 total = examples.count()
21 complete = ExampleState.objects.count_done(examples, user=self.request.user)
22 data = {"total": total, "remaining": total - complete, "complete": complete}
23 return Response(data=data, status=status.HTTP_200_OK)
24
25
26 class MemberProgressAPI(APIView):
27 permission_classes = [IsAuthenticated & (IsProjectAdmin | IsProjectStaffAndReadOnly)]
28
29 def get(self, request, *args, **kwargs):
30 examples = Example.objects.filter(project=self.kwargs["project_id"]).values("id")
31 members = Member.objects.filter(project=self.kwargs["project_id"])
32 data = ExampleState.objects.measure_member_progress(examples, members)
33 return Response(data=data, status=status.HTTP_200_OK)
34
35
36 class LabelDistribution(abc.ABC, APIView):
37 permission_classes = [IsAuthenticated & (IsProjectAdmin | IsProjectStaffAndReadOnly)]
38 model = Label
39 label_type = LabelType
40
41 def get(self, request, *args, **kwargs):
42 labels = self.label_type.objects.filter(project=self.kwargs["project_id"])
43 examples = Example.objects.filter(project=self.kwargs["project_id"]).values("id")
44 members = Member.objects.filter(project=self.kwargs["project_id"])
45 data = self.model.objects.calc_label_distribution(examples, members, labels)
46 return Response(data=data, status=status.HTTP_200_OK)
47
48
49 class CategoryTypeDistribution(LabelDistribution):
50 model = Category
51 label_type = CategoryType
52
53
54 class SpanTypeDistribution(LabelDistribution):
55 model = Span
56 label_type = SpanType
57
58
59 class RelationTypeDistribution(LabelDistribution):
60 model = Relation
61 label_type = RelationType
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/backend/metrics/views.py b/backend/metrics/views.py
--- a/backend/metrics/views.py
+++ b/backend/metrics/views.py
@@ -1,5 +1,6 @@
import abc
+from django.shortcuts import get_object_or_404
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
@@ -8,7 +9,7 @@
from examples.models import Example, ExampleState
from label_types.models import CategoryType, LabelType, RelationType, SpanType
from labels.models import Category, Label, Relation, Span
-from projects.models import Member
+from projects.models import Member, Project
from projects.permissions import IsProjectAdmin, IsProjectStaffAndReadOnly
@@ -18,7 +19,11 @@
def get(self, request, *args, **kwargs):
examples = Example.objects.filter(project=self.kwargs["project_id"]).values("id")
total = examples.count()
- complete = ExampleState.objects.count_done(examples, user=self.request.user)
+ project = get_object_or_404(Project, pk=self.kwargs["project_id"])
+ if project.collaborative_annotation:
+ complete = ExampleState.objects.count_done(examples)
+ else:
+ complete = ExampleState.objects.count_done(examples, user=self.request.user)
data = {"total": total, "remaining": total - complete, "complete": complete}
return Response(data=data, status=status.HTTP_200_OK)
| {"golden_diff": "diff --git a/backend/metrics/views.py b/backend/metrics/views.py\n--- a/backend/metrics/views.py\n+++ b/backend/metrics/views.py\n@@ -1,5 +1,6 @@\n import abc\n \n+from django.shortcuts import get_object_or_404\n from rest_framework import status\n from rest_framework.permissions import IsAuthenticated\n from rest_framework.response import Response\n@@ -8,7 +9,7 @@\n from examples.models import Example, ExampleState\n from label_types.models import CategoryType, LabelType, RelationType, SpanType\n from labels.models import Category, Label, Relation, Span\n-from projects.models import Member\n+from projects.models import Member, Project\n from projects.permissions import IsProjectAdmin, IsProjectStaffAndReadOnly\n \n \n@@ -18,7 +19,11 @@\n def get(self, request, *args, **kwargs):\n examples = Example.objects.filter(project=self.kwargs[\"project_id\"]).values(\"id\")\n total = examples.count()\n- complete = ExampleState.objects.count_done(examples, user=self.request.user)\n+ project = get_object_or_404(Project, pk=self.kwargs[\"project_id\"])\n+ if project.collaborative_annotation:\n+ complete = ExampleState.objects.count_done(examples)\n+ else:\n+ complete = ExampleState.objects.count_done(examples, user=self.request.user)\n data = {\"total\": total, \"remaining\": total - complete, \"complete\": complete}\n return Response(data=data, status=status.HTTP_200_OK)\n", "issue": "Wrong progress in collaborative annotation ('Share annotations across all users')\nHow to reproduce the behaviour\r\n---------\r\nProgress is shown as individual progress instead of total progress when 'Share annotations across all users' is ticked in project setting.\r\n\r\nYour Environment\r\n---------\r\n<!-- Include details of your environment.-->\r\n* Operating System: wsl2+ubuntu20.04\r\n* Python Version Used: 3.8\r\n* When you install doccano: 20220403\r\n* How did you install doccano (Heroku button etc): source\r\n\n", "before_files": [{"content": "import abc\n\nfrom rest_framework import status\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom examples.models import Example, ExampleState\nfrom label_types.models import CategoryType, LabelType, RelationType, SpanType\nfrom labels.models import Category, Label, Relation, Span\nfrom projects.models import Member\nfrom projects.permissions import IsProjectAdmin, IsProjectStaffAndReadOnly\n\n\nclass ProgressAPI(APIView):\n permission_classes = [IsAuthenticated & (IsProjectAdmin | IsProjectStaffAndReadOnly)]\n\n def get(self, request, *args, **kwargs):\n examples = Example.objects.filter(project=self.kwargs[\"project_id\"]).values(\"id\")\n total = examples.count()\n complete = ExampleState.objects.count_done(examples, user=self.request.user)\n data = {\"total\": total, \"remaining\": total - complete, \"complete\": complete}\n return Response(data=data, status=status.HTTP_200_OK)\n\n\nclass MemberProgressAPI(APIView):\n permission_classes = [IsAuthenticated & (IsProjectAdmin | IsProjectStaffAndReadOnly)]\n\n def get(self, request, *args, **kwargs):\n examples = Example.objects.filter(project=self.kwargs[\"project_id\"]).values(\"id\")\n members = Member.objects.filter(project=self.kwargs[\"project_id\"])\n data = ExampleState.objects.measure_member_progress(examples, members)\n return Response(data=data, status=status.HTTP_200_OK)\n\n\nclass LabelDistribution(abc.ABC, APIView):\n permission_classes = [IsAuthenticated & (IsProjectAdmin | 
IsProjectStaffAndReadOnly)]\n model = Label\n label_type = LabelType\n\n def get(self, request, *args, **kwargs):\n labels = self.label_type.objects.filter(project=self.kwargs[\"project_id\"])\n examples = Example.objects.filter(project=self.kwargs[\"project_id\"]).values(\"id\")\n members = Member.objects.filter(project=self.kwargs[\"project_id\"])\n data = self.model.objects.calc_label_distribution(examples, members, labels)\n return Response(data=data, status=status.HTTP_200_OK)\n\n\nclass CategoryTypeDistribution(LabelDistribution):\n model = Category\n label_type = CategoryType\n\n\nclass SpanTypeDistribution(LabelDistribution):\n model = Span\n label_type = SpanType\n\n\nclass RelationTypeDistribution(LabelDistribution):\n model = Relation\n label_type = RelationType\n", "path": "backend/metrics/views.py"}], "after_files": [{"content": "import abc\n\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework import status\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom examples.models import Example, ExampleState\nfrom label_types.models import CategoryType, LabelType, RelationType, SpanType\nfrom labels.models import Category, Label, Relation, Span\nfrom projects.models import Member, Project\nfrom projects.permissions import IsProjectAdmin, IsProjectStaffAndReadOnly\n\n\nclass ProgressAPI(APIView):\n permission_classes = [IsAuthenticated & (IsProjectAdmin | IsProjectStaffAndReadOnly)]\n\n def get(self, request, *args, **kwargs):\n examples = Example.objects.filter(project=self.kwargs[\"project_id\"]).values(\"id\")\n total = examples.count()\n project = get_object_or_404(Project, pk=self.kwargs[\"project_id\"])\n if project.collaborative_annotation:\n complete = ExampleState.objects.count_done(examples)\n else:\n complete = ExampleState.objects.count_done(examples, user=self.request.user)\n data = {\"total\": total, \"remaining\": total - complete, \"complete\": complete}\n return Response(data=data, status=status.HTTP_200_OK)\n\n\nclass MemberProgressAPI(APIView):\n permission_classes = [IsAuthenticated & (IsProjectAdmin | IsProjectStaffAndReadOnly)]\n\n def get(self, request, *args, **kwargs):\n examples = Example.objects.filter(project=self.kwargs[\"project_id\"]).values(\"id\")\n members = Member.objects.filter(project=self.kwargs[\"project_id\"])\n data = ExampleState.objects.measure_member_progress(examples, members)\n return Response(data=data, status=status.HTTP_200_OK)\n\n\nclass LabelDistribution(abc.ABC, APIView):\n permission_classes = [IsAuthenticated & (IsProjectAdmin | IsProjectStaffAndReadOnly)]\n model = Label\n label_type = LabelType\n\n def get(self, request, *args, **kwargs):\n labels = self.label_type.objects.filter(project=self.kwargs[\"project_id\"])\n examples = Example.objects.filter(project=self.kwargs[\"project_id\"]).values(\"id\")\n members = Member.objects.filter(project=self.kwargs[\"project_id\"])\n data = self.model.objects.calc_label_distribution(examples, members, labels)\n return Response(data=data, status=status.HTTP_200_OK)\n\n\nclass CategoryTypeDistribution(LabelDistribution):\n model = Category\n label_type = CategoryType\n\n\nclass SpanTypeDistribution(LabelDistribution):\n model = Span\n label_type = SpanType\n\n\nclass RelationTypeDistribution(LabelDistribution):\n model = Relation\n label_type = RelationType\n", "path": "backend/metrics/views.py"}]} | 999 | 320 |
gh_patches_debug_807 | rasdani/github-patches | git_diff | bokeh__bokeh-10106 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] `cd sphinx; make serve` doesn't work
#### ALL software version info (bokeh, python, notebook, OS, browser, any other relevant packages)
Bokeh 2.0.2-76-ga417746c9
#### Description of expected behavior and the observed behavior
The page at https://docs.bokeh.org/en/latest/docs/dev_guide/documentation.html mentions that it's possible to run `make serve` to serve the documentation locally. But running it results in:
```
Exception in thread Thread-2:
Traceback (most recent call last):
File "/home/p-himik/soft/miniconda3/envs/bokeh-dev/lib/python3.7/threading.py", line 917, in _bootstrap_inner
self.run()
File "/home/p-himik/soft/miniconda3/envs/bokeh-dev/lib/python3.7/threading.py", line 865, in run
self._target(*self._args, **self._kwargs)
File "docserver.py", line 43, in open_browser
webbrowser.open("http://localhost:%d/en/latest/index.html" % PORT, new="tab")
File "/home/p-himik/soft/miniconda3/envs/bokeh-dev/lib/python3.7/webbrowser.py", line 78, in open
if browser.open(url, new, autoraise):
File "/home/p-himik/soft/miniconda3/envs/bokeh-dev/lib/python3.7/webbrowser.py", line 251, in open
"expected 0, 1, or 2, got %s" % new)
webbrowser.Error: Bad 'new' parameter to open(); expected 0, 1, or 2, got tab
```
Not sure where `"tab"` has come from, but it has been there forever.
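For reference, the stdlib API is `webbrowser.open(url, new=0, autoraise=True)`, where `new` must be an integer (0 = same window, 1 = new window, 2 = new tab), so the minimal fix would presumably be:

```python
import webbrowser

# 'new' accepts only 0, 1, or 2; 2 requests a new browser tab.
webbrowser.open("http://localhost:5009/en/latest/index.html", new=2)
```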
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sphinx/docserver.py`
Content:
```
1 import os
2 import sys
3 import threading
4 import time
5 import webbrowser
6
7 import flask
8 import tornado
9 from tornado.httpserver import HTTPServer
10 from tornado.ioloop import IOLoop
11 from tornado.wsgi import WSGIContainer
12
13 _basedir = os.path.join("..", os.path.dirname(__file__))
14
15 app = flask.Flask(__name__, static_folder="/unused")
16 PORT=5009
17 http_server = HTTPServer(WSGIContainer(app))
18
19 @app.route('/')
20 def welcome():
21 return """
22 <h1>Welcome to the Bokeh documentation server</h1>
23 You probably want to go to <a href="/en/latest/index.html"> Index</a>
24 """
25
26 @app.route('/versions.json')
27 def send_versions():
28 return flask.send_from_directory(
29 os.path.join(_basedir, "sphinx"), "test_versions.json")
30
31 @app.route('/alert.html')
32 def send_alert():
33 return os.environ.get("BOKEH_DOCS_ALERT", "")
34
35 @app.route('/en/latest/<path:filename>')
36 def send_docs(filename):
37 return flask.send_from_directory(
38 os.path.join(_basedir, "sphinx/build/html/"), filename)
39
40 def open_browser():
41 # Child process
42 time.sleep(0.5)
43 webbrowser.open("http://localhost:%d/en/latest/index.html" % PORT, new="tab")
44
45 data = {}
46
47 def serve_http():
48 data['ioloop'] = IOLoop()
49 http_server.listen(PORT)
50 IOLoop.current().start()
51
52 def shutdown_server():
53 ioloop = data['ioloop']
54 ioloop.add_callback(ioloop.stop)
55 print("Asked Server to shut down.")
56
57 def ui():
58 try:
59 time.sleep(0.5)
60 input("Press <ENTER> to exit...\n") # lgtm [py/use-of-input]
61 except KeyboardInterrupt:
62 pass
63
64 if __name__ == "__main__":
65
66 if tornado.version_info[0] == 4:
67 print('docserver.py script requires tornado 5 or higher')
68 sys.exit(1)
69
70 print("\nStarting Bokeh plot server on port %d..." % PORT)
71 print("Visit http://localhost:%d/en/latest/index.html to see plots\n" % PORT)
72
73 t_server = threading.Thread(target=serve_http)
74 t_server.start()
75 t_browser = threading.Thread(target=open_browser)
76 t_browser.start()
77
78 ui()
79
80 shutdown_server()
81 t_server.join()
82 t_browser.join()
83 print("Server shut down.")
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sphinx/docserver.py b/sphinx/docserver.py
--- a/sphinx/docserver.py
+++ b/sphinx/docserver.py
@@ -40,7 +40,7 @@
def open_browser():
# Child process
time.sleep(0.5)
- webbrowser.open("http://localhost:%d/en/latest/index.html" % PORT, new="tab")
+ webbrowser.open("http://localhost:%d/en/latest/index.html" % PORT, new=2)
data = {}
| {"golden_diff": "diff --git a/sphinx/docserver.py b/sphinx/docserver.py\n--- a/sphinx/docserver.py\n+++ b/sphinx/docserver.py\n@@ -40,7 +40,7 @@\n def open_browser():\n     # Child process\n     time.sleep(0.5)\n-    webbrowser.open(\"http://localhost:%d/en/latest/index.html\" % PORT, new=\"tab\")\n+    webbrowser.open(\"http://localhost:%d/en/latest/index.html\" % PORT, new=2)\n \n data = {}\n", "issue": "[BUG] `cd sphinx; make serve` doesn't work\r\n#### ALL software version info (bokeh, python, notebook, OS, browser, any other relevant packages)\r\nBokeh 2.0.2-76-ga417746c9\r\n\r\n#### Description of expected behavior and the observed behavior\r\nThe page at https://docs.bokeh.org/en/latest/docs/dev_guide/documentation.html mentions that it's possible to run `make serve` to serve the documentation locally. But running it results in:\r\n```\r\nException in thread Thread-2:\r\nTraceback (most recent call last):\r\n  File \"/home/p-himik/soft/miniconda3/envs/bokeh-dev/lib/python3.7/threading.py\", line 917, in _bootstrap_inner\r\n    self.run()\r\n  File \"/home/p-himik/soft/miniconda3/envs/bokeh-dev/lib/python3.7/threading.py\", line 865, in run\r\n    self._target(*self._args, **self._kwargs)\r\n  File \"docserver.py\", line 43, in open_browser\r\n    webbrowser.open(\"http://localhost:%d/en/latest/index.html\" % PORT, new=\"tab\")\r\n  File \"/home/p-himik/soft/miniconda3/envs/bokeh-dev/lib/python3.7/webbrowser.py\", line 78, in open\r\n    if browser.open(url, new, autoraise):\r\n  File \"/home/p-himik/soft/miniconda3/envs/bokeh-dev/lib/python3.7/webbrowser.py\", line 251, in open\r\n    \"expected 0, 1, or 2, got %s\" % new)\r\nwebbrowser.Error: Bad 'new' parameter to open(); expected 0, 1, or 2, got tab\r\n```\r\nNot sure where `\"tab\"` has come from, but it has been there forever.\n", "before_files": [{"content": "import os\nimport sys\nimport threading\nimport time\nimport webbrowser\n\nimport flask\nimport tornado\nfrom tornado.httpserver import HTTPServer\nfrom tornado.ioloop import IOLoop\nfrom tornado.wsgi import WSGIContainer\n\n_basedir = os.path.join(\"..\", os.path.dirname(__file__))\n\napp = flask.Flask(__name__, static_folder=\"/unused\")\nPORT=5009\nhttp_server = HTTPServer(WSGIContainer(app))\n\[email protected]('/')\ndef welcome():\n    return \"\"\"\n    <h1>Welcome to the Bokeh documentation server</h1>\n    You probably want to go to <a href=\"/en/latest/index.html\"> Index</a>\n    \"\"\"\n\[email protected]('/versions.json')\ndef send_versions():\n    return flask.send_from_directory(\n        os.path.join(_basedir, \"sphinx\"), \"test_versions.json\")\n\[email protected]('/alert.html')\ndef send_alert():\n    return os.environ.get(\"BOKEH_DOCS_ALERT\", \"\")\n\[email protected]('/en/latest/<path:filename>')\ndef send_docs(filename):\n    return flask.send_from_directory(\n        os.path.join(_basedir, \"sphinx/build/html/\"), filename)\n\ndef open_browser():\n    # Child process\n    time.sleep(0.5)\n    webbrowser.open(\"http://localhost:%d/en/latest/index.html\" % PORT, new=\"tab\")\n\ndata = {}\n\ndef serve_http():\n    data['ioloop'] = IOLoop()\n    http_server.listen(PORT)\n    IOLoop.current().start()\n\ndef shutdown_server():\n    ioloop = data['ioloop']\n    ioloop.add_callback(ioloop.stop)\n    print(\"Asked Server to shut down.\")\n\ndef ui():\n    try:\n        time.sleep(0.5)\n        input(\"Press <ENTER> to exit...\\n\") # lgtm [py/use-of-input]\n    except KeyboardInterrupt:\n        pass\n\nif __name__ == \"__main__\":\n\n    if tornado.version_info[0] == 4:\n        print('docserver.py script requires tornado 5 or higher')\n        sys.exit(1)\n\n    print(\"\\nStarting Bokeh plot server on port %d...\" % PORT)\n    print(\"Visit http://localhost:%d/en/latest/index.html to see plots\\n\" % PORT)\n\n    t_server = threading.Thread(target=serve_http)\n    t_server.start()\n    t_browser = threading.Thread(target=open_browser)\n    t_browser.start()\n\n    ui()\n\n    shutdown_server()\n    t_server.join()\n    t_browser.join()\n    print(\"Server shut down.\")\n", "path": "sphinx/docserver.py"}], "after_files": [{"content": "import os\nimport sys\nimport threading\nimport time\nimport webbrowser\n\nimport flask\nimport tornado\nfrom tornado.httpserver import HTTPServer\nfrom tornado.ioloop import IOLoop\nfrom tornado.wsgi import WSGIContainer\n\n_basedir = os.path.join(\"..\", os.path.dirname(__file__))\n\napp = flask.Flask(__name__, static_folder=\"/unused\")\nPORT=5009\nhttp_server = HTTPServer(WSGIContainer(app))\n\[email protected]('/')\ndef welcome():\n    return \"\"\"\n    <h1>Welcome to the Bokeh documentation server</h1>\n    You probably want to go to <a href=\"/en/latest/index.html\"> Index</a>\n    \"\"\"\n\[email protected]('/versions.json')\ndef send_versions():\n    return flask.send_from_directory(\n        os.path.join(_basedir, \"sphinx\"), \"test_versions.json\")\n\[email protected]('/alert.html')\ndef send_alert():\n    return os.environ.get(\"BOKEH_DOCS_ALERT\", \"\")\n\[email protected]('/en/latest/<path:filename>')\ndef send_docs(filename):\n    return flask.send_from_directory(\n        os.path.join(_basedir, \"sphinx/build/html/\"), filename)\n\ndef open_browser():\n    # Child process\n    time.sleep(0.5)\n    webbrowser.open(\"http://localhost:%d/en/latest/index.html\" % PORT, new=2)\n\ndata = {}\n\ndef serve_http():\n    data['ioloop'] = IOLoop()\n    http_server.listen(PORT)\n    IOLoop.current().start()\n\ndef shutdown_server():\n    ioloop = data['ioloop']\n    ioloop.add_callback(ioloop.stop)\n    print(\"Asked Server to shut down.\")\n\ndef ui():\n    try:\n        time.sleep(0.5)\n        input(\"Press <ENTER> to exit...\\n\") # lgtm [py/use-of-input]\n    except KeyboardInterrupt:\n        pass\n\nif __name__ == \"__main__\":\n\n    if tornado.version_info[0] == 4:\n        print('docserver.py script requires tornado 5 or higher')\n        sys.exit(1)\n\n    print(\"\\nStarting Bokeh plot server on port %d...\" % PORT)\n    print(\"Visit http://localhost:%d/en/latest/index.html to see plots\\n\" % PORT)\n\n    t_server = threading.Thread(target=serve_http)\n    t_server.start()\n    t_browser = threading.Thread(target=open_browser)\n    t_browser.start()\n\n    ui()\n\n    shutdown_server()\n    t_server.join()\n    t_browser.join()\n    print(\"Server shut down.\")\n", "path": "sphinx/docserver.py"}]} | 1,373 | 111
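
The stdlib contract behind the one-line fix is easy to check in isolation. A minimal sketch (the URL is illustrative and matches the `PORT` above):

```python
import webbrowser

URL = "http://localhost:5009/en/latest/index.html"

# webbrowser.open's `new` must be an int: 0 = same window, 1 = new window,
# 2 = new tab. A string such as "tab" fails the validation added in the
# stdlib (the Python 3.7 traceback quoted in the issue shows the check).
webbrowser.open(URL, new=2)
```
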
gh_patches_debug_3215 | rasdani/github-patches | git_diff | python-discord__bot-733 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Write unit tests for `bot/rules/newlines.py`
Write unit tests for [`bot/rules/newlines.py`](../blob/master/bot/rules/newlines.py).
## Implementation details
Please make sure to read the general information in the [meta issue](553) and the [testing README](../blob/master/tests/README.md). We are aiming for a 100% [branch coverage](https://coverage.readthedocs.io/en/stable/branch.html) for this file, but if you think that is not possible, please discuss that in this issue.
## Additional information
If you want to work on this issue, **please make sure that you get assigned to it** by one of the core devs before starting to work on it. We would like to prevent the situation that multiple people are working on the same issue. To get assigned, leave a comment showing your interest in tackling this issue.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bot/rules/attachments.py`
Content:
```
1 from typing import Dict, Iterable, List, Optional, Tuple
2
3 from discord import Member, Message
4
5
6 async def apply(
7 last_message: Message, recent_messages: List[Message], config: Dict[str, int]
8 ) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]:
9 """Detects total attachments exceeding the limit sent by a single user."""
10 relevant_messages = tuple(
11 msg
12 for msg in recent_messages
13 if (
14 msg.author == last_message.author
15 and len(msg.attachments) > 0
16 )
17 )
18 total_recent_attachments = sum(len(msg.attachments) for msg in relevant_messages)
19
20 if total_recent_attachments > config['max']:
21 return (
22 f"sent {total_recent_attachments} attachments in {config['max']}s",
23 (last_message.author,),
24 relevant_messages
25 )
26 return None
27
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bot/rules/attachments.py b/bot/rules/attachments.py
--- a/bot/rules/attachments.py
+++ b/bot/rules/attachments.py
@@ -19,7 +19,7 @@
if total_recent_attachments > config['max']:
return (
- f"sent {total_recent_attachments} attachments in {config['max']}s",
+ f"sent {total_recent_attachments} attachments in {config['interval']}s",
(last_message.author,),
relevant_messages
)
| {"golden_diff": "diff --git a/bot/rules/attachments.py b/bot/rules/attachments.py\n--- a/bot/rules/attachments.py\n+++ b/bot/rules/attachments.py\n@@ -19,7 +19,7 @@\n \n if total_recent_attachments > config['max']:\n return (\n- f\"sent {total_recent_attachments} attachments in {config['max']}s\",\n+ f\"sent {total_recent_attachments} attachments in {config['interval']}s\",\n (last_message.author,),\n relevant_messages\n )\n", "issue": "Write unit tests for `bot/rules/newlines.py`\nWrite unit tests for [`bot/rules/newlines.py`](../blob/master/bot/rules/newlines.py).\n\n## Implementation details\nPlease make sure to read the general information in the [meta issue](553) and the [testing README](../blob/master/tests/README.md). We are aiming for a 100% [branch coverage](https://coverage.readthedocs.io/en/stable/branch.html) for this file, but if you think that is not possible, please discuss that in this issue.\n\n## Additional information\nIf you want to work on this issue, **please make sure that you get assigned to it** by one of the core devs before starting to work on it. We would like to prevent the situation that multiple people are working on the same issue. To get assigned, leave a comment showing your interesting in tackling this issue.\n\n", "before_files": [{"content": "from typing import Dict, Iterable, List, Optional, Tuple\n\nfrom discord import Member, Message\n\n\nasync def apply(\n last_message: Message, recent_messages: List[Message], config: Dict[str, int]\n) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]:\n \"\"\"Detects total attachments exceeding the limit sent by a single user.\"\"\"\n relevant_messages = tuple(\n msg\n for msg in recent_messages\n if (\n msg.author == last_message.author\n and len(msg.attachments) > 0\n )\n )\n total_recent_attachments = sum(len(msg.attachments) for msg in relevant_messages)\n\n if total_recent_attachments > config['max']:\n return (\n f\"sent {total_recent_attachments} attachments in {config['max']}s\",\n (last_message.author,),\n relevant_messages\n )\n return None\n", "path": "bot/rules/attachments.py"}], "after_files": [{"content": "from typing import Dict, Iterable, List, Optional, Tuple\n\nfrom discord import Member, Message\n\n\nasync def apply(\n last_message: Message, recent_messages: List[Message], config: Dict[str, int]\n) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]:\n \"\"\"Detects total attachments exceeding the limit sent by a single user.\"\"\"\n relevant_messages = tuple(\n msg\n for msg in recent_messages\n if (\n msg.author == last_message.author\n and len(msg.attachments) > 0\n )\n )\n total_recent_attachments = sum(len(msg.attachments) for msg in relevant_messages)\n\n if total_recent_attachments > config['max']:\n return (\n f\"sent {total_recent_attachments} attachments in {config['interval']}s\",\n (last_message.author,),\n relevant_messages\n )\n return None\n", "path": "bot/rules/attachments.py"}]} | 674 | 112 |
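
A minimal unit-test sketch for the patched rule, in the spirit of the issue's request. It assumes the repository root is on `sys.path` so `bot.rules.attachments` is importable; the message objects are lightweight fakes:

```python
import asyncio
from types import SimpleNamespace

from bot.rules.attachments import apply  # assumes the repo root is on sys.path

author = SimpleNamespace(name="lemon")
msg = SimpleNamespace(author=author, attachments=[object()] * 3)

# Two recent messages from the same author, 3 attachments each -> 6 > max of 5,
# so the rule triggers; the patched text must cite the interval, not the max.
result = asyncio.run(apply(msg, [msg, msg], {"max": 5, "interval": 10}))
assert result is not None
assert "in 10s" in result[0]
```
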
gh_patches_debug_26022 | rasdani/github-patches | git_diff | mindee__doctr-173 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[docs] Add a visualization of the example script in the README
While the readme specifies how you can use the example script, it does not show any visualization examples. We could easily add one to help users.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `doctr/utils/visualization.py`
Content:
```
1 # Copyright (C) 2021, Mindee.
2
3 # This program is licensed under the Apache License version 2.
4 # See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
5
6 import matplotlib.pyplot as plt
7 import matplotlib.patches as patches
8 import mplcursors
9 import numpy as np
10 from typing import Tuple, List, Dict, Any
11
12 from .common_types import BoundingBox
13
14 __all__ = ['visualize_page']
15
16
17 def create_patch(
18 geometry: BoundingBox,
19 label: str,
20 page_dimensions: Tuple[int, int],
21 color: Tuple[int, int, int],
22 alpha: float = 0.3,
23 linewidth: int = 2,
24 ) -> patches.Patch:
25 """Create a matplotlib patch (rectangle) bounding the element
26
27 Args:
28 geometry: bounding box of the element
29 label: label to display when hovered
30 page_dimensions: dimensions of the Page
31 color: color to draw box
32 alpha: opacity parameter to fill the boxes, 0 = transparent
33 linewidth: line width
34
35 Returns:
36 a rectangular Patch
37 """
38 h, w = page_dimensions
39 (xmin, ymin), (xmax, ymax) = geometry
40 xmin, xmax = xmin * w, xmax * w
41 ymin, ymax = ymin * h, ymax * h
42 rect = patches.Rectangle(
43 (xmin, ymin),
44 xmax - xmin,
45 ymax - ymin,
46 fill=True,
47 linewidth=linewidth,
48 edgecolor=(*color, alpha),
49 facecolor=(*color, alpha),
50 label=label
51 )
52 return rect
53
54
55 def visualize_page(
56 page: Dict[str, Any],
57 image: np.ndarray,
58 words_only: bool = True,
59 ) -> None:
60 """Visualize a full page with predicted blocks, lines and words
61
62 Example::
63 >>> import numpy as np
64 >>> import matplotlib.pyplot as plt
65 >>> from doctr.utils.visualization import visualize_page
66 >>> from doctr.models import ocr_db_crnn
67 >>> model = ocr_db_crnn(pretrained=True)
68 >>> input_page = (255 * np.random.rand(600, 800, 3)).astype(np.uint8)
69 >>> out = model([[input_page]])
70 >>> visualize_page(out[0].pages[0].export(), input_page)
71 >>> plt.show()
72
73 Args:
74 page: the exported Page of a Document
75 image: np array of the page, needs to have the same shape than page['dimensions']
76 words_only: whether only words should be displayed
77 """
78 # Display the image
79 _, ax = plt.subplots()
80 ax.imshow(image)
81 # hide both axis
82 ax.axis('off')
83
84 artists: List[patches.Patch] = [] # instantiate an empty list of patches (to be drawn on the page)
85
86 for block in page['blocks']:
87 if not words_only:
88 rect = create_patch(block['geometry'], 'block', page['dimensions'], (0, 1, 0), linewidth=1)
89 # add patch on figure
90 ax.add_patch(rect)
91 # add patch to cursor's artists
92 artists.append(rect)
93
94 for line in block['lines']:
95 if not words_only:
96 rect = create_patch(line['geometry'], 'line', page['dimensions'], (1, 0, 0), linewidth=1)
97 ax.add_patch(rect)
98 artists.append(rect)
99
100 for word in line['words']:
101 rect = create_patch(word['geometry'], f"{word['value']} (confidence: {word['confidence']:.2%})",
102 page['dimensions'], (0, 0, 1))
103 ax.add_patch(rect)
104 artists.append(rect)
105
106 if not words_only:
107 for artefact in block['artefacts']:
108 rect = create_patch(artefact['geometry'], 'artefact', page['dimensions'], (0.5, 0.5, 0.5), linewidth=1)
109 ax.add_patch(rect)
110 artists.append(rect)
111
112 # Create mlp Cursor to hover patches in artists
113 mplcursors.Cursor(artists, hover=2).connect("add", lambda sel: sel.annotation.set_text(sel.artist.get_label()))
114
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/doctr/utils/visualization.py b/doctr/utils/visualization.py
--- a/doctr/utils/visualization.py
+++ b/doctr/utils/visualization.py
@@ -56,6 +56,7 @@
page: Dict[str, Any],
image: np.ndarray,
words_only: bool = True,
+ scale: float = 10,
) -> None:
"""Visualize a full page with predicted blocks, lines and words
@@ -74,9 +75,13 @@
page: the exported Page of a Document
image: np array of the page, needs to have the same shape than page['dimensions']
words_only: whether only words should be displayed
+ scale: figsize of the largest windows side
"""
+ # Get proper scale and aspect ratio
+ h, w = image.shape[:2]
+ size = (scale * w / h, scale) if h > w else (scale, h / w * scale)
+ fig, ax = plt.subplots(figsize=size)
# Display the image
- _, ax = plt.subplots()
ax.imshow(image)
# hide both axis
ax.axis('off')
@@ -111,3 +116,4 @@
# Create mlp Cursor to hover patches in artists
mplcursors.Cursor(artists, hover=2).connect("add", lambda sel: sel.annotation.set_text(sel.artist.get_label()))
+ fig.tight_layout()
| {"golden_diff": "diff --git a/doctr/utils/visualization.py b/doctr/utils/visualization.py\n--- a/doctr/utils/visualization.py\n+++ b/doctr/utils/visualization.py\n@@ -56,6 +56,7 @@\n     page: Dict[str, Any],\n     image: np.ndarray,\n     words_only: bool = True,\n+    scale: float = 10,\n ) -> None:\n     \"\"\"Visualize a full page with predicted blocks, lines and words\n \n@@ -74,9 +75,13 @@\n         page: the exported Page of a Document\n         image: np array of the page, needs to have the same shape than page['dimensions']\n         words_only: whether only words should be displayed\n+        scale: figsize of the largest windows side\n     \"\"\"\n+    # Get proper scale and aspect ratio\n+    h, w = image.shape[:2]\n+    size = (scale * w / h, scale) if h > w else (scale, h / w * scale)\n+    fig, ax = plt.subplots(figsize=size)\n     # Display the image\n-    _, ax = plt.subplots()\n     ax.imshow(image)\n     # hide both axis\n     ax.axis('off')\n@@ -111,3 +116,4 @@\n \n     # Create mlp Cursor to hover patches in artists\n     mplcursors.Cursor(artists, hover=2).connect(\"add\", lambda sel: sel.annotation.set_text(sel.artist.get_label()))\n+    fig.tight_layout()\n", "issue": "[docs] Add a visualization of the example script in the README\nWhile the readme specifies how you can use the example script, it does not show any visualization examples. We could easily add one to help users.\n", "before_files": [{"content": "# Copyright (C) 2021, Mindee.\n\n# This program is licensed under the Apache License version 2.\n# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.\n\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport mplcursors\nimport numpy as np\nfrom typing import Tuple, List, Dict, Any\n\nfrom .common_types import BoundingBox\n\n__all__ = ['visualize_page']\n\n\ndef create_patch(\n    geometry: BoundingBox,\n    label: str,\n    page_dimensions: Tuple[int, int],\n    color: Tuple[int, int, int],\n    alpha: float = 0.3,\n    linewidth: int = 2,\n) -> patches.Patch:\n    \"\"\"Create a matplotlib patch (rectangle) bounding the element\n\n    Args:\n        geometry: bounding box of the element\n        label: label to display when hovered\n        page_dimensions: dimensions of the Page\n        color: color to draw box\n        alpha: opacity parameter to fill the boxes, 0 = transparent\n        linewidth: line width\n\n    Returns:\n        a rectangular Patch\n    \"\"\"\n    h, w = page_dimensions\n    (xmin, ymin), (xmax, ymax) = geometry\n    xmin, xmax = xmin * w, xmax * w\n    ymin, ymax = ymin * h, ymax * h\n    rect = patches.Rectangle(\n        (xmin, ymin),\n        xmax - xmin,\n        ymax - ymin,\n        fill=True,\n        linewidth=linewidth,\n        edgecolor=(*color, alpha),\n        facecolor=(*color, alpha),\n        label=label\n    )\n    return rect\n\n\ndef visualize_page(\n    page: Dict[str, Any],\n    image: np.ndarray,\n    words_only: bool = True,\n) -> None:\n    \"\"\"Visualize a full page with predicted blocks, lines and words\n\n    Example::\n        >>> import numpy as np\n        >>> import matplotlib.pyplot as plt\n        >>> from doctr.utils.visualization import visualize_page\n        >>> from doctr.models import ocr_db_crnn\n        >>> model = ocr_db_crnn(pretrained=True)\n        >>> input_page = (255 * np.random.rand(600, 800, 3)).astype(np.uint8)\n        >>> out = model([[input_page]])\n        >>> visualize_page(out[0].pages[0].export(), input_page)\n        >>> plt.show()\n\n    Args:\n        page: the exported Page of a Document\n        image: np array of the page, needs to have the same shape than page['dimensions']\n        words_only: whether only words should be displayed\n    \"\"\"\n    # Display the image\n    _, ax = plt.subplots()\n    ax.imshow(image)\n    # hide both axis\n    ax.axis('off')\n\n    artists: List[patches.Patch] = []  # instantiate an empty list of patches (to be drawn on the page)\n\n    for block in page['blocks']:\n        if not words_only:\n            rect = create_patch(block['geometry'], 'block', page['dimensions'], (0, 1, 0), linewidth=1)\n            # add patch on figure\n            ax.add_patch(rect)\n            # add patch to cursor's artists\n            artists.append(rect)\n\n        for line in block['lines']:\n            if not words_only:\n                rect = create_patch(line['geometry'], 'line', page['dimensions'], (1, 0, 0), linewidth=1)\n                ax.add_patch(rect)\n                artists.append(rect)\n\n            for word in line['words']:\n                rect = create_patch(word['geometry'], f\"{word['value']} (confidence: {word['confidence']:.2%})\",\n                                    page['dimensions'], (0, 0, 1))\n                ax.add_patch(rect)\n                artists.append(rect)\n\n        if not words_only:\n            for artefact in block['artefacts']:\n                rect = create_patch(artefact['geometry'], 'artefact', page['dimensions'], (0.5, 0.5, 0.5), linewidth=1)\n                ax.add_patch(rect)\n                artists.append(rect)\n\n    # Create mlp Cursor to hover patches in artists\n    mplcursors.Cursor(artists, hover=2).connect(\"add\", lambda sel: sel.annotation.set_text(sel.artist.get_label()))\n", "path": "doctr/utils/visualization.py"}], "after_files": [{"content": "# Copyright (C) 2021, Mindee.\n\n# This program is licensed under the Apache License version 2.\n# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.\n\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport mplcursors\nimport numpy as np\nfrom typing import Tuple, List, Dict, Any\n\nfrom .common_types import BoundingBox\n\n__all__ = ['visualize_page']\n\n\ndef create_patch(\n    geometry: BoundingBox,\n    label: str,\n    page_dimensions: Tuple[int, int],\n    color: Tuple[int, int, int],\n    alpha: float = 0.3,\n    linewidth: int = 2,\n) -> patches.Patch:\n    \"\"\"Create a matplotlib patch (rectangle) bounding the element\n\n    Args:\n        geometry: bounding box of the element\n        label: label to display when hovered\n        page_dimensions: dimensions of the Page\n        color: color to draw box\n        alpha: opacity parameter to fill the boxes, 0 = transparent\n        linewidth: line width\n\n    Returns:\n        a rectangular Patch\n    \"\"\"\n    h, w = page_dimensions\n    (xmin, ymin), (xmax, ymax) = geometry\n    xmin, xmax = xmin * w, xmax * w\n    ymin, ymax = ymin * h, ymax * h\n    rect = patches.Rectangle(\n        (xmin, ymin),\n        xmax - xmin,\n        ymax - ymin,\n        fill=True,\n        linewidth=linewidth,\n        edgecolor=(*color, alpha),\n        facecolor=(*color, alpha),\n        label=label\n    )\n    return rect\n\n\ndef visualize_page(\n    page: Dict[str, Any],\n    image: np.ndarray,\n    words_only: bool = True,\n    scale: float = 10,\n) -> None:\n    \"\"\"Visualize a full page with predicted blocks, lines and words\n\n    Example::\n        >>> import numpy as np\n        >>> import matplotlib.pyplot as plt\n        >>> from doctr.utils.visualization import visualize_page\n        >>> from doctr.models import ocr_db_crnn\n        >>> model = ocr_db_crnn(pretrained=True)\n        >>> input_page = (255 * np.random.rand(600, 800, 3)).astype(np.uint8)\n        >>> out = model([[input_page]])\n        >>> visualize_page(out[0].pages[0].export(), input_page)\n        >>> plt.show()\n\n    Args:\n        page: the exported Page of a Document\n        image: np array of the page, needs to have the same shape than page['dimensions']\n        words_only: whether only words should be displayed\n        scale: figsize of the largest windows side\n    \"\"\"\n    # Get proper scale and aspect ratio\n    h, w = image.shape[:2]\n    size = (scale * w / h, scale) if h > w else (scale, h / w * scale)\n    fig, ax = plt.subplots(figsize=size)\n    # Display the image\n    ax.imshow(image)\n    # hide both axis\n    ax.axis('off')\n\n    artists: List[patches.Patch] = []  # instantiate an empty list of patches (to be drawn on the page)\n\n    for block in page['blocks']:\n        if not words_only:\n            rect = create_patch(block['geometry'], 'block', page['dimensions'], (0, 1, 0), linewidth=1)\n            # add patch on figure\n            ax.add_patch(rect)\n            # add patch to cursor's artists\n            artists.append(rect)\n\n        for line in block['lines']:\n            if not words_only:\n                rect = create_patch(line['geometry'], 'line', page['dimensions'], (1, 0, 0), linewidth=1)\n                ax.add_patch(rect)\n                artists.append(rect)\n\n            for word in line['words']:\n                rect = create_patch(word['geometry'], f\"{word['value']} (confidence: {word['confidence']:.2%})\",\n                                    page['dimensions'], (0, 0, 1))\n                ax.add_patch(rect)\n                artists.append(rect)\n\n        if not words_only:\n            for artefact in block['artefacts']:\n                rect = create_patch(artefact['geometry'], 'artefact', page['dimensions'], (0.5, 0.5, 0.5), linewidth=1)\n                ax.add_patch(rect)\n                artists.append(rect)\n\n    # Create mlp Cursor to hover patches in artists\n    mplcursors.Cursor(artists, hover=2).connect(\"add\", lambda sel: sel.annotation.set_text(sel.artist.get_label()))\n    fig.tight_layout()\n", "path": "doctr/utils/visualization.py"}]} | 1,461 | 317
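
The figsize computation introduced by the patch can be checked in isolation; the values below mirror the 600x800 example page from the docstring:

```python
# h, w play the role of image.shape[:2] in the patched function.
h, w = 600, 800
scale = 10
size = (scale * w / h, scale) if h > w else (scale, h / w * scale)
print(size)  # (10, 7.5) -> the figsize keeps the page's 4:3 aspect ratio
```
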
gh_patches_debug_36520 | rasdani/github-patches | git_diff | vacanza__python-holidays-1555 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update Denmark holidays
I've received an email with a link to https://www.norden.org/en/info-norden/public-holidays-denmark
The author complained about absence of June 5th in the list of holiday dates:
> The calendar for Denmark does not include 5 June.
Denmark holidays need to be extended using categories approach.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `holidays/countries/denmark.py`
Content:
```
1 # python-holidays
2 # ---------------
3 # A fast, efficient Python library for generating country, province and state
4 # specific sets of holidays on the fly. It aims to make determining whether a
5 # specific date is a holiday as fast and flexible as possible.
6 #
7 # Authors: dr-prodigy <[email protected]> (c) 2017-2023
8 # ryanss <[email protected]> (c) 2014-2017
9 # Website: https://github.com/dr-prodigy/python-holidays
10 # License: MIT (see LICENSE file)
11
12 from datetime import timedelta as td
13 from gettext import gettext as tr
14
15 from holidays.groups import ChristianHolidays, InternationalHolidays
16 from holidays.holiday_base import HolidayBase
17
18
19 class Denmark(HolidayBase, ChristianHolidays, InternationalHolidays):
20 """
21 Denmark holidays.
22
23 References:
24 - https://en.wikipedia.org/wiki/Public_holidays_in_Denmark
25 - https://www.ft.dk/samling/20222/lovforslag/l13/index.htm
26 """
27
28 country = "DK"
29 default_language = "da"
30 supported_languages = ("da", "en_US", "uk")
31
32 def __init__(self, *args, **kwargs):
33 ChristianHolidays.__init__(self)
34 InternationalHolidays.__init__(self)
35 super().__init__(*args, **kwargs)
36
37 def _populate(self, year):
38 super()._populate(year)
39
40 # New Year's Day.
41 self._add_new_years_day(tr("Nytårsdag"))
42
43 # Holy Thursday.
44 self._add_holy_thursday(tr("Skærtorsdag"))
45
46 # Good Friday.
47 self._add_good_friday(tr("Langfredag"))
48
49 # Easter Sunday.
50 self._add_easter_sunday(tr("Påskedag"))
51
52 # Easter Monday.
53 self._add_easter_monday(tr("Anden påskedag"))
54
55 # See https://www.ft.dk/samling/20222/lovforslag/l13/index.htm
56 if year <= 2023:
57 # Great Day of Prayers.
58 self._add_holiday(tr("Store bededag"), self._easter_sunday + td(days=+26))
59
60 # Ascension Day.
61 self._add_ascension_thursday(tr("Kristi himmelfartsdag"))
62
63 # Whit Sunday.
64 self._add_whit_sunday(tr("Pinsedag"))
65
66 # Whit Monday.
67 self._add_whit_monday(tr("Anden pinsedag"))
68
69 # Christmas Day.
70 self._add_christmas_day(tr("Juledag"))
71
72 # Second Day of Christmas.
73 self._add_christmas_day_two(tr("Anden juledag"))
74
75
76 class DK(Denmark):
77 pass
78
79
80 class DNK(Denmark):
81 pass
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/holidays/countries/denmark.py b/holidays/countries/denmark.py
--- a/holidays/countries/denmark.py
+++ b/holidays/countries/denmark.py
@@ -12,6 +12,7 @@
from datetime import timedelta as td
from gettext import gettext as tr
+from holidays.constants import OPTIONAL, PUBLIC
from holidays.groups import ChristianHolidays, InternationalHolidays
from holidays.holiday_base import HolidayBase
@@ -22,11 +23,13 @@
References:
- https://en.wikipedia.org/wiki/Public_holidays_in_Denmark
+ - https://www.norden.org/en/info-norden/public-holidays-denmark
- https://www.ft.dk/samling/20222/lovforslag/l13/index.htm
"""
country = "DK"
default_language = "da"
+ supported_categories = {OPTIONAL, PUBLIC}
supported_languages = ("da", "en_US", "uk")
def __init__(self, *args, **kwargs):
@@ -34,9 +37,7 @@
InternationalHolidays.__init__(self)
super().__init__(*args, **kwargs)
- def _populate(self, year):
- super()._populate(year)
-
+ def _populate_public_holidays(self):
# New Year's Day.
self._add_new_years_day(tr("Nytårsdag"))
@@ -53,7 +54,7 @@
self._add_easter_monday(tr("Anden påskedag"))
# See https://www.ft.dk/samling/20222/lovforslag/l13/index.htm
- if year <= 2023:
+ if self._year <= 2023:
# Great Day of Prayers.
self._add_holiday(tr("Store bededag"), self._easter_sunday + td(days=+26))
@@ -72,6 +73,19 @@
# Second Day of Christmas.
self._add_christmas_day_two(tr("Anden juledag"))
+ def _populate_optional_holidays(self):
+ # International Workers' Day.
+ self._add_labor_day(tr("Arbejdernes kampdag"))
+
+ # Constitution Day.
+ self._add_holiday_jun_5(tr("Grundlovsdag"))
+
+ # Christmas Eve.
+ self._add_christmas_eve(tr("Juleaftensdag"))
+
+ # New Year's Eve.
+ self._add_new_years_eve(tr("Nytårsaften"))
+
class DK(Denmark):
pass
| {"golden_diff": "diff --git a/holidays/countries/denmark.py b/holidays/countries/denmark.py\n--- a/holidays/countries/denmark.py\n+++ b/holidays/countries/denmark.py\n@@ -12,6 +12,7 @@\n from datetime import timedelta as td\n from gettext import gettext as tr\n \n+from holidays.constants import OPTIONAL, PUBLIC\n from holidays.groups import ChristianHolidays, InternationalHolidays\n from holidays.holiday_base import HolidayBase\n \n@@ -22,11 +23,13 @@\n \n     References:\n     - https://en.wikipedia.org/wiki/Public_holidays_in_Denmark\n+    - https://www.norden.org/en/info-norden/public-holidays-denmark\n     - https://www.ft.dk/samling/20222/lovforslag/l13/index.htm\n     \"\"\"\n \n     country = \"DK\"\n     default_language = \"da\"\n+    supported_categories = {OPTIONAL, PUBLIC}\n     supported_languages = (\"da\", \"en_US\", \"uk\")\n \n     def __init__(self, *args, **kwargs):\n@@ -34,9 +37,7 @@\n         InternationalHolidays.__init__(self)\n         super().__init__(*args, **kwargs)\n \n-    def _populate(self, year):\n-        super()._populate(year)\n-\n+    def _populate_public_holidays(self):\n         # New Year's Day.\n         self._add_new_years_day(tr(\"Nyt\u00e5rsdag\"))\n \n@@ -53,7 +54,7 @@\n         self._add_easter_monday(tr(\"Anden p\u00e5skedag\"))\n \n         # See https://www.ft.dk/samling/20222/lovforslag/l13/index.htm\n-        if year <= 2023:\n+        if self._year <= 2023:\n             # Great Day of Prayers.\n             self._add_holiday(tr(\"Store bededag\"), self._easter_sunday + td(days=+26))\n \n@@ -72,6 +73,19 @@\n         # Second Day of Christmas.\n         self._add_christmas_day_two(tr(\"Anden juledag\"))\n \n+    def _populate_optional_holidays(self):\n+        # International Workers' Day.\n+        self._add_labor_day(tr(\"Arbejdernes kampdag\"))\n+\n+        # Constitution Day.\n+        self._add_holiday_jun_5(tr(\"Grundlovsdag\"))\n+\n+        # Christmas Eve.\n+        self._add_christmas_eve(tr(\"Juleaftensdag\"))\n+\n+        # New Year's Eve.\n+        self._add_new_years_eve(tr(\"Nyt\u00e5rsaften\"))\n+\n \n class DK(Denmark):\n     pass\n", "issue": "Update Denmark holidays\nI've received an email with a link to https://www.norden.org/en/info-norden/public-holidays-denmark\r\n\r\nThe author complained about absence of June 5th in the list of holiday dates:\r\n\r\n> The calendar for Denmark does not include 5 June.\r\n\r\nDenmark holidays need to be extended using categories approach.\n", "before_files": [{"content": "# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Authors: dr-prodigy <[email protected]> (c) 2017-2023\n#          ryanss <[email protected]> (c) 2014-2017\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\n\nfrom datetime import timedelta as td\nfrom gettext import gettext as tr\n\nfrom holidays.groups import ChristianHolidays, InternationalHolidays\nfrom holidays.holiday_base import HolidayBase\n\n\nclass Denmark(HolidayBase, ChristianHolidays, InternationalHolidays):\n    \"\"\"\n    Denmark holidays.\n\n    References:\n    - https://en.wikipedia.org/wiki/Public_holidays_in_Denmark\n    - https://www.ft.dk/samling/20222/lovforslag/l13/index.htm\n    \"\"\"\n\n    country = \"DK\"\n    default_language = \"da\"\n    supported_languages = (\"da\", \"en_US\", \"uk\")\n\n    def __init__(self, *args, **kwargs):\n        ChristianHolidays.__init__(self)\n        InternationalHolidays.__init__(self)\n        super().__init__(*args, **kwargs)\n\n    def _populate(self, year):\n        super()._populate(year)\n\n        # New Year's Day.\n        self._add_new_years_day(tr(\"Nyt\u00e5rsdag\"))\n\n        # Holy Thursday.\n        self._add_holy_thursday(tr(\"Sk\u00e6rtorsdag\"))\n\n        # Good Friday.\n        self._add_good_friday(tr(\"Langfredag\"))\n\n        # Easter Sunday.\n        self._add_easter_sunday(tr(\"P\u00e5skedag\"))\n\n        # Easter Monday.\n        self._add_easter_monday(tr(\"Anden p\u00e5skedag\"))\n\n        # See https://www.ft.dk/samling/20222/lovforslag/l13/index.htm\n        if year <= 2023:\n            # Great Day of Prayers.\n            self._add_holiday(tr(\"Store bededag\"), self._easter_sunday + td(days=+26))\n\n        # Ascension Day.\n        self._add_ascension_thursday(tr(\"Kristi himmelfartsdag\"))\n\n        # Whit Sunday.\n        self._add_whit_sunday(tr(\"Pinsedag\"))\n\n        # Whit Monday.\n        self._add_whit_monday(tr(\"Anden pinsedag\"))\n\n        # Christmas Day.\n        self._add_christmas_day(tr(\"Juledag\"))\n\n        # Second Day of Christmas.\n        self._add_christmas_day_two(tr(\"Anden juledag\"))\n\n\nclass DK(Denmark):\n    pass\n\n\nclass DNK(Denmark):\n    pass\n", "path": "holidays/countries/denmark.py"}], "after_files": [{"content": "# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Authors: dr-prodigy <[email protected]> (c) 2017-2023\n#          ryanss <[email protected]> (c) 2014-2017\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\n\nfrom datetime import timedelta as td\nfrom gettext import gettext as tr\n\nfrom holidays.constants import OPTIONAL, PUBLIC\nfrom holidays.groups import ChristianHolidays, InternationalHolidays\nfrom holidays.holiday_base import HolidayBase\n\n\nclass Denmark(HolidayBase, ChristianHolidays, InternationalHolidays):\n    \"\"\"\n    Denmark holidays.\n\n    References:\n    - https://en.wikipedia.org/wiki/Public_holidays_in_Denmark\n    - https://www.norden.org/en/info-norden/public-holidays-denmark\n    - https://www.ft.dk/samling/20222/lovforslag/l13/index.htm\n    \"\"\"\n\n    country = \"DK\"\n    default_language = \"da\"\n    supported_categories = {OPTIONAL, PUBLIC}\n    supported_languages = (\"da\", \"en_US\", \"uk\")\n\n    def __init__(self, *args, **kwargs):\n        ChristianHolidays.__init__(self)\n        InternationalHolidays.__init__(self)\n        super().__init__(*args, **kwargs)\n\n    def _populate_public_holidays(self):\n        # New Year's Day.\n        self._add_new_years_day(tr(\"Nyt\u00e5rsdag\"))\n\n        # Holy Thursday.\n        self._add_holy_thursday(tr(\"Sk\u00e6rtorsdag\"))\n\n        # Good Friday.\n        self._add_good_friday(tr(\"Langfredag\"))\n\n        # Easter Sunday.\n        self._add_easter_sunday(tr(\"P\u00e5skedag\"))\n\n        # Easter Monday.\n        self._add_easter_monday(tr(\"Anden p\u00e5skedag\"))\n\n        # See https://www.ft.dk/samling/20222/lovforslag/l13/index.htm\n        if self._year <= 2023:\n            # Great Day of Prayers.\n            self._add_holiday(tr(\"Store bededag\"), self._easter_sunday + td(days=+26))\n\n        # Ascension Day.\n        self._add_ascension_thursday(tr(\"Kristi himmelfartsdag\"))\n\n        # Whit Sunday.\n        self._add_whit_sunday(tr(\"Pinsedag\"))\n\n        # Whit Monday.\n        self._add_whit_monday(tr(\"Anden pinsedag\"))\n\n        # Christmas Day.\n        self._add_christmas_day(tr(\"Juledag\"))\n\n        # Second Day of Christmas.\n        self._add_christmas_day_two(tr(\"Anden juledag\"))\n\n    def _populate_optional_holidays(self):\n        # International Workers' Day.\n        self._add_labor_day(tr(\"Arbejdernes kampdag\"))\n\n        # Constitution Day.\n        self._add_holiday_jun_5(tr(\"Grundlovsdag\"))\n\n        # Christmas Eve.\n        self._add_christmas_eve(tr(\"Juleaftensdag\"))\n\n        # New Year's Eve.\n        self._add_new_years_eve(tr(\"Nyt\u00e5rsaften\"))\n\n\nclass DK(Denmark):\n    pass\n\n\nclass DNK(Denmark):\n    pass\n", "path": "holidays/countries/denmark.py"}]} | 1,143 | 598
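
Under the categories API this patch introduces, Constitution Day surfaces only when the optional category is requested. A minimal sketch, assuming a python-holidays build that contains this change and that string dates are accepted as keys (both are the library's documented behavior):

```python
import holidays

public = holidays.Denmark(years=2023)
optional = holidays.Denmark(years=2023, categories=("optional",))

assert "2023-06-05" not in public    # Grundlovsdag is not a public holiday
assert "2023-06-05" in optional      # ...but it is in the optional set
```
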
gh_patches_debug_21675 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-2385 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
counting contributions to polls on module tile
as discussed please count the comments AND all answers on poll module tiles.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/projects/templatetags/meinberlin_project_tags.py`
Content:
```
1 from django import template
2
3 from adhocracy4.comments.models import Comment
4 from meinberlin.apps.budgeting.models import Proposal as budget_proposal
5 from meinberlin.apps.ideas.models import Idea
6 from meinberlin.apps.kiezkasse.models import Proposal as kiezkasse_proposal
7 from meinberlin.apps.mapideas.models import MapIdea
8 from meinberlin.apps.projects import get_project_type
9
10 register = template.Library()
11
12
13 @register.filter
14 def project_url(project):
15 if get_project_type(project) in ('external', 'bplan'):
16 return project.externalproject.url
17 return project.get_absolute_url()
18
19
20 @register.filter
21 def project_type(project):
22 return get_project_type(project)
23
24
25 @register.filter
26 def is_external(project):
27 return get_project_type(project) in ('external', 'bplan')
28
29
30 @register.filter
31 def is_container(project):
32 return get_project_type(project) == 'container'
33
34
35 @register.simple_tag
36 def to_class_name(value):
37 return value.__class__.__name__
38
39
40 @register.simple_tag
41 def get_num_entries(module):
42 """Count all user-generated items."""
43 item_count = \
44 Idea.objects.filter(module=module).count() \
45 + MapIdea.objects.filter(module=module).count() \
46 + budget_proposal.objects.filter(module=module).count() \
47 + kiezkasse_proposal.objects.filter(module=module).count() \
48 + Comment.objects.filter(idea__module=module).count() \
49 + Comment.objects.filter(mapidea__module=module).count() \
50 + Comment.objects.filter(budget_proposal__module=module).count() \
51 + Comment.objects.filter(kiezkasse_proposal__module=module).count() \
52 + Comment.objects.filter(topic__module=module).count() \
53 + Comment.objects.filter(maptopic__module=module).count() \
54 + Comment.objects.filter(paragraph__chapter__module=module).count() \
55 + Comment.objects.filter(chapter__module=module).count() \
56 + Comment.objects.filter(poll__module=module).count()
57 return item_count
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py b/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py
--- a/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py
+++ b/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py
@@ -5,6 +5,7 @@
from meinberlin.apps.ideas.models import Idea
from meinberlin.apps.kiezkasse.models import Proposal as kiezkasse_proposal
from meinberlin.apps.mapideas.models import MapIdea
+from meinberlin.apps.polls.models import Vote
from meinberlin.apps.projects import get_project_type
register = template.Library()
@@ -53,5 +54,6 @@
+ Comment.objects.filter(maptopic__module=module).count() \
+ Comment.objects.filter(paragraph__chapter__module=module).count() \
+ Comment.objects.filter(chapter__module=module).count() \
- + Comment.objects.filter(poll__module=module).count()
+ + Comment.objects.filter(poll__module=module).count() \
+ + Vote.objects.filter(choice__question__poll__module=module).count()
return item_count
| {"golden_diff": "diff --git a/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py b/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py\n--- a/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py\n+++ b/meinberlin/apps/projects/templatetags/meinberlin_project_tags.py\n@@ -5,6 +5,7 @@\n from meinberlin.apps.ideas.models import Idea\n from meinberlin.apps.kiezkasse.models import Proposal as kiezkasse_proposal\n from meinberlin.apps.mapideas.models import MapIdea\n+from meinberlin.apps.polls.models import Vote\n from meinberlin.apps.projects import get_project_type\n \n register = template.Library()\n@@ -53,5 +54,6 @@\n         + Comment.objects.filter(maptopic__module=module).count() \\\n         + Comment.objects.filter(paragraph__chapter__module=module).count() \\\n         + Comment.objects.filter(chapter__module=module).count() \\\n-        + Comment.objects.filter(poll__module=module).count()\n+        + Comment.objects.filter(poll__module=module).count() \\\n+        + Vote.objects.filter(choice__question__poll__module=module).count()\n     return item_count\n", "issue": "counting contributions to polls on module tile\nas discussed please count the comments AND all answers on poll module tiles.\n", "before_files": [{"content": "from django import template\n\nfrom adhocracy4.comments.models import Comment\nfrom meinberlin.apps.budgeting.models import Proposal as budget_proposal\nfrom meinberlin.apps.ideas.models import Idea\nfrom meinberlin.apps.kiezkasse.models import Proposal as kiezkasse_proposal\nfrom meinberlin.apps.mapideas.models import MapIdea\nfrom meinberlin.apps.projects import get_project_type\n\nregister = template.Library()\n\n\[email protected]\ndef project_url(project):\n    if get_project_type(project) in ('external', 'bplan'):\n        return project.externalproject.url\n    return project.get_absolute_url()\n\n\[email protected]\ndef project_type(project):\n    return get_project_type(project)\n\n\[email protected]\ndef is_external(project):\n    return get_project_type(project) in ('external', 'bplan')\n\n\[email protected]\ndef is_container(project):\n    return get_project_type(project) == 'container'\n\n\[email protected]_tag\ndef to_class_name(value):\n    return value.__class__.__name__\n\n\[email protected]_tag\ndef get_num_entries(module):\n    \"\"\"Count all user-generated items.\"\"\"\n    item_count = \\\n        Idea.objects.filter(module=module).count() \\\n        + MapIdea.objects.filter(module=module).count() \\\n        + budget_proposal.objects.filter(module=module).count() \\\n        + kiezkasse_proposal.objects.filter(module=module).count() \\\n        + Comment.objects.filter(idea__module=module).count() \\\n        + Comment.objects.filter(mapidea__module=module).count() \\\n        + Comment.objects.filter(budget_proposal__module=module).count() \\\n        + Comment.objects.filter(kiezkasse_proposal__module=module).count() \\\n        + Comment.objects.filter(topic__module=module).count() \\\n        + Comment.objects.filter(maptopic__module=module).count() \\\n        + Comment.objects.filter(paragraph__chapter__module=module).count() \\\n        + Comment.objects.filter(chapter__module=module).count() \\\n        + Comment.objects.filter(poll__module=module).count()\n    return item_count\n", "path": "meinberlin/apps/projects/templatetags/meinberlin_project_tags.py"}], "after_files": [{"content": "from django import template\n\nfrom adhocracy4.comments.models import Comment\nfrom meinberlin.apps.budgeting.models import Proposal as budget_proposal\nfrom meinberlin.apps.ideas.models import Idea\nfrom meinberlin.apps.kiezkasse.models import Proposal as kiezkasse_proposal\nfrom meinberlin.apps.mapideas.models import MapIdea\nfrom meinberlin.apps.polls.models import Vote\nfrom meinberlin.apps.projects import get_project_type\n\nregister = template.Library()\n\n\[email protected]\ndef project_url(project):\n    if get_project_type(project) in ('external', 'bplan'):\n        return project.externalproject.url\n    return project.get_absolute_url()\n\n\[email protected]\ndef project_type(project):\n    return get_project_type(project)\n\n\[email protected]\ndef is_external(project):\n    return get_project_type(project) in ('external', 'bplan')\n\n\[email protected]\ndef is_container(project):\n    return get_project_type(project) == 'container'\n\n\[email protected]_tag\ndef to_class_name(value):\n    return value.__class__.__name__\n\n\[email protected]_tag\ndef get_num_entries(module):\n    \"\"\"Count all user-generated items.\"\"\"\n    item_count = \\\n        Idea.objects.filter(module=module).count() \\\n        + MapIdea.objects.filter(module=module).count() \\\n        + budget_proposal.objects.filter(module=module).count() \\\n        + kiezkasse_proposal.objects.filter(module=module).count() \\\n        + Comment.objects.filter(idea__module=module).count() \\\n        + Comment.objects.filter(mapidea__module=module).count() \\\n        + Comment.objects.filter(budget_proposal__module=module).count() \\\n        + Comment.objects.filter(kiezkasse_proposal__module=module).count() \\\n        + Comment.objects.filter(topic__module=module).count() \\\n        + Comment.objects.filter(maptopic__module=module).count() \\\n        + Comment.objects.filter(paragraph__chapter__module=module).count() \\\n        + Comment.objects.filter(chapter__module=module).count() \\\n        + Comment.objects.filter(poll__module=module).count() \\\n        + Vote.objects.filter(choice__question__poll__module=module).count()\n    return item_count\n", "path": "meinberlin/apps/projects/templatetags/meinberlin_project_tags.py"}]} | 851 | 280
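
Isolated from the long sum in `get_num_entries`, the poll part of the count after the patch is just comments plus votes. The helper below is hypothetical (not part of the codebase) and needs to run inside a configured Django project; the two querysets are taken directly from the diff:

```python
from adhocracy4.comments.models import Comment
from meinberlin.apps.polls.models import Vote


def poll_contribution_count(module):
    """Comments on the module's polls plus every answer (vote) given."""
    return (
        Comment.objects.filter(poll__module=module).count()
        + Vote.objects.filter(choice__question__poll__module=module).count()
    )
```
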
gh_patches_debug_4165 | rasdani/github-patches | git_diff | ivy-llc__ivy-14979 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
extract
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/numpy/sorting_searching_counting/searching.py`
Content:
```
1 # local
2
3 import ivy
4
5 from ivy.functional.frontends.numpy import promote_types_of_numpy_inputs
6
7 from ivy.functional.frontends.numpy.func_wrapper import (
8 to_ivy_arrays_and_back,
9 from_zero_dim_arrays_to_scalar,
10 handle_numpy_out,
11 )
12
13
14 @to_ivy_arrays_and_back
15 def where(cond, x1=None, x2=None, /):
16 if x1 is None and x2 is None:
17 # numpy where behaves as np.asarray(condition).nonzero() when x and y
18 # not included
19 return ivy.asarray(cond).nonzero()
20 elif x1 is not None and x2 is not None:
21 x1, x2 = promote_types_of_numpy_inputs(x1, x2)
22 return ivy.where(cond, x1, x2)
23 else:
24 raise ivy.utils.exceptions.IvyException("where takes either 1 or 3 arguments")
25
26
27 @to_ivy_arrays_and_back
28 def nonzero(a):
29 return ivy.nonzero(a)
30
31
32 @handle_numpy_out
33 @to_ivy_arrays_and_back
34 @from_zero_dim_arrays_to_scalar
35 def argmin(a, /, *, axis=None, keepdims=False, out=None):
36 return ivy.argmin(a, axis=axis, out=out, keepdims=keepdims)
37
38
39 @handle_numpy_out
40 @to_ivy_arrays_and_back
41 @from_zero_dim_arrays_to_scalar
42 def argmax(
43 a,
44 /,
45 *,
46 axis=None,
47 out=None,
48 keepdims=False,
49 ):
50 return ivy.argmax(a, axis=axis, out=out, keepdims=keepdims)
51
52
53 @to_ivy_arrays_and_back
54 def flatnonzero(a):
55 return ivy.nonzero(ivy.reshape(a, (-1,)))
56
57
58 @to_ivy_arrays_and_back
59 def searchsorted(a, v, side="left", sorter=None):
60 return ivy.searchsorted(a, v, side=side, sorter=sorter)
61
62
63 @to_ivy_arrays_and_back
64 def argwhere(a):
65 return ivy.argwhere(a)
66
67
68 # nanargmin and nanargmax composition helper
69 def _nanargminmax(a, axis=None):
70 # check nans
71 nans = ivy.isnan(a).astype(ivy.bool)
72 # replace nans with inf
73 a = ivy.where(nans, ivy.inf, a)
74 if nans is not None:
75 nans = ivy.all(nans, axis=axis)
76 if ivy.any(nans):
77 raise ivy.utils.exceptions.IvyError("All-NaN slice encountered")
78 return a
79
80
81 @handle_numpy_out
82 @to_ivy_arrays_and_back
83 @from_zero_dim_arrays_to_scalar
84 def nanargmax(a, /, *, axis=None, out=None, keepdims=False):
85 a = _nanargminmax(a, axis=axis)
86 return ivy.argmax(a, axis=axis, keepdims=keepdims, out=out)
87
88
89 @handle_numpy_out
90 @to_ivy_arrays_and_back
91 @from_zero_dim_arrays_to_scalar
92 def nanargmin(a, /, *, axis=None, out=None, keepdims=False):
93 a = _nanargminmax(a, axis=axis)
94 return ivy.argmin(a, axis=axis, keepdims=keepdims, out=out)
95
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/frontends/numpy/sorting_searching_counting/searching.py b/ivy/functional/frontends/numpy/sorting_searching_counting/searching.py
--- a/ivy/functional/frontends/numpy/sorting_searching_counting/searching.py
+++ b/ivy/functional/frontends/numpy/sorting_searching_counting/searching.py
@@ -92,3 +92,11 @@
def nanargmin(a, /, *, axis=None, out=None, keepdims=False):
a = _nanargminmax(a, axis=axis)
return ivy.argmin(a, axis=axis, keepdims=keepdims, out=out)
+
+
+@to_ivy_arrays_and_back
+def extract(cond, arr, /):
+ if cond.dtype == 'bool':
+ return arr[cond]
+ else:
+ return arr[cond !=0]
\ No newline at end of file
| {"golden_diff": "diff --git a/ivy/functional/frontends/numpy/sorting_searching_counting/searching.py b/ivy/functional/frontends/numpy/sorting_searching_counting/searching.py\n--- a/ivy/functional/frontends/numpy/sorting_searching_counting/searching.py\n+++ b/ivy/functional/frontends/numpy/sorting_searching_counting/searching.py\n@@ -92,3 +92,11 @@\n def nanargmin(a, /, *, axis=None, out=None, keepdims=False):\n a = _nanargminmax(a, axis=axis)\n return ivy.argmin(a, axis=axis, keepdims=keepdims, out=out)\n+\n+\n+@to_ivy_arrays_and_back\n+def extract(cond, arr, /):\n+ if cond.dtype == 'bool':\n+ return arr[cond]\n+ else:\n+ return arr[cond !=0]\n\\ No newline at end of file\n", "issue": "extract\n\n", "before_files": [{"content": "# local\n\nimport ivy\n\nfrom ivy.functional.frontends.numpy import promote_types_of_numpy_inputs\n\nfrom ivy.functional.frontends.numpy.func_wrapper import (\n to_ivy_arrays_and_back,\n from_zero_dim_arrays_to_scalar,\n handle_numpy_out,\n)\n\n\n@to_ivy_arrays_and_back\ndef where(cond, x1=None, x2=None, /):\n if x1 is None and x2 is None:\n # numpy where behaves as np.asarray(condition).nonzero() when x and y\n # not included\n return ivy.asarray(cond).nonzero()\n elif x1 is not None and x2 is not None:\n x1, x2 = promote_types_of_numpy_inputs(x1, x2)\n return ivy.where(cond, x1, x2)\n else:\n raise ivy.utils.exceptions.IvyException(\"where takes either 1 or 3 arguments\")\n\n\n@to_ivy_arrays_and_back\ndef nonzero(a):\n return ivy.nonzero(a)\n\n\n@handle_numpy_out\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef argmin(a, /, *, axis=None, keepdims=False, out=None):\n return ivy.argmin(a, axis=axis, out=out, keepdims=keepdims)\n\n\n@handle_numpy_out\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef argmax(\n a,\n /,\n *,\n axis=None,\n out=None,\n keepdims=False,\n):\n return ivy.argmax(a, axis=axis, out=out, keepdims=keepdims)\n\n\n@to_ivy_arrays_and_back\ndef flatnonzero(a):\n return ivy.nonzero(ivy.reshape(a, (-1,)))\n\n\n@to_ivy_arrays_and_back\ndef searchsorted(a, v, side=\"left\", sorter=None):\n return ivy.searchsorted(a, v, side=side, sorter=sorter)\n\n\n@to_ivy_arrays_and_back\ndef argwhere(a):\n return ivy.argwhere(a)\n\n\n# nanargmin and nanargmax composition helper\ndef _nanargminmax(a, axis=None):\n # check nans\n nans = ivy.isnan(a).astype(ivy.bool)\n # replace nans with inf\n a = ivy.where(nans, ivy.inf, a)\n if nans is not None:\n nans = ivy.all(nans, axis=axis)\n if ivy.any(nans):\n raise ivy.utils.exceptions.IvyError(\"All-NaN slice encountered\")\n return a\n\n\n@handle_numpy_out\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef nanargmax(a, /, *, axis=None, out=None, keepdims=False):\n a = _nanargminmax(a, axis=axis)\n return ivy.argmax(a, axis=axis, keepdims=keepdims, out=out)\n\n\n@handle_numpy_out\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef nanargmin(a, /, *, axis=None, out=None, keepdims=False):\n a = _nanargminmax(a, axis=axis)\n return ivy.argmin(a, axis=axis, keepdims=keepdims, out=out)\n", "path": "ivy/functional/frontends/numpy/sorting_searching_counting/searching.py"}], "after_files": [{"content": "# local\n\nimport ivy\n\nfrom ivy.functional.frontends.numpy import promote_types_of_numpy_inputs\n\nfrom ivy.functional.frontends.numpy.func_wrapper import (\n to_ivy_arrays_and_back,\n from_zero_dim_arrays_to_scalar,\n handle_numpy_out,\n)\n\n\n@to_ivy_arrays_and_back\ndef where(cond, x1=None, x2=None, /):\n if x1 is None and x2 is None:\n # numpy where behaves as 
np.asarray(condition).nonzero() when x and y\n # not included\n return ivy.asarray(cond).nonzero()\n elif x1 is not None and x2 is not None:\n x1, x2 = promote_types_of_numpy_inputs(x1, x2)\n return ivy.where(cond, x1, x2)\n else:\n raise ivy.utils.exceptions.IvyException(\"where takes either 1 or 3 arguments\")\n\n\n@to_ivy_arrays_and_back\ndef nonzero(a):\n return ivy.nonzero(a)\n\n\n@handle_numpy_out\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef argmin(a, /, *, axis=None, keepdims=False, out=None):\n return ivy.argmin(a, axis=axis, out=out, keepdims=keepdims)\n\n\n@handle_numpy_out\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef argmax(\n a,\n /,\n *,\n axis=None,\n out=None,\n keepdims=False,\n):\n return ivy.argmax(a, axis=axis, out=out, keepdims=keepdims)\n\n\n@to_ivy_arrays_and_back\ndef flatnonzero(a):\n return ivy.nonzero(ivy.reshape(a, (-1,)))\n\n\n@to_ivy_arrays_and_back\ndef searchsorted(a, v, side=\"left\", sorter=None):\n return ivy.searchsorted(a, v, side=side, sorter=sorter)\n\n\n@to_ivy_arrays_and_back\ndef argwhere(a):\n return ivy.argwhere(a)\n\n\n# nanargmin and nanargmax composition helper\ndef _nanargminmax(a, axis=None):\n # check nans\n nans = ivy.isnan(a).astype(ivy.bool)\n # replace nans with inf\n a = ivy.where(nans, ivy.inf, a)\n if nans is not None:\n nans = ivy.all(nans, axis=axis)\n if ivy.any(nans):\n raise ivy.utils.exceptions.IvyError(\"All-NaN slice encountered\")\n return a\n\n\n@handle_numpy_out\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef nanargmax(a, /, *, axis=None, out=None, keepdims=False):\n a = _nanargminmax(a, axis=axis)\n return ivy.argmax(a, axis=axis, keepdims=keepdims, out=out)\n\n\n@handle_numpy_out\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef nanargmin(a, /, *, axis=None, out=None, keepdims=False):\n a = _nanargminmax(a, axis=axis)\n return ivy.argmin(a, axis=axis, keepdims=keepdims, out=out)\n\n\n@to_ivy_arrays_and_back\ndef extract(cond, arr, /):\n if cond.dtype == 'bool':\n return arr[cond]\n else:\n return arr[cond !=0]", "path": "ivy/functional/frontends/numpy/sorting_searching_counting/searching.py"}]} | 1,167 | 204 |
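The `extract` frontend added by the patch above follows `numpy.extract` semantics: a boolean condition indexes directly, while any other dtype selects positions where the condition is nonzero. A standalone NumPy sketch of the same logic (the name `extract_like` and the sample arrays are ours, for illustration only):

```python
import numpy as np

def extract_like(cond, arr):
    # Boolean masks index directly; any other dtype selects where cond != 0,
    # mirroring the two branches of the patched frontend.
    cond = np.asarray(cond)
    arr = np.asarray(arr)
    if cond.dtype == np.bool_:
        return arr[cond]
    return arr[cond != 0]

a = np.array([10, 20, 30, 40])
print(extract_like(np.array([True, False, True, False]), a))  # [10 30]
print(extract_like(np.array([0, 1, 2, 0]), a))                # [20 30]
```

Note that the patch compares `cond.dtype == 'bool'` against a string, which NumPy-style dtypes support directly.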
gh_patches_debug_31606 | rasdani/github-patches | git_diff | fossasia__open-event-server-3128 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Do not show deleted orders in the organiser UI and do not auto-delete expired orders
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/helpers/scheduled_jobs.py`
Content:
```
1 from datetime import datetime, timedelta
2
3 from dateutil.relativedelta import relativedelta
4 from flask import url_for
5 from sqlalchemy_continuum import transaction_class
6
7 from app.helpers.data import DataManager, delete_from_db, save_to_db
8 from app.helpers.data_getter import DataGetter
9 from app.helpers.helpers import send_after_event, monthdelta, send_followup_email_for_monthly_fee_payment
10 from app.helpers.helpers import send_email_for_expired_orders, send_email_for_monthly_fee_payment
11 from app.helpers.payment import get_fee
12 from app.helpers.ticketing import TicketingManager
13 from app.models.event import Event
14 from app.models.event_invoice import EventInvoice
15 from app.models.order import Order
16 from app.models.session import Session
17 from app.models.user import User
18
19
20 def empty_trash():
21 from app import current_app as app
22 with app.app_context():
23 events = Event.query.filter_by(in_trash=True)
24 users = User.query.filter_by(in_trash=True)
25 sessions = Session.query.filter_by(in_trash=True)
26 orders = Order.query.filter_by(status="deleted")
27 pending_orders = Order.query.filter_by(status="pending")
28 expired_orders = Order.query.filter_by(status="expired")
29 for event in events:
30 if datetime.now() - event.trash_date >= timedelta(days=30):
31 DataManager.delete_event(event.id)
32
33 for user in users:
34 if datetime.now() - user.trash_date >= timedelta(days=30):
35 transaction = transaction_class(Event)
36 transaction.query.filter_by(user_id=user.id).delete()
37 delete_from_db(user, "User deleted permanently")
38
39 for session_ in sessions:
40 if datetime.now() - session_.trash_date >= timedelta(days=30):
41 delete_from_db(session_, "Session deleted permanently")
42
43 for order in orders:
44 if datetime.now() - order.trashed_at >= timedelta(days=30):
45 delete_from_db(order, "Order deleted permanently")
46
47 for pending_order in pending_orders:
48 if datetime.now() - pending_order.created_at >= timedelta(days=3):
49 pending_order.status = "expired"
50 save_to_db(pending_order, "Pending order expired.")
51
52 for expired_order in expired_orders:
53 if datetime.now() - expired_order.created_at >= timedelta(days=6):
54 expired_order.status = "deleted"
55 expired_order.trashed_at = datetime.now()
56 save_to_db(expired_order, "Expired order deleted")
57
58
59 def send_after_event_mail():
60 from app import current_app as app
61 with app.app_context():
62 events = Event.query.all()
63 for event in events:
64 upcoming_events = DataGetter.get_upcoming_events()
65 organizers = DataGetter.get_user_event_roles_by_role_name(
66 event.id, 'organizer')
67 speakers = DataGetter.get_user_event_roles_by_role_name(event.id,
68 'speaker')
69 if datetime.now() > event.end_time:
70 for speaker in speakers:
71 send_after_event(speaker.user.email, event.id,
72 upcoming_events)
73 for organizer in organizers:
74 send_after_event(organizer.user.email, event.id,
75 upcoming_events)
76
77
78 def send_mail_to_expired_orders():
79 from app import current_app as app
80 with app.app_context():
81 orders = DataGetter.get_expired_orders()
82 for order in orders:
83 send_email_for_expired_orders(order.user.email, order.event.name, order.get_invoice_number(),
84 url_for('ticketing.view_order_after_payment',
85 order_identifier=order.identifier, _external=True))
86
87
88 def send_event_fee_notification():
89 from app import current_app as app
90 with app.app_context():
91 events = Event.query.all()
92 for event in events:
93 latest_invoice = EventInvoice.filter_by(event_id=event.id).order_by(EventInvoice.created_at.desc()).first()
94
95 if latest_invoice:
96 orders = Order.query \
97 .filter_by(event_id=event.id) \
98 .filter_by(status='completed') \
99 .filter(Order.completed_at > latest_invoice.created_at).all()
100 else:
101 orders = Order.query.filter_by(event_id=event.id).filter_by(status='completed').all()
102
103 fee_total = 0
104 for order in orders:
105 for order_ticket in order.tickets:
106 ticket = TicketingManager.get_ticket(order_ticket.ticket_id)
107 if order.paid_via != 'free' and order.amount > 0 and ticket.price > 0:
108 fee = ticket.price * (get_fee(order.event.payment_currency) / 100.0)
109 fee_total += fee
110
111 if fee_total > 0:
112 new_invoice = EventInvoice(amount=fee_total, event_id=event.id, user_id=event.creator_id)
113
114 if event.discount_code_id and event.discount_code:
115 r = relativedelta(datetime.utcnow(), event.created_at)
116 if r <= event.discount_code.max_quantity:
117 new_invoice.amount = fee_total - (fee_total * (event.discount_code.value / 100.0))
118 new_invoice.discount_code_id = event.discount_code_id
119
120 save_to_db(new_invoice)
121 prev_month = monthdelta(new_invoice.created_at, 1).strftime("%b %Y") # Displayed as Aug 2016
122 send_email_for_monthly_fee_payment(new_invoice.user.email,
123 event.name,
124 prev_month,
125 new_invoice.amount,
126 url_for('event_invoicing.view_invoice',
127 invoice_identifier=new_invoice.identifier, _external=True))
128
129
130 def send_event_fee_notification_followup():
131 from app import current_app as app
132 with app.app_context():
133 incomplete_invoices = EventInvoice.query.filter(EventInvoice.status != 'completed').all()
134 for incomplete_invoice in incomplete_invoices:
135 if incomplete_invoice.amount > 0:
136 prev_month = monthdelta(incomplete_invoice.created_at, 1).strftime("%b %Y") # Displayed as Aug 2016
137 send_followup_email_for_monthly_fee_payment(incomplete_invoice.user.email,
138 incomplete_invoice.event.name,
139 prev_month,
140 incomplete_invoice.amount,
141 url_for('event_invoicing.view_invoice',
142 invoice_identifier=incomplete_invoice.identifier,
143 _external=True))
144
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/helpers/scheduled_jobs.py b/app/helpers/scheduled_jobs.py
--- a/app/helpers/scheduled_jobs.py
+++ b/app/helpers/scheduled_jobs.py
@@ -23,9 +23,8 @@
events = Event.query.filter_by(in_trash=True)
users = User.query.filter_by(in_trash=True)
sessions = Session.query.filter_by(in_trash=True)
- orders = Order.query.filter_by(status="deleted")
pending_orders = Order.query.filter_by(status="pending")
- expired_orders = Order.query.filter_by(status="expired")
+
for event in events:
if datetime.now() - event.trash_date >= timedelta(days=30):
DataManager.delete_event(event.id)
@@ -40,21 +39,11 @@
if datetime.now() - session_.trash_date >= timedelta(days=30):
delete_from_db(session_, "Session deleted permanently")
- for order in orders:
- if datetime.now() - order.trashed_at >= timedelta(days=30):
- delete_from_db(order, "Order deleted permanently")
-
for pending_order in pending_orders:
if datetime.now() - pending_order.created_at >= timedelta(days=3):
pending_order.status = "expired"
save_to_db(pending_order, "Pending order expired.")
- for expired_order in expired_orders:
- if datetime.now() - expired_order.created_at >= timedelta(days=6):
- expired_order.status = "deleted"
- expired_order.trashed_at = datetime.now()
- save_to_db(expired_order, "Expired order deleted")
-
def send_after_event_mail():
from app import current_app as app
| {"golden_diff": "diff --git a/app/helpers/scheduled_jobs.py b/app/helpers/scheduled_jobs.py\n--- a/app/helpers/scheduled_jobs.py\n+++ b/app/helpers/scheduled_jobs.py\n@@ -23,9 +23,8 @@\n events = Event.query.filter_by(in_trash=True)\n users = User.query.filter_by(in_trash=True)\n sessions = Session.query.filter_by(in_trash=True)\n- orders = Order.query.filter_by(status=\"deleted\")\n pending_orders = Order.query.filter_by(status=\"pending\")\n- expired_orders = Order.query.filter_by(status=\"expired\")\n+\n for event in events:\n if datetime.now() - event.trash_date >= timedelta(days=30):\n DataManager.delete_event(event.id)\n@@ -40,21 +39,11 @@\n if datetime.now() - session_.trash_date >= timedelta(days=30):\n delete_from_db(session_, \"Session deleted permanently\")\n \n- for order in orders:\n- if datetime.now() - order.trashed_at >= timedelta(days=30):\n- delete_from_db(order, \"Order deleted permanently\")\n-\n for pending_order in pending_orders:\n if datetime.now() - pending_order.created_at >= timedelta(days=3):\n pending_order.status = \"expired\"\n save_to_db(pending_order, \"Pending order expired.\")\n \n- for expired_order in expired_orders:\n- if datetime.now() - expired_order.created_at >= timedelta(days=6):\n- expired_order.status = \"deleted\"\n- expired_order.trashed_at = datetime.now()\n- save_to_db(expired_order, \"Expired order deleted\")\n-\n \n def send_after_event_mail():\n from app import current_app as app\n", "issue": "Do not show deleted orders in organiser ui and do not auto delete expired orders\n\n", "before_files": [{"content": "from datetime import datetime, timedelta\n\nfrom dateutil.relativedelta import relativedelta\nfrom flask import url_for\nfrom sqlalchemy_continuum import transaction_class\n\nfrom app.helpers.data import DataManager, delete_from_db, save_to_db\nfrom app.helpers.data_getter import DataGetter\nfrom app.helpers.helpers import send_after_event, monthdelta, send_followup_email_for_monthly_fee_payment\nfrom app.helpers.helpers import send_email_for_expired_orders, send_email_for_monthly_fee_payment\nfrom app.helpers.payment import get_fee\nfrom app.helpers.ticketing import TicketingManager\nfrom app.models.event import Event\nfrom app.models.event_invoice import EventInvoice\nfrom app.models.order import Order\nfrom app.models.session import Session\nfrom app.models.user import User\n\n\ndef empty_trash():\n from app import current_app as app\n with app.app_context():\n events = Event.query.filter_by(in_trash=True)\n users = User.query.filter_by(in_trash=True)\n sessions = Session.query.filter_by(in_trash=True)\n orders = Order.query.filter_by(status=\"deleted\")\n pending_orders = Order.query.filter_by(status=\"pending\")\n expired_orders = Order.query.filter_by(status=\"expired\")\n for event in events:\n if datetime.now() - event.trash_date >= timedelta(days=30):\n DataManager.delete_event(event.id)\n\n for user in users:\n if datetime.now() - user.trash_date >= timedelta(days=30):\n transaction = transaction_class(Event)\n transaction.query.filter_by(user_id=user.id).delete()\n delete_from_db(user, \"User deleted permanently\")\n\n for session_ in sessions:\n if datetime.now() - session_.trash_date >= timedelta(days=30):\n delete_from_db(session_, \"Session deleted permanently\")\n\n for order in orders:\n if datetime.now() - order.trashed_at >= timedelta(days=30):\n delete_from_db(order, \"Order deleted permanently\")\n\n for pending_order in pending_orders:\n if datetime.now() - pending_order.created_at >= timedelta(days=3):\n 
pending_order.status = \"expired\"\n save_to_db(pending_order, \"Pending order expired.\")\n\n for expired_order in expired_orders:\n if datetime.now() - expired_order.created_at >= timedelta(days=6):\n expired_order.status = \"deleted\"\n expired_order.trashed_at = datetime.now()\n save_to_db(expired_order, \"Expired order deleted\")\n\n\ndef send_after_event_mail():\n from app import current_app as app\n with app.app_context():\n events = Event.query.all()\n for event in events:\n upcoming_events = DataGetter.get_upcoming_events()\n organizers = DataGetter.get_user_event_roles_by_role_name(\n event.id, 'organizer')\n speakers = DataGetter.get_user_event_roles_by_role_name(event.id,\n 'speaker')\n if datetime.now() > event.end_time:\n for speaker in speakers:\n send_after_event(speaker.user.email, event.id,\n upcoming_events)\n for organizer in organizers:\n send_after_event(organizer.user.email, event.id,\n upcoming_events)\n\n\ndef send_mail_to_expired_orders():\n from app import current_app as app\n with app.app_context():\n orders = DataGetter.get_expired_orders()\n for order in orders:\n send_email_for_expired_orders(order.user.email, order.event.name, order.get_invoice_number(),\n url_for('ticketing.view_order_after_payment',\n order_identifier=order.identifier, _external=True))\n\n\ndef send_event_fee_notification():\n from app import current_app as app\n with app.app_context():\n events = Event.query.all()\n for event in events:\n latest_invoice = EventInvoice.filter_by(event_id=event.id).order_by(EventInvoice.created_at.desc()).first()\n\n if latest_invoice:\n orders = Order.query \\\n .filter_by(event_id=event.id) \\\n .filter_by(status='completed') \\\n .filter(Order.completed_at > latest_invoice.created_at).all()\n else:\n orders = Order.query.filter_by(event_id=event.id).filter_by(status='completed').all()\n\n fee_total = 0\n for order in orders:\n for order_ticket in order.tickets:\n ticket = TicketingManager.get_ticket(order_ticket.ticket_id)\n if order.paid_via != 'free' and order.amount > 0 and ticket.price > 0:\n fee = ticket.price * (get_fee(order.event.payment_currency) / 100.0)\n fee_total += fee\n\n if fee_total > 0:\n new_invoice = EventInvoice(amount=fee_total, event_id=event.id, user_id=event.creator_id)\n\n if event.discount_code_id and event.discount_code:\n r = relativedelta(datetime.utcnow(), event.created_at)\n if r <= event.discount_code.max_quantity:\n new_invoice.amount = fee_total - (fee_total * (event.discount_code.value / 100.0))\n new_invoice.discount_code_id = event.discount_code_id\n\n save_to_db(new_invoice)\n prev_month = monthdelta(new_invoice.created_at, 1).strftime(\"%b %Y\") # Displayed as Aug 2016\n send_email_for_monthly_fee_payment(new_invoice.user.email,\n event.name,\n prev_month,\n new_invoice.amount,\n url_for('event_invoicing.view_invoice',\n invoice_identifier=new_invoice.identifier, _external=True))\n\n\ndef send_event_fee_notification_followup():\n from app import current_app as app\n with app.app_context():\n incomplete_invoices = EventInvoice.query.filter(EventInvoice.status != 'completed').all()\n for incomplete_invoice in incomplete_invoices:\n if incomplete_invoice.amount > 0:\n prev_month = monthdelta(incomplete_invoice.created_at, 1).strftime(\"%b %Y\") # Displayed as Aug 2016\n send_followup_email_for_monthly_fee_payment(incomplete_invoice.user.email,\n incomplete_invoice.event.name,\n prev_month,\n incomplete_invoice.amount,\n url_for('event_invoicing.view_invoice',\n invoice_identifier=incomplete_invoice.identifier,\n 
_external=True))\n", "path": "app/helpers/scheduled_jobs.py"}], "after_files": [{"content": "from datetime import datetime, timedelta\n\nfrom dateutil.relativedelta import relativedelta\nfrom flask import url_for\nfrom sqlalchemy_continuum import transaction_class\n\nfrom app.helpers.data import DataManager, delete_from_db, save_to_db\nfrom app.helpers.data_getter import DataGetter\nfrom app.helpers.helpers import send_after_event, monthdelta, send_followup_email_for_monthly_fee_payment\nfrom app.helpers.helpers import send_email_for_expired_orders, send_email_for_monthly_fee_payment\nfrom app.helpers.payment import get_fee\nfrom app.helpers.ticketing import TicketingManager\nfrom app.models.event import Event\nfrom app.models.event_invoice import EventInvoice\nfrom app.models.order import Order\nfrom app.models.session import Session\nfrom app.models.user import User\n\n\ndef empty_trash():\n from app import current_app as app\n with app.app_context():\n events = Event.query.filter_by(in_trash=True)\n users = User.query.filter_by(in_trash=True)\n sessions = Session.query.filter_by(in_trash=True)\n pending_orders = Order.query.filter_by(status=\"pending\")\n\n for event in events:\n if datetime.now() - event.trash_date >= timedelta(days=30):\n DataManager.delete_event(event.id)\n\n for user in users:\n if datetime.now() - user.trash_date >= timedelta(days=30):\n transaction = transaction_class(Event)\n transaction.query.filter_by(user_id=user.id).delete()\n delete_from_db(user, \"User deleted permanently\")\n\n for session_ in sessions:\n if datetime.now() - session_.trash_date >= timedelta(days=30):\n delete_from_db(session_, \"Session deleted permanently\")\n\n for pending_order in pending_orders:\n if datetime.now() - pending_order.created_at >= timedelta(days=3):\n pending_order.status = \"expired\"\n save_to_db(pending_order, \"Pending order expired.\")\n\n\ndef send_after_event_mail():\n from app import current_app as app\n with app.app_context():\n events = Event.query.all()\n for event in events:\n upcoming_events = DataGetter.get_upcoming_events()\n organizers = DataGetter.get_user_event_roles_by_role_name(\n event.id, 'organizer')\n speakers = DataGetter.get_user_event_roles_by_role_name(event.id,\n 'speaker')\n if datetime.now() > event.end_time:\n for speaker in speakers:\n send_after_event(speaker.user.email, event.id,\n upcoming_events)\n for organizer in organizers:\n send_after_event(organizer.user.email, event.id,\n upcoming_events)\n\n\ndef send_mail_to_expired_orders():\n from app import current_app as app\n with app.app_context():\n orders = DataGetter.get_expired_orders()\n for order in orders:\n send_email_for_expired_orders(order.user.email, order.event.name, order.get_invoice_number(),\n url_for('ticketing.view_order_after_payment',\n order_identifier=order.identifier, _external=True))\n\n\ndef send_event_fee_notification():\n from app import current_app as app\n with app.app_context():\n events = Event.query.all()\n for event in events:\n latest_invoice = EventInvoice.filter_by(event_id=event.id).order_by(EventInvoice.created_at.desc()).first()\n\n if latest_invoice:\n orders = Order.query \\\n .filter_by(event_id=event.id) \\\n .filter_by(status='completed') \\\n .filter(Order.completed_at > latest_invoice.created_at).all()\n else:\n orders = Order.query.filter_by(event_id=event.id).filter_by(status='completed').all()\n\n fee_total = 0\n for order in orders:\n for order_ticket in order.tickets:\n ticket = TicketingManager.get_ticket(order_ticket.ticket_id)\n if 
order.paid_via != 'free' and order.amount > 0 and ticket.price > 0:\n fee = ticket.price * (get_fee(order.event.payment_currency) / 100.0)\n fee_total += fee\n\n if fee_total > 0:\n new_invoice = EventInvoice(amount=fee_total, event_id=event.id, user_id=event.creator_id)\n\n if event.discount_code_id and event.discount_code:\n r = relativedelta(datetime.utcnow(), event.created_at)\n if r <= event.discount_code.max_quantity:\n new_invoice.amount = fee_total - (fee_total * (event.discount_code.value / 100.0))\n new_invoice.discount_code_id = event.discount_code_id\n\n save_to_db(new_invoice)\n prev_month = monthdelta(new_invoice.created_at, 1).strftime(\"%b %Y\") # Displayed as Aug 2016\n send_email_for_monthly_fee_payment(new_invoice.user.email,\n event.name,\n prev_month,\n new_invoice.amount,\n url_for('event_invoicing.view_invoice',\n invoice_identifier=new_invoice.identifier, _external=True))\n\n\ndef send_event_fee_notification_followup():\n from app import current_app as app\n with app.app_context():\n incomplete_invoices = EventInvoice.query.filter(EventInvoice.status != 'completed').all()\n for incomplete_invoice in incomplete_invoices:\n if incomplete_invoice.amount > 0:\n prev_month = monthdelta(incomplete_invoice.created_at, 1).strftime(\"%b %Y\") # Displayed as Aug 2016\n send_followup_email_for_monthly_fee_payment(incomplete_invoice.user.email,\n incomplete_invoice.event.name,\n prev_month,\n incomplete_invoice.amount,\n url_for('event_invoicing.view_invoice',\n invoice_identifier=incomplete_invoice.identifier,\n _external=True))\n", "path": "app/helpers/scheduled_jobs.py"}]} | 1,883 | 356 |
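To summarize the behavior change above: after the patch, `empty_trash` no longer purges `deleted` orders or demotes `expired` ones; the only order transition it still performs is expiring stale pending orders. A condensed, framework-free sketch of the surviving logic (the function name is ours; the three-day TTL mirrors the patched code, and persistence via `save_to_db` is omitted):

```python
from datetime import datetime, timedelta

PENDING_TTL = timedelta(days=3)

def expire_stale_pending(pending_orders, now=None):
    # The only order-related work left in empty_trash(): flip pending
    # orders older than three days to "expired". Deleted and expired
    # orders are now left alone, so the organiser UI controls them.
    now = now or datetime.now()
    for order in pending_orders:
        if now - order.created_at >= PENDING_TTL:
            order.status = "expired"
```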
gh_patches_debug_40331 | rasdani/github-patches | git_diff | searxng__searxng-3418 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Wikimedia Commons
**Working URL to the engine**
https://commons.wikimedia.org
**Why do you want to add this engine?**
Of all the Wikimedia projects, Wikimedia Commons is one of only two that do not appear in any engine category in SearXNG, the other being Wikispecies.
**Features of this engine**
It has a collection of [82,886,704](https://commons.wikimedia.org/wiki/Special:Statistics) [freely usable](https://commons.wikimedia.org/wiki/Commons:Reusing_content_outside_Wikimedia) media files.
**How can SearXNG fetch the information from this engine?**
`https://commons.wikimedia.org/w/index.php?search=%s` with `%s` being what you want to search for.
**Applicable category of this engine**
General, files, images, music, videos.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searx/engines/wikicommons.py`
Content:
```
1 # SPDX-License-Identifier: AGPL-3.0-or-later
2 """Wikimedia Commons (images)
3
4 """
5
6 from urllib.parse import urlencode
7
8 # about
9 about = {
10 "website": 'https://commons.wikimedia.org/',
11 "wikidata_id": 'Q565',
12 "official_api_documentation": 'https://commons.wikimedia.org/w/api.php',
13 "use_official_api": True,
14 "require_api_key": False,
15 "results": 'JSON',
16 }
17
18 base_url = "https://commons.wikimedia.org"
19 search_prefix = (
20 '?action=query'
21 '&format=json'
22 '&generator=search'
23 '&gsrnamespace=6'
24 '&gsrprop=snippet'
25 '&prop=info|imageinfo'
26 '&iiprop=url|size|mime'
27 '&iiurlheight=180' # needed for the thumb url
28 )
29 paging = True
30 number_of_results = 10
31
32
33 def request(query, params):
34 language = 'en'
35 if params['language'] != 'all':
36 language = params['language'].split('-')[0]
37
38 args = {
39 'uselang': language,
40 'gsrlimit': number_of_results,
41 'gsroffset': number_of_results * (params["pageno"] - 1),
42 'gsrsearch': "filetype:bitmap|drawing " + query,
43 }
44
45 params["url"] = f"{base_url}/w/api.php{search_prefix}&{urlencode(args, safe=':|')}"
46 return params
47
48
49 def response(resp):
50 results = []
51 json = resp.json()
52
53 if not json.get("query", {}).get("pages"):
54 return results
55
56 for item in json["query"]["pages"].values():
57 imageinfo = item["imageinfo"][0]
58 title = item["title"].replace("File:", "").rsplit('.', 1)[0]
59 result = {
60 'url': imageinfo["descriptionurl"],
61 'title': title,
62 'content': item["snippet"],
63 'img_src': imageinfo["url"],
64 'resolution': f'{imageinfo["width"]} x {imageinfo["height"]}',
65 'thumbnail_src': imageinfo["thumburl"],
66 'template': 'images.html',
67 }
68 results.append(result)
69
70 return results
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/searx/engines/wikicommons.py b/searx/engines/wikicommons.py
--- a/searx/engines/wikicommons.py
+++ b/searx/engines/wikicommons.py
@@ -3,6 +3,8 @@
"""
+import datetime
+
from urllib.parse import urlencode
# about
@@ -14,6 +16,8 @@
"require_api_key": False,
"results": 'JSON',
}
+categories = ['images']
+search_type = 'images'
base_url = "https://commons.wikimedia.org"
search_prefix = (
@@ -29,17 +33,29 @@
paging = True
number_of_results = 10
+search_types = {
+ 'images': 'bitmap|drawing',
+ 'videos': 'video',
+ 'audio': 'audio',
+ 'files': 'multimedia|office|archive|3d',
+}
+
def request(query, params):
language = 'en'
if params['language'] != 'all':
language = params['language'].split('-')[0]
+ if search_type not in search_types:
+ raise ValueError(f"Unsupported search type: {search_type}")
+
+ filetype = search_types[search_type]
+
args = {
'uselang': language,
'gsrlimit': number_of_results,
'gsroffset': number_of_results * (params["pageno"] - 1),
- 'gsrsearch': "filetype:bitmap|drawing " + query,
+ 'gsrsearch': f"filetype:{filetype} {query}",
}
params["url"] = f"{base_url}/w/api.php{search_prefix}&{urlencode(args, safe=':|')}"
@@ -52,7 +68,6 @@
if not json.get("query", {}).get("pages"):
return results
-
for item in json["query"]["pages"].values():
imageinfo = item["imageinfo"][0]
title = item["title"].replace("File:", "").rsplit('.', 1)[0]
@@ -60,11 +75,28 @@
'url': imageinfo["descriptionurl"],
'title': title,
'content': item["snippet"],
- 'img_src': imageinfo["url"],
- 'resolution': f'{imageinfo["width"]} x {imageinfo["height"]}',
- 'thumbnail_src': imageinfo["thumburl"],
- 'template': 'images.html',
}
+
+ if search_type == "images":
+ result['template'] = 'images.html'
+ result['img_src'] = imageinfo["url"]
+ result['thumbnail_src'] = imageinfo["thumburl"]
+ result['resolution'] = f'{imageinfo["width"]} x {imageinfo["height"]}'
+ else:
+ result['thumbnail'] = imageinfo["thumburl"]
+
+ if search_type == "videos":
+ result['template'] = 'videos.html'
+ if imageinfo.get('duration'):
+ result['length'] = datetime.timedelta(seconds=int(imageinfo['duration']))
+ result['iframe_src'] = imageinfo['url']
+ elif search_type == "files":
+ result['template'] = 'files.html'
+ result['metadata'] = imageinfo['mime']
+ result['size'] = imageinfo['size']
+ elif search_type == "audio":
+ result['iframe_src'] = imageinfo['url']
+
results.append(result)
return results
| {"golden_diff": "diff --git a/searx/engines/wikicommons.py b/searx/engines/wikicommons.py\n--- a/searx/engines/wikicommons.py\n+++ b/searx/engines/wikicommons.py\n@@ -3,6 +3,8 @@\n \n \"\"\"\n \n+import datetime\n+\n from urllib.parse import urlencode\n \n # about\n@@ -14,6 +16,8 @@\n \"require_api_key\": False,\n \"results\": 'JSON',\n }\n+categories = ['images']\n+search_type = 'images'\n \n base_url = \"https://commons.wikimedia.org\"\n search_prefix = (\n@@ -29,17 +33,29 @@\n paging = True\n number_of_results = 10\n \n+search_types = {\n+ 'images': 'bitmap|drawing',\n+ 'videos': 'video',\n+ 'audio': 'audio',\n+ 'files': 'multimedia|office|archive|3d',\n+}\n+\n \n def request(query, params):\n language = 'en'\n if params['language'] != 'all':\n language = params['language'].split('-')[0]\n \n+ if search_type not in search_types:\n+ raise ValueError(f\"Unsupported search type: {search_type}\")\n+\n+ filetype = search_types[search_type]\n+\n args = {\n 'uselang': language,\n 'gsrlimit': number_of_results,\n 'gsroffset': number_of_results * (params[\"pageno\"] - 1),\n- 'gsrsearch': \"filetype:bitmap|drawing \" + query,\n+ 'gsrsearch': f\"filetype:{filetype} {query}\",\n }\n \n params[\"url\"] = f\"{base_url}/w/api.php{search_prefix}&{urlencode(args, safe=':|')}\"\n@@ -52,7 +68,6 @@\n \n if not json.get(\"query\", {}).get(\"pages\"):\n return results\n-\n for item in json[\"query\"][\"pages\"].values():\n imageinfo = item[\"imageinfo\"][0]\n title = item[\"title\"].replace(\"File:\", \"\").rsplit('.', 1)[0]\n@@ -60,11 +75,28 @@\n 'url': imageinfo[\"descriptionurl\"],\n 'title': title,\n 'content': item[\"snippet\"],\n- 'img_src': imageinfo[\"url\"],\n- 'resolution': f'{imageinfo[\"width\"]} x {imageinfo[\"height\"]}',\n- 'thumbnail_src': imageinfo[\"thumburl\"],\n- 'template': 'images.html',\n }\n+\n+ if search_type == \"images\":\n+ result['template'] = 'images.html'\n+ result['img_src'] = imageinfo[\"url\"]\n+ result['thumbnail_src'] = imageinfo[\"thumburl\"]\n+ result['resolution'] = f'{imageinfo[\"width\"]} x {imageinfo[\"height\"]}'\n+ else:\n+ result['thumbnail'] = imageinfo[\"thumburl\"]\n+\n+ if search_type == \"videos\":\n+ result['template'] = 'videos.html'\n+ if imageinfo.get('duration'):\n+ result['length'] = datetime.timedelta(seconds=int(imageinfo['duration']))\n+ result['iframe_src'] = imageinfo['url']\n+ elif search_type == \"files\":\n+ result['template'] = 'files.html'\n+ result['metadata'] = imageinfo['mime']\n+ result['size'] = imageinfo['size']\n+ elif search_type == \"audio\":\n+ result['iframe_src'] = imageinfo['url']\n+\n results.append(result)\n \n return results\n", "issue": "Wikimedia Commons\n**Working URL to the engine**\r\nhttps://commons.wikimedia.org\r\n\r\n**Why do you want to add this engine?**\r\nOut of all of the Wikimedia projects, Wikimedia Commons is one of only two to not appear in any engine category in SearXNG, with the other being Wikispecies.\r\n\r\n**Features of this engine**\r\nIt has a collection of [82,886,704](https://commons.wikimedia.org/wiki/Special:Statistics) [freely usable](https://commons.wikimedia.org/wiki/Commons:Reusing_content_outside_Wikimedia) media files.\r\n\r\n**How can SearXNG fetch the information from this engine?**\r\n`https://commons.wikimedia.org/w/index.php?search=%s` with `%s` being what you want to search.\r\n\r\n**Applicable category of this engine**\r\nGeneral, files, images, music, videos.\n", "before_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n\"\"\"Wikimedia Commons 
(images)\n\n\"\"\"\n\nfrom urllib.parse import urlencode\n\n# about\nabout = {\n \"website\": 'https://commons.wikimedia.org/',\n \"wikidata_id\": 'Q565',\n \"official_api_documentation\": 'https://commons.wikimedia.org/w/api.php',\n \"use_official_api\": True,\n \"require_api_key\": False,\n \"results\": 'JSON',\n}\n\nbase_url = \"https://commons.wikimedia.org\"\nsearch_prefix = (\n '?action=query'\n '&format=json'\n '&generator=search'\n '&gsrnamespace=6'\n '&gsrprop=snippet'\n '&prop=info|imageinfo'\n '&iiprop=url|size|mime'\n '&iiurlheight=180' # needed for the thumb url\n)\npaging = True\nnumber_of_results = 10\n\n\ndef request(query, params):\n language = 'en'\n if params['language'] != 'all':\n language = params['language'].split('-')[0]\n\n args = {\n 'uselang': language,\n 'gsrlimit': number_of_results,\n 'gsroffset': number_of_results * (params[\"pageno\"] - 1),\n 'gsrsearch': \"filetype:bitmap|drawing \" + query,\n }\n\n params[\"url\"] = f\"{base_url}/w/api.php{search_prefix}&{urlencode(args, safe=':|')}\"\n return params\n\n\ndef response(resp):\n results = []\n json = resp.json()\n\n if not json.get(\"query\", {}).get(\"pages\"):\n return results\n\n for item in json[\"query\"][\"pages\"].values():\n imageinfo = item[\"imageinfo\"][0]\n title = item[\"title\"].replace(\"File:\", \"\").rsplit('.', 1)[0]\n result = {\n 'url': imageinfo[\"descriptionurl\"],\n 'title': title,\n 'content': item[\"snippet\"],\n 'img_src': imageinfo[\"url\"],\n 'resolution': f'{imageinfo[\"width\"]} x {imageinfo[\"height\"]}',\n 'thumbnail_src': imageinfo[\"thumburl\"],\n 'template': 'images.html',\n }\n results.append(result)\n\n return results\n", "path": "searx/engines/wikicommons.py"}], "after_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n\"\"\"Wikimedia Commons (images)\n\n\"\"\"\n\nimport datetime\n\nfrom urllib.parse import urlencode\n\n# about\nabout = {\n \"website\": 'https://commons.wikimedia.org/',\n \"wikidata_id\": 'Q565',\n \"official_api_documentation\": 'https://commons.wikimedia.org/w/api.php',\n \"use_official_api\": True,\n \"require_api_key\": False,\n \"results\": 'JSON',\n}\ncategories = ['images']\nsearch_type = 'images'\n\nbase_url = \"https://commons.wikimedia.org\"\nsearch_prefix = (\n '?action=query'\n '&format=json'\n '&generator=search'\n '&gsrnamespace=6'\n '&gsrprop=snippet'\n '&prop=info|imageinfo'\n '&iiprop=url|size|mime'\n '&iiurlheight=180' # needed for the thumb url\n)\npaging = True\nnumber_of_results = 10\n\nsearch_types = {\n 'images': 'bitmap|drawing',\n 'videos': 'video',\n 'audio': 'audio',\n 'files': 'multimedia|office|archive|3d',\n}\n\n\ndef request(query, params):\n language = 'en'\n if params['language'] != 'all':\n language = params['language'].split('-')[0]\n\n if search_type not in search_types:\n raise ValueError(f\"Unsupported search type: {search_type}\")\n\n filetype = search_types[search_type]\n\n args = {\n 'uselang': language,\n 'gsrlimit': number_of_results,\n 'gsroffset': number_of_results * (params[\"pageno\"] - 1),\n 'gsrsearch': f\"filetype:{filetype} {query}\",\n }\n\n params[\"url\"] = f\"{base_url}/w/api.php{search_prefix}&{urlencode(args, safe=':|')}\"\n return params\n\n\ndef response(resp):\n results = []\n json = resp.json()\n\n if not json.get(\"query\", {}).get(\"pages\"):\n return results\n for item in json[\"query\"][\"pages\"].values():\n imageinfo = item[\"imageinfo\"][0]\n title = item[\"title\"].replace(\"File:\", \"\").rsplit('.', 1)[0]\n result = {\n 'url': imageinfo[\"descriptionurl\"],\n 'title': 
title,\n 'content': item[\"snippet\"],\n }\n\n if search_type == \"images\":\n result['template'] = 'images.html'\n result['img_src'] = imageinfo[\"url\"]\n result['thumbnail_src'] = imageinfo[\"thumburl\"]\n result['resolution'] = f'{imageinfo[\"width\"]} x {imageinfo[\"height\"]}'\n else:\n result['thumbnail'] = imageinfo[\"thumburl\"]\n\n if search_type == \"videos\":\n result['template'] = 'videos.html'\n if imageinfo.get('duration'):\n result['length'] = datetime.timedelta(seconds=int(imageinfo['duration']))\n result['iframe_src'] = imageinfo['url']\n elif search_type == \"files\":\n result['template'] = 'files.html'\n result['metadata'] = imageinfo['mime']\n result['size'] = imageinfo['size']\n elif search_type == \"audio\":\n result['iframe_src'] = imageinfo['url']\n\n results.append(result)\n\n return results\n", "path": "searx/engines/wikicommons.py"}]} | 1,084 | 786 |
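The core of the patch above is the `search_type` → MediaWiki `filetype:` mapping, which lets the same module back image, video, audio, and generic file searches. The query-building step, extracted into a standalone function for illustration (the function name is ours; the mapping is verbatim from the diff):

```python
search_types = {
    'images': 'bitmap|drawing',
    'videos': 'video',
    'audio': 'audio',
    'files': 'multimedia|office|archive|3d',
}

def build_gsrsearch(search_type: str, query: str) -> str:
    # Mirrors the patched request(): translate the engine's search_type
    # into a filetype filter prepended to the user's query.
    if search_type not in search_types:
        raise ValueError(f"Unsupported search type: {search_type}")
    return f"filetype:{search_types[search_type]} {query}"

print(build_gsrsearch('videos', 'solar eclipse'))  # filetype:video solar eclipse
```

Per-category engine instances would then only need to set `search_type` (and `categories`) at load time, presumably through SearXNG's usual per-engine settings overrides.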
gh_patches_debug_60750 | rasdani/github-patches | git_diff | larq__larq-80 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add docs on how to define your own quantizer
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `larq/quantizers.py`
Content:
```
1 """A Quantizer defines the way of transforming a full precision input to a
2 quantized output and the pseudo-gradient method used for the backwards pass."""
3
4 import tensorflow as tf
5 from larq import utils
6
7
8 def sign(x):
9 """A sign function that will never be zero"""
10 return tf.sign(tf.sign(x) + 0.1)
11
12
13 @tf.custom_gradient
14 def _binarize_with_identity_grad(x):
15 def grad(dy):
16 return dy
17
18 return sign(x), grad
19
20
21 @tf.custom_gradient
22 def _binarize_with_weighted_grad(x):
23 def grad(dy):
24 return (1 - tf.abs(x)) * 2 * dy
25
26 return sign(x), grad
27
28
29 @utils.register_keras_custom_object
30 def ste_sign(x):
31 r"""
32 Sign binarization function.
33 \\[
34 q(x) = \begin{cases}
35 -1 & x < 0 \\\
36 1 & x \geq 0
37 \end{cases}
38 \\]
39
40 The gradient is estimated using the Straight-Through Estimator
41 (essentially the binarization is replaced by a clipped identity on the
42 backward pass).
43 \\[\frac{\partial q(x)}{\partial x} = \begin{cases}
44 1 & \left|x\right| \leq 1 \\\
45 0 & \left|x\right| > 1
46 \end{cases}\\]
47
48 # Arguments
49 x: Input tensor.
50
51 # Returns
52 Binarized tensor.
53
54 # References
55 - [Binarized Neural Networks: Training Deep Neural Networks with Weights and
56 Activations Constrained to +1 or -1](http://arxiv.org/abs/1602.02830)
57 """
58
59 x = tf.clip_by_value(x, -1, 1)
60
61 return _binarize_with_identity_grad(x)
62
63
64 @utils.register_keras_custom_object
65 def magnitude_aware_sign(x):
66 r"""
67 Magnitude-aware sign for birealnet.
68
69
70 # Arguments
71 x: Input tensor
72
73 # Returns
74 Scaled binarized tensor (with values in $\{-a, a\}$, where $a$ is a float).
75
76 # References
77 - [Bi-Real Net: Enhancing the Performance of 1-bit CNNs With Improved
78 Representational Capability and Advanced Training
79 Algorithm](https://arxiv.org/abs/1808.00278)
80
81 """
82 scale_factor = tf.stop_gradient(
83 tf.reduce_mean(tf.abs(x), axis=list(range(len(x.shape) - 1)))
84 )
85 return scale_factor * ste_sign(x)
86
87
88 @utils.register_keras_custom_object
89 def approx_sign(x):
90 r"""
91 Sign binarization function.
92 \\[
93 q(x) = \begin{cases}
94 -1 & x < 0 \\\
95 1 & x \geq 0
96 \end{cases}
97 \\]
98
99 The gradient is estimated using the ApproxSign method.
100 \\[\frac{\partial q(x)}{\partial x} = \begin{cases}
101 (2 - 2 \left|x\right|) & \left|x\right| \leq 1 \\\
102 0 & \left|x\right| > 1
103 \end{cases}
104 \\]
105
106 # Arguments
107 x: Input tensor.
108
109 # Returns
110 Binarized tensor.
111
112 # References
113 - [Bi-Real Net: Enhancing the Performance of 1-bit CNNs With Improved
114 Representational Capability and Advanced
115 Training Algorithm](http://arxiv.org/abs/1808.00278)
116 """
117
118 x = tf.clip_by_value(x, -1, 1)
119
120 return _binarize_with_weighted_grad(x)
121
122
123 def serialize(initializer):
124 return tf.keras.utils.serialize_keras_object(initializer)
125
126
127 def deserialize(name, custom_objects=None):
128 return tf.keras.utils.deserialize_keras_object(
129 name,
130 module_objects=globals(),
131 custom_objects=custom_objects,
132 printable_module_name="quantization function",
133 )
134
135
136 def get(identifier):
137 if identifier is None:
138 return None
139 if isinstance(identifier, str):
140 return deserialize(str(identifier))
141 if callable(identifier):
142 return identifier
143 raise ValueError(
144 f"Could not interpret quantization function identifier: {identifier}"
145 )
146
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/larq/quantizers.py b/larq/quantizers.py
--- a/larq/quantizers.py
+++ b/larq/quantizers.py
@@ -64,7 +64,7 @@
@utils.register_keras_custom_object
def magnitude_aware_sign(x):
r"""
- Magnitude-aware sign for birealnet.
+ Magnitude-aware sign for Bi-Real Net.
# Arguments
| {"golden_diff": "diff --git a/larq/quantizers.py b/larq/quantizers.py\n--- a/larq/quantizers.py\n+++ b/larq/quantizers.py\n@@ -64,7 +64,7 @@\n @utils.register_keras_custom_object\n def magnitude_aware_sign(x):\n r\"\"\"\n- Magnitude-aware sign for birealnet.\n+ Magnitude-aware sign for Bi-Real Net.\n \n \n # Arguments\n", "issue": "Add docs on how to define your own quantizer\n\n", "before_files": [{"content": "\"\"\"A Quantizer defines the way of transforming a full precision input to a\nquantized output and the pseudo-gradient method used for the backwards pass.\"\"\"\n\nimport tensorflow as tf\nfrom larq import utils\n\n\ndef sign(x):\n \"\"\"A sign function that will never be zero\"\"\"\n return tf.sign(tf.sign(x) + 0.1)\n\n\[email protected]_gradient\ndef _binarize_with_identity_grad(x):\n def grad(dy):\n return dy\n\n return sign(x), grad\n\n\[email protected]_gradient\ndef _binarize_with_weighted_grad(x):\n def grad(dy):\n return (1 - tf.abs(x)) * 2 * dy\n\n return sign(x), grad\n\n\[email protected]_keras_custom_object\ndef ste_sign(x):\n r\"\"\"\n Sign binarization function.\n \\\\[\n q(x) = \\begin{cases}\n -1 & x < 0 \\\\\\\n 1 & x \\geq 0\n \\end{cases}\n \\\\]\n\n The gradient is estimated using the Straight-Through Estimator\n (essentially the binarization is replaced by a clipped identity on the\n backward pass).\n \\\\[\\frac{\\partial q(x)}{\\partial x} = \\begin{cases}\n 1 & \\left|x\\right| \\leq 1 \\\\\\\n 0 & \\left|x\\right| > 1\n \\end{cases}\\\\]\n\n # Arguments\n x: Input tensor.\n\n # Returns\n Binarized tensor.\n\n # References\n - [Binarized Neural Networks: Training Deep Neural Networks with Weights and\n Activations Constrained to +1 or -1](http://arxiv.org/abs/1602.02830)\n \"\"\"\n\n x = tf.clip_by_value(x, -1, 1)\n\n return _binarize_with_identity_grad(x)\n\n\[email protected]_keras_custom_object\ndef magnitude_aware_sign(x):\n r\"\"\"\n Magnitude-aware sign for birealnet.\n\n\n # Arguments\n x: Input tensor\n\n # Returns\n Scaled binarized tensor (with values in $\\{-a, a\\}$, where $a$ is a float).\n\n # References\n - [Bi-Real Net: Enhancing the Performance of 1-bit CNNs With Improved\n Representational Capability and Advanced Training\n Algorithm](https://arxiv.org/abs/1808.00278)\n\n \"\"\"\n scale_factor = tf.stop_gradient(\n tf.reduce_mean(tf.abs(x), axis=list(range(len(x.shape) - 1)))\n )\n return scale_factor * ste_sign(x)\n\n\[email protected]_keras_custom_object\ndef approx_sign(x):\n r\"\"\"\n Sign binarization function.\n \\\\[\n q(x) = \\begin{cases}\n -1 & x < 0 \\\\\\\n 1 & x \\geq 0\n \\end{cases}\n \\\\]\n\n The gradient is estimated using the ApproxSign method.\n \\\\[\\frac{\\partial q(x)}{\\partial x} = \\begin{cases}\n (2 - 2 \\left|x\\right|) & \\left|x\\right| \\leq 1 \\\\\\\n 0 & \\left|x\\right| > 1\n \\end{cases}\n \\\\]\n\n # Arguments\n x: Input tensor.\n\n # Returns\n Binarized tensor.\n\n # References\n - [Bi-Real Net: Enhancing the Performance of 1-bit CNNs With Improved\n Representational Capability and Advanced\n Training Algorithm](http://arxiv.org/abs/1808.00278)\n \"\"\"\n\n x = tf.clip_by_value(x, -1, 1)\n\n return _binarize_with_weighted_grad(x)\n\n\ndef serialize(initializer):\n return tf.keras.utils.serialize_keras_object(initializer)\n\n\ndef deserialize(name, custom_objects=None):\n return tf.keras.utils.deserialize_keras_object(\n name,\n module_objects=globals(),\n custom_objects=custom_objects,\n printable_module_name=\"quantization function\",\n )\n\n\ndef get(identifier):\n if identifier is None:\n return 
None\n if isinstance(identifier, str):\n return deserialize(str(identifier))\n if callable(identifier):\n return identifier\n raise ValueError(\n f\"Could not interpret quantization function identifier: {identifier}\"\n )\n", "path": "larq/quantizers.py"}], "after_files": [{"content": "\"\"\"A Quantizer defines the way of transforming a full precision input to a\nquantized output and the pseudo-gradient method used for the backwards pass.\"\"\"\n\nimport tensorflow as tf\nfrom larq import utils\n\n\ndef sign(x):\n \"\"\"A sign function that will never be zero\"\"\"\n return tf.sign(tf.sign(x) + 0.1)\n\n\[email protected]_gradient\ndef _binarize_with_identity_grad(x):\n def grad(dy):\n return dy\n\n return sign(x), grad\n\n\[email protected]_gradient\ndef _binarize_with_weighted_grad(x):\n def grad(dy):\n return (1 - tf.abs(x)) * 2 * dy\n\n return sign(x), grad\n\n\[email protected]_keras_custom_object\ndef ste_sign(x):\n r\"\"\"\n Sign binarization function.\n \\\\[\n q(x) = \\begin{cases}\n -1 & x < 0 \\\\\\\n 1 & x \\geq 0\n \\end{cases}\n \\\\]\n\n The gradient is estimated using the Straight-Through Estimator\n (essentially the binarization is replaced by a clipped identity on the\n backward pass).\n \\\\[\\frac{\\partial q(x)}{\\partial x} = \\begin{cases}\n 1 & \\left|x\\right| \\leq 1 \\\\\\\n 0 & \\left|x\\right| > 1\n \\end{cases}\\\\]\n\n # Arguments\n x: Input tensor.\n\n # Returns\n Binarized tensor.\n\n # References\n - [Binarized Neural Networks: Training Deep Neural Networks with Weights and\n Activations Constrained to +1 or -1](http://arxiv.org/abs/1602.02830)\n \"\"\"\n\n x = tf.clip_by_value(x, -1, 1)\n\n return _binarize_with_identity_grad(x)\n\n\[email protected]_keras_custom_object\ndef magnitude_aware_sign(x):\n r\"\"\"\n Magnitude-aware sign for Bi-Real Net.\n\n\n # Arguments\n x: Input tensor\n\n # Returns\n Scaled binarized tensor (with values in $\\{-a, a\\}$, where $a$ is a float).\n\n # References\n - [Bi-Real Net: Enhancing the Performance of 1-bit CNNs With Improved\n Representational Capability and Advanced Training\n Algorithm](https://arxiv.org/abs/1808.00278)\n\n \"\"\"\n scale_factor = tf.stop_gradient(\n tf.reduce_mean(tf.abs(x), axis=list(range(len(x.shape) - 1)))\n )\n return scale_factor * ste_sign(x)\n\n\[email protected]_keras_custom_object\ndef approx_sign(x):\n r\"\"\"\n Sign binarization function.\n \\\\[\n q(x) = \\begin{cases}\n -1 & x < 0 \\\\\\\n 1 & x \\geq 0\n \\end{cases}\n \\\\]\n\n The gradient is estimated using the ApproxSign method.\n \\\\[\\frac{\\partial q(x)}{\\partial x} = \\begin{cases}\n (2 - 2 \\left|x\\right|) & \\left|x\\right| \\leq 1 \\\\\\\n 0 & \\left|x\\right| > 1\n \\end{cases}\n \\\\]\n\n # Arguments\n x: Input tensor.\n\n # Returns\n Binarized tensor.\n\n # References\n - [Bi-Real Net: Enhancing the Performance of 1-bit CNNs With Improved\n Representational Capability and Advanced\n Training Algorithm](http://arxiv.org/abs/1808.00278)\n \"\"\"\n\n x = tf.clip_by_value(x, -1, 1)\n\n return _binarize_with_weighted_grad(x)\n\n\ndef serialize(initializer):\n return tf.keras.utils.serialize_keras_object(initializer)\n\n\ndef deserialize(name, custom_objects=None):\n return tf.keras.utils.deserialize_keras_object(\n name,\n module_objects=globals(),\n custom_objects=custom_objects,\n printable_module_name=\"quantization function\",\n )\n\n\ndef get(identifier):\n if identifier is None:\n return None\n if isinstance(identifier, str):\n return deserialize(str(identifier))\n if callable(identifier):\n return identifier\n 
raise ValueError(\n f\"Could not interpret quantization function identifier: {identifier}\"\n )\n", "path": "larq/quantizers.py"}]} | 1,578 | 99 |
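Although the accepted patch above only fixes the docstring wording, the issue itself asks how to define a custom quantizer. Assembling the primitives the module already demonstrates (`tf.custom_gradient`, input clipping, and `utils.register_keras_custom_object`), a user-defined quantizer could look like the following — `ste_tern` and its 0.5 magnitude threshold are hypothetical, not part of larq:

```python
import tensorflow as tf
from larq import utils

@utils.register_keras_custom_object
def ste_tern(x):
    """Hypothetical ternarization quantizer with a straight-through gradient."""

    @tf.custom_gradient
    def _quantize(x):
        def grad(dy):
            return dy  # straight-through estimator, as in ste_sign

        # Forward pass: map to {-1, 0, 1} around a 0.5 magnitude threshold.
        return tf.sign(x) * tf.cast(tf.abs(x) > 0.5, x.dtype), grad

    return _quantize(tf.clip_by_value(x, -1.0, 1.0))
```

Registering it with Keras's custom-object registry is what should let a string identifier resolve through the `deserialize`/`get` machinery shown in the file above.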
gh_patches_debug_33924 | rasdani/github-patches | git_diff | PrefectHQ__prefect-710 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Context docs are broken
For some reason the actual `context` class signature is not being documented.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/prefect/utilities/context.py`
Content:
```
1 # Licensed under LICENSE.md; also available at https://www.prefect.io/licenses/beta-eula
2
3 """
4 This module implements the Prefect context that is available when tasks run.
5
6 Tasks can import prefect.context and access attributes that will be overwritten
7 when the task is run.
8
9 Example:
10
11 ```python
12 import prefect.context
13 with prefect.context(a=1, b=2):
14 print(prefect.context.a) # 1
15 print (prefect.context.a) # undefined
16 ```
17
18 Prefect provides various key / value pairs in context that are always available during task runs:
19
20 | Variable | Description |
21 | :--- | --- |
22 | `scheduled_start_time` | an actual datetime object representing the scheduled start time for the Flow run; falls back to `now` for unscheduled runs |
23 | `date` | an actual datetime object representing the current time |
24 | `today` | the current date formatted as `YYYY-MM-DD`|
25 | `today_nodash` | the current date formatted as `YYYYMMDD`|
26 | `yesterday` | yesterday's date formatted as `YYYY-MM-DD`|
27 | `yesterday_nodash` | yesterday's date formatted as `YYYYMMDD`|
28 | `tomorrow` | tomorrow's date formatted as `YYYY-MM-DD`|
29 | `tomorrow_nodash` | tomorrow's date formatted as `YYYYMMDD`|
30 | `task_name` | the name of the current task |
31 """
32
33 import contextlib
34 import threading
35 from typing import Any, Iterator, MutableMapping
36
37 from prefect.configuration import config
38 from prefect.utilities.collections import DotDict
39
40
41 class Context(DotDict, threading.local):
42 """
43 A thread safe context store for Prefect data.
44
45 The `Context` is a `DotDict` subclass, and can be instantiated the same way.
46
47 Args:
48 - *args (Any): arguments to provide to the `DotDict` constructor (e.g.,
49 an initial dictionary)
50 - *kwargs (Any): any key / value pairs to initialize this context with
51 """
52
53 def __init__(self, *args, **kwargs) -> None:
54 super().__init__(*args, **kwargs)
55 if "context" in config:
56 self.update(config.context)
57
58 def __repr__(self) -> str:
59 return "<Context>"
60
61 @contextlib.contextmanager
62 def __call__(self, *args: MutableMapping, **kwargs: Any) -> Iterator["Context"]:
63 """
64 A context manager for setting / resetting the Prefect context
65
66 Example:
67 import prefect.context
68 with prefect.context(dict(a=1, b=2), c=3):
69 print(prefect.context.a) # 1
70 """
71 previous_context = self.copy()
72 try:
73 self.update(*args, **kwargs)
74 yield self
75 finally:
76 self.clear()
77 self.update(previous_context)
78
79
80 context = Context()
81
```
Path: `src/prefect/tasks/templates/jinja2.py`
Content:
```
1 # Licensed under LICENSE.md; also available at https://www.prefect.io/licenses/beta-eula
2
3 from typing import Any
4
5 from jinja2 import Template
6
7 import prefect
8 from prefect import Task
9
10
11 class JinjaTemplateTask(Task):
12 """
13 This task contains a Jinja template which is formatted with the results of any
14 upstream tasks and returned.
15
16 Variables from `prefect.context` will also be used for rendering.
17
18 Args:
19 - template (str, optional): the optional _default_ template string to render at runtime;
20 can also be provided as a keyword to `run`, which takes precendence over this default.
21 - **kwargs (optional): additional keyword arguments to pass to the
22 standard Task constructor
23 """
24
25 def __init__(self, template: str = None, **kwargs: Any):
26 self.template = Template(template or "")
27 super().__init__(**kwargs)
28
29 def run(self, template: str = None, **format_kwargs: Any) -> str: # type: ignore
30 """
31 Formats the Jinja Template with the provided kwargs.
32
33 Args:
34 - template (str, optional): the template string to render; if not
35 provided, `self.template` will be used
36 - **format_kwargs (optional): keyword arguments to use for
37 rendering; note that variables from `prefect.context` will also be used
38
39 Returns:
40 - str: the rendered string
41 """
42 template = self.template if template is None else Template(template)
43 with prefect.context(**format_kwargs) as data:
44 return template.render(**data)
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/prefect/tasks/templates/jinja2.py b/src/prefect/tasks/templates/jinja2.py
--- a/src/prefect/tasks/templates/jinja2.py
+++ b/src/prefect/tasks/templates/jinja2.py
@@ -6,6 +6,7 @@
import prefect
from prefect import Task
+from prefect.utilities.tasks import defaults_from_attrs
class JinjaTemplateTask(Task):
@@ -23,9 +24,10 @@
"""
def __init__(self, template: str = None, **kwargs: Any):
- self.template = Template(template or "")
+ self.template = template or ""
super().__init__(**kwargs)
+ @defaults_from_attrs("template")
def run(self, template: str = None, **format_kwargs: Any) -> str: # type: ignore
"""
Formats the Jinja Template with the provided kwargs.
@@ -39,6 +41,6 @@
Returns:
- str: the rendered string
"""
- template = self.template if template is None else Template(template)
+ template = Template(template)
with prefect.context(**format_kwargs) as data:
return template.render(**data)
diff --git a/src/prefect/utilities/context.py b/src/prefect/utilities/context.py
--- a/src/prefect/utilities/context.py
+++ b/src/prefect/utilities/context.py
@@ -10,9 +10,11 @@
```python
import prefect.context
+
with prefect.context(a=1, b=2):
print(prefect.context.a) # 1
-print (prefect.context.a) # undefined
+
+print(prefect.context.a) # undefined
```
Prefect provides various key / value pairs in context that are always available during task runs:
@@ -28,6 +30,8 @@
| `tomorrow` | tomorrow's date formatted as `YYYY-MM-DD`|
| `tomorrow_nodash` | tomorrow's date formatted as `YYYYMMDD`|
| `task_name` | the name of the current task |
+
+Users can also provide values to context at runtime.
"""
import contextlib
| {"golden_diff": "diff --git a/src/prefect/tasks/templates/jinja2.py b/src/prefect/tasks/templates/jinja2.py\n--- a/src/prefect/tasks/templates/jinja2.py\n+++ b/src/prefect/tasks/templates/jinja2.py\n@@ -6,6 +6,7 @@\n \n import prefect\n from prefect import Task\n+from prefect.utilities.tasks import defaults_from_attrs\n \n \n class JinjaTemplateTask(Task):\n@@ -23,9 +24,10 @@\n \"\"\"\n \n def __init__(self, template: str = None, **kwargs: Any):\n- self.template = Template(template or \"\")\n+ self.template = template or \"\"\n super().__init__(**kwargs)\n \n+ @defaults_from_attrs(\"template\")\n def run(self, template: str = None, **format_kwargs: Any) -> str: # type: ignore\n \"\"\"\n Formats the Jinja Template with the provided kwargs.\n@@ -39,6 +41,6 @@\n Returns:\n - str: the rendered string\n \"\"\"\n- template = self.template if template is None else Template(template)\n+ template = Template(template)\n with prefect.context(**format_kwargs) as data:\n return template.render(**data)\ndiff --git a/src/prefect/utilities/context.py b/src/prefect/utilities/context.py\n--- a/src/prefect/utilities/context.py\n+++ b/src/prefect/utilities/context.py\n@@ -10,9 +10,11 @@\n \n ```python\n import prefect.context\n+\n with prefect.context(a=1, b=2):\n print(prefect.context.a) # 1\n-print (prefect.context.a) # undefined\n+\n+print(prefect.context.a) # undefined\n ```\n \n Prefect provides various key / value pairs in context that are always available during task runs:\n@@ -28,6 +30,8 @@\n | `tomorrow` | tomorrow's date formatted as `YYYY-MM-DD`|\n | `tomorrow_nodash` | tomorrow's date formatted as `YYYYMMDD`|\n | `task_name` | the name of the current task |\n+\n+Users can also provide values to context at runtime.\n \"\"\"\n \n import contextlib\n", "issue": "Context docs are broken\nFor some reason the actual `context` class signature is not being documented.\n", "before_files": [{"content": "# Licensed under LICENSE.md; also available at https://www.prefect.io/licenses/beta-eula\n\n\"\"\"\nThis module implements the Prefect context that is available when tasks run.\n\nTasks can import prefect.context and access attributes that will be overwritten\nwhen the task is run.\n\nExample:\n\n```python\nimport prefect.context\nwith prefect.context(a=1, b=2):\n print(prefect.context.a) # 1\nprint (prefect.context.a) # undefined\n```\n\nPrefect provides various key / value pairs in context that are always available during task runs:\n\n| Variable | Description |\n| :--- | --- |\n| `scheduled_start_time` | an actual datetime object representing the scheduled start time for the Flow run; falls back to `now` for unscheduled runs |\n| `date` | an actual datetime object representing the current time |\n| `today` | the current date formatted as `YYYY-MM-DD`|\n| `today_nodash` | the current date formatted as `YYYYMMDD`|\n| `yesterday` | yesterday's date formatted as `YYYY-MM-DD`|\n| `yesterday_nodash` | yesterday's date formatted as `YYYYMMDD`|\n| `tomorrow` | tomorrow's date formatted as `YYYY-MM-DD`|\n| `tomorrow_nodash` | tomorrow's date formatted as `YYYYMMDD`|\n| `task_name` | the name of the current task |\n\"\"\"\n\nimport contextlib\nimport threading\nfrom typing import Any, Iterator, MutableMapping\n\nfrom prefect.configuration import config\nfrom prefect.utilities.collections import DotDict\n\n\nclass Context(DotDict, threading.local):\n \"\"\"\n A thread safe context store for Prefect data.\n\n The `Context` is a `DotDict` subclass, and can be instantiated the same way.\n\n Args:\n - *args (Any): 
arguments to provide to the `DotDict` constructor (e.g.,\n an initial dictionary)\n - *kwargs (Any): any key / value pairs to initialize this context with\n \"\"\"\n\n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n if \"context\" in config:\n self.update(config.context)\n\n def __repr__(self) -> str:\n return \"<Context>\"\n\n @contextlib.contextmanager\n def __call__(self, *args: MutableMapping, **kwargs: Any) -> Iterator[\"Context\"]:\n \"\"\"\n A context manager for setting / resetting the Prefect context\n\n Example:\n import prefect.context\n with prefect.context(dict(a=1, b=2), c=3):\n print(prefect.context.a) # 1\n \"\"\"\n previous_context = self.copy()\n try:\n self.update(*args, **kwargs)\n yield self\n finally:\n self.clear()\n self.update(previous_context)\n\n\ncontext = Context()\n", "path": "src/prefect/utilities/context.py"}, {"content": "# Licensed under LICENSE.md; also available at https://www.prefect.io/licenses/beta-eula\n\nfrom typing import Any\n\nfrom jinja2 import Template\n\nimport prefect\nfrom prefect import Task\n\n\nclass JinjaTemplateTask(Task):\n \"\"\"\n This task contains a Jinja template which is formatted with the results of any\n upstream tasks and returned.\n\n Variables from `prefect.context` will also be used for rendering.\n\n Args:\n - template (str, optional): the optional _default_ template string to render at runtime;\n can also be provided as a keyword to `run`, which takes precendence over this default.\n - **kwargs (optional): additional keyword arguments to pass to the\n standard Task constructor\n \"\"\"\n\n def __init__(self, template: str = None, **kwargs: Any):\n self.template = Template(template or \"\")\n super().__init__(**kwargs)\n\n def run(self, template: str = None, **format_kwargs: Any) -> str: # type: ignore\n \"\"\"\n Formats the Jinja Template with the provided kwargs.\n\n Args:\n - template (str, optional): the template string to render; if not\n provided, `self.template` will be used\n - **format_kwargs (optional): keyword arguments to use for\n rendering; note that variables from `prefect.context` will also be used\n\n Returns:\n - str: the rendered string\n \"\"\"\n template = self.template if template is None else Template(template)\n with prefect.context(**format_kwargs) as data:\n return template.render(**data)\n", "path": "src/prefect/tasks/templates/jinja2.py"}], "after_files": [{"content": "# Licensed under LICENSE.md; also available at https://www.prefect.io/licenses/beta-eula\n\n\"\"\"\nThis module implements the Prefect context that is available when tasks run.\n\nTasks can import prefect.context and access attributes that will be overwritten\nwhen the task is run.\n\nExample:\n\n```python\nimport prefect.context\n\nwith prefect.context(a=1, b=2):\n print(prefect.context.a) # 1\n\nprint(prefect.context.a) # undefined\n```\n\nPrefect provides various key / value pairs in context that are always available during task runs:\n\n| Variable | Description |\n| :--- | --- |\n| `scheduled_start_time` | an actual datetime object representing the scheduled start time for the Flow run; falls back to `now` for unscheduled runs |\n| `date` | an actual datetime object representing the current time |\n| `today` | the current date formatted as `YYYY-MM-DD`|\n| `today_nodash` | the current date formatted as `YYYYMMDD`|\n| `yesterday` | yesterday's date formatted as `YYYY-MM-DD`|\n| `yesterday_nodash` | yesterday's date formatted as `YYYYMMDD`|\n| `tomorrow` | tomorrow's date formatted as 
`YYYY-MM-DD`|\n| `tomorrow_nodash` | tomorrow's date formatted as `YYYYMMDD`|\n| `task_name` | the name of the current task |\n\nUsers can also provide values to context at runtime.\n\"\"\"\n\nimport contextlib\nimport threading\nfrom typing import Any, Iterator, MutableMapping\n\nfrom prefect.configuration import config\nfrom prefect.utilities.collections import DotDict\n\n\nclass Context(DotDict, threading.local):\n \"\"\"\n A thread safe context store for Prefect data.\n\n The `Context` is a `DotDict` subclass, and can be instantiated the same way.\n\n Args:\n - *args (Any): arguments to provide to the `DotDict` constructor (e.g.,\n an initial dictionary)\n - *kwargs (Any): any key / value pairs to initialize this context with\n \"\"\"\n\n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n if \"context\" in config:\n self.update(config.context)\n\n def __repr__(self) -> str:\n return \"<Context>\"\n\n @contextlib.contextmanager\n def __call__(self, *args: MutableMapping, **kwargs: Any) -> Iterator[\"Context\"]:\n \"\"\"\n A context manager for setting / resetting the Prefect context\n\n Example:\n import prefect.context\n with prefect.context(dict(a=1, b=2), c=3):\n print(prefect.context.a) # 1\n \"\"\"\n previous_context = self.copy()\n try:\n self.update(*args, **kwargs)\n yield self\n finally:\n self.clear()\n self.update(previous_context)\n\n\ncontext = Context()\n", "path": "src/prefect/utilities/context.py"}, {"content": "# Licensed under LICENSE.md; also available at https://www.prefect.io/licenses/beta-eula\n\nfrom typing import Any\n\nfrom jinja2 import Template\n\nimport prefect\nfrom prefect import Task\nfrom prefect.utilities.tasks import defaults_from_attrs\n\n\nclass JinjaTemplateTask(Task):\n \"\"\"\n This task contains a Jinja template which is formatted with the results of any\n upstream tasks and returned.\n\n Variables from `prefect.context` will also be used for rendering.\n\n Args:\n - template (str, optional): the optional _default_ template string to render at runtime;\n can also be provided as a keyword to `run`, which takes precendence over this default.\n - **kwargs (optional): additional keyword arguments to pass to the\n standard Task constructor\n \"\"\"\n\n def __init__(self, template: str = None, **kwargs: Any):\n self.template = template or \"\"\n super().__init__(**kwargs)\n\n @defaults_from_attrs(\"template\")\n def run(self, template: str = None, **format_kwargs: Any) -> str: # type: ignore\n \"\"\"\n Formats the Jinja Template with the provided kwargs.\n\n Args:\n - template (str, optional): the template string to render; if not\n provided, `self.template` will be used\n - **format_kwargs (optional): keyword arguments to use for\n rendering; note that variables from `prefect.context` will also be used\n\n Returns:\n - str: the rendered string\n \"\"\"\n template = Template(template)\n with prefect.context(**format_kwargs) as data:\n return template.render(**data)\n", "path": "src/prefect/tasks/templates/jinja2.py"}]} | 1,492 | 469 |
gh_patches_debug_122 | rasdani/github-patches | git_diff | XanaduAI__strawberryfields-581 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Dependency versions error
#### Issue description
I made a fork of this project and tried to set up a new virtual environment.
```
python -m venv sf-venv
source sf-venv/bin/activate.fish
pip install -r requirements.txt
```
However, I got the following error
```
ERROR: Cannot install -r requirements.txt (line 4) and numpy>=1.20 because these package versions have conflicting dependencies.
The conflict is caused by:
The user requested numpy>=1.20
tensorflow 2.5.0 depends on numpy~=1.19.2
To fix this you could try to:
1. loosen the range of package versions you've specified
2. remove package versions to allow pip attempt to solve the dependency conflict
ERROR: ResolutionImpossible: for help visit https://pip.pypa.io/en/latest/user_guide/#fixing-conflicting-dependencies
```
#### Additional information
If it helps, I am using Python 3.9.4 and pip 21.1.1.
A quick fix would be to downgrade the numpy version in requirements.txt to resolve the conflict, but I am not sure it is the best way to go.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 2019 Xanadu Quantum Technologies Inc.
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import os
15 import sys
16
17 from setuptools import setup, find_packages
18
19
20 with open("strawberryfields/_version.py") as f:
21 version = f.readlines()[-1].split()[-1].strip("\"'")
22
23
24 requirements = [
25 "numpy>=1.17.4",
26 "scipy>=1.0.0",
27 "sympy>=1.5",
28 "networkx>=2.0",
29 "quantum-blackbird>=0.3.0",
30 "python-dateutil>=2.8.0",
31 "thewalrus>=0.15.0",
32 "numba",
33 "toml",
34 "appdirs",
35 "requests>=2.22.0",
36 "urllib3>=1.25.3",
37 ]
38
39 info = {
40 "name": "StrawberryFields",
41 "version": version,
42 "maintainer": "Xanadu Inc.",
43 "maintainer_email": "[email protected]",
44 "url": "https://github.com/XanaduAI/StrawberryFields",
45 "license": "Apache License 2.0",
46 "packages": find_packages(where="."),
47 "package_data": {"strawberryfields": ["backends/data/*", "apps/data/feature_data/*",
48 "apps/data/sample_data/*"]},
49 "include_package_data": True,
50 "entry_points" : {
51 'console_scripts': [
52 'sf=strawberryfields.cli:main'
53 ]
54 },
55 "description": "Open source library for continuous-variable quantum computation",
56 "long_description": open("README.rst", encoding="utf-8").read(),
57 "long_description_content_type": "text/x-rst",
58 "provides": ["strawberryfields"],
59 "install_requires": requirements,
60 # 'extras_require': extra_requirements,
61 "command_options": {
62 "build_sphinx": {"version": ("setup.py", version), "release": ("setup.py", version)}
63 },
64 }
65
66 classifiers = [
67 "Development Status :: 4 - Beta",
68 "Environment :: Console",
69 "Intended Audience :: Science/Research",
70 "License :: OSI Approved :: Apache Software License",
71 "Natural Language :: English",
72 "Operating System :: POSIX",
73 "Operating System :: MacOS :: MacOS X",
74 "Operating System :: POSIX :: Linux",
75 "Operating System :: Microsoft :: Windows",
76 "Programming Language :: Python",
77 "Programming Language :: Python :: 3",
78 "Programming Language :: Python :: 3.7",
79 "Programming Language :: Python :: 3.8",
80 "Programming Language :: Python :: 3.9",
81 "Programming Language :: Python :: 3 :: Only",
82 "Topic :: Scientific/Engineering :: Physics",
83 ]
84
85 setup(classifiers=classifiers, **(info))
86
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -22,7 +22,7 @@
requirements = [
- "numpy>=1.17.4",
+ "numpy>=1.19.2",
"scipy>=1.0.0",
"sympy>=1.5",
"networkx>=2.0",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -22,7 +22,7 @@\n \n \n requirements = [\n- \"numpy>=1.17.4\",\n+ \"numpy>=1.19.2\",\n \"scipy>=1.0.0\",\n \"sympy>=1.5\",\n \"networkx>=2.0\",\n", "issue": "Dependency versions error\n#### Issue description\r\nI made a fork of this project and tried to setup a new virtual environment.\r\n\r\n```\r\npython -m venv sf-venv\r\nsource sf-venv/bin/active.fish\r\npip install -r requirements.txt\r\n```\r\n\r\nHowever, I got the following error\r\n``` \r\nERROR: Cannot install -r requirements.txt (line 4) and numpy>=1.20 because these package versions have conflicting dependencies.\r\n\r\nThe conflict is caused by:\r\n The user requested numpy>=1.20\r\n tensorflow 2.5.0 depends on numpy~=1.19.2\r\n\r\nTo fix this you could try to:\r\n1. loosen the range of package versions you've specified\r\n2. remove package versions to allow pip attempt to solve the dependency conflict\r\n\r\nERROR: ResolutionImpossible: for help visit https://pip.pypa.io/en/latest/user_guide/#fixing-conflicting-dependencies\r\n```\r\n\r\n#### Additional information\r\n\r\nIf it helps, I am using Python 3.9.4 and pip 21.1.1. \r\n\r\nA quick fix would be to downgrade the version of numpy in requirements.txt and solve the issue, but I am not sure it is the best way to go.\r\n\n", "before_files": [{"content": "# Copyright 2019 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nwith open(\"strawberryfields/_version.py\") as f:\n version = f.readlines()[-1].split()[-1].strip(\"\\\"'\")\n\n\nrequirements = [\n \"numpy>=1.17.4\",\n \"scipy>=1.0.0\",\n \"sympy>=1.5\",\n \"networkx>=2.0\",\n \"quantum-blackbird>=0.3.0\",\n \"python-dateutil>=2.8.0\",\n \"thewalrus>=0.15.0\",\n \"numba\",\n \"toml\",\n \"appdirs\",\n \"requests>=2.22.0\",\n \"urllib3>=1.25.3\",\n]\n\ninfo = {\n \"name\": \"StrawberryFields\",\n \"version\": version,\n \"maintainer\": \"Xanadu Inc.\",\n \"maintainer_email\": \"[email protected]\",\n \"url\": \"https://github.com/XanaduAI/StrawberryFields\",\n \"license\": \"Apache License 2.0\",\n \"packages\": find_packages(where=\".\"),\n \"package_data\": {\"strawberryfields\": [\"backends/data/*\", \"apps/data/feature_data/*\",\n \"apps/data/sample_data/*\"]},\n \"include_package_data\": True,\n \"entry_points\" : {\n 'console_scripts': [\n 'sf=strawberryfields.cli:main'\n ]\n },\n \"description\": \"Open source library for continuous-variable quantum computation\",\n \"long_description\": open(\"README.rst\", encoding=\"utf-8\").read(),\n \"long_description_content_type\": \"text/x-rst\",\n \"provides\": [\"strawberryfields\"],\n \"install_requires\": requirements,\n # 'extras_require': extra_requirements,\n \"command_options\": {\n \"build_sphinx\": {\"version\": (\"setup.py\", version), \"release\": (\"setup.py\", version)}\n },\n}\n\nclassifiers = [\n \"Development Status :: 4 - Beta\",\n \"Environment :: Console\",\n 
\"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: POSIX\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Topic :: Scientific/Engineering :: Physics\",\n]\n\nsetup(classifiers=classifiers, **(info))\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2019 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nwith open(\"strawberryfields/_version.py\") as f:\n version = f.readlines()[-1].split()[-1].strip(\"\\\"'\")\n\n\nrequirements = [\n \"numpy>=1.19.2\",\n \"scipy>=1.0.0\",\n \"sympy>=1.5\",\n \"networkx>=2.0\",\n \"quantum-blackbird>=0.3.0\",\n \"python-dateutil>=2.8.0\",\n \"thewalrus>=0.15.0\",\n \"numba\",\n \"toml\",\n \"appdirs\",\n \"requests>=2.22.0\",\n \"urllib3>=1.25.3\",\n]\n\ninfo = {\n \"name\": \"StrawberryFields\",\n \"version\": version,\n \"maintainer\": \"Xanadu Inc.\",\n \"maintainer_email\": \"[email protected]\",\n \"url\": \"https://github.com/XanaduAI/StrawberryFields\",\n \"license\": \"Apache License 2.0\",\n \"packages\": find_packages(where=\".\"),\n \"package_data\": {\"strawberryfields\": [\"backends/data/*\", \"apps/data/feature_data/*\",\n \"apps/data/sample_data/*\"]},\n \"include_package_data\": True,\n \"entry_points\" : {\n 'console_scripts': [\n 'sf=strawberryfields.cli:main'\n ]\n },\n \"description\": \"Open source library for continuous-variable quantum computation\",\n \"long_description\": open(\"README.rst\", encoding=\"utf-8\").read(),\n \"long_description_content_type\": \"text/x-rst\",\n \"provides\": [\"strawberryfields\"],\n \"install_requires\": requirements,\n # 'extras_require': extra_requirements,\n \"command_options\": {\n \"build_sphinx\": {\"version\": (\"setup.py\", version), \"release\": (\"setup.py\", version)}\n },\n}\n\nclassifiers = [\n \"Development Status :: 4 - Beta\",\n \"Environment :: Console\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: POSIX\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Topic :: Scientific/Engineering :: Physics\",\n]\n\nsetup(classifiers=classifiers, 
**(info))\n", "path": "setup.py"}]} | 1,418 | 90 |
gh_patches_debug_19530 | rasdani/github-patches | git_diff | mozmeao__snippets-service-995 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix NR reporting
See https://github.com/mozmeao/infra/issues/1106
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `snippets/wsgi/app.py`
Content:
```
1 """
2 WSGI config for snippets project.
3
4 It exposes the WSGI callable as a module-level variable named ``application``.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
8 """
9 import os
10 os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'snippets.settings') # NOQA
11
12 from django.core.wsgi import get_wsgi_application
13
14 import newrelic.agent
15 from decouple import config
16 from raven.contrib.django.raven_compat.middleware.wsgi import Sentry
17
18 application = get_wsgi_application()
19
20 application = Sentry(application)
21
22 # Add NewRelic
23 newrelic_ini = config('NEW_RELIC_CONFIG_FILE', default='newrelic.ini')
24 newrelic_license_key = config('NEW_RELIC_LICENSE_KEY', default=None)
25 if newrelic_ini and newrelic_license_key:
26 newrelic.agent.initialize(newrelic_ini)
27 application = newrelic.agent.wsgi_application()(application)
28
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/snippets/wsgi/app.py b/snippets/wsgi/app.py
--- a/snippets/wsgi/app.py
+++ b/snippets/wsgi/app.py
@@ -6,22 +6,14 @@
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
-import os
-os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'snippets.settings') # NOQA
-
-from django.core.wsgi import get_wsgi_application
-
import newrelic.agent
-from decouple import config
-from raven.contrib.django.raven_compat.middleware.wsgi import Sentry
+newrelic.agent.initialize('newrelic.ini')
+import os # NOQA
+os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'snippets.settings') # NOQA
+
+from django.core.wsgi import get_wsgi_application # NOQA
application = get_wsgi_application()
+from raven.contrib.django.raven_compat.middleware.wsgi import Sentry # NOQA
application = Sentry(application)
-
-# Add NewRelic
-newrelic_ini = config('NEW_RELIC_CONFIG_FILE', default='newrelic.ini')
-newrelic_license_key = config('NEW_RELIC_LICENSE_KEY', default=None)
-if newrelic_ini and newrelic_license_key:
- newrelic.agent.initialize(newrelic_ini)
- application = newrelic.agent.wsgi_application()(application)
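The ordering is the substance of the fix: the New Relic agent patches supported frameworks at import time, so `initialize()` must run before Django is imported. The patched module, reconstructed from the diff for readability (assumes `newrelic.ini` is resolvable from the working directory):

```python
import newrelic.agent
newrelic.agent.initialize('newrelic.ini')  # must precede any Django import

import os  # NOQA
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'snippets.settings')  # NOQA

from django.core.wsgi import get_wsgi_application  # NOQA
application = get_wsgi_application()

from raven.contrib.django.raven_compat.middleware.wsgi import Sentry  # NOQA
application = Sentry(application)
```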
| {"golden_diff": "diff --git a/snippets/wsgi/app.py b/snippets/wsgi/app.py\n--- a/snippets/wsgi/app.py\n+++ b/snippets/wsgi/app.py\n@@ -6,22 +6,14 @@\n For more information on this file, see\n https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/\n \"\"\"\n-import os\n-os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'snippets.settings') # NOQA\n-\n-from django.core.wsgi import get_wsgi_application\n-\n import newrelic.agent\n-from decouple import config\n-from raven.contrib.django.raven_compat.middleware.wsgi import Sentry\n+newrelic.agent.initialize('newrelic.ini')\n \n+import os # NOQA\n+os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'snippets.settings') # NOQA\n+\n+from django.core.wsgi import get_wsgi_application # NOQA\n application = get_wsgi_application()\n \n+from raven.contrib.django.raven_compat.middleware.wsgi import Sentry # NOQA\n application = Sentry(application)\n-\n-# Add NewRelic\n-newrelic_ini = config('NEW_RELIC_CONFIG_FILE', default='newrelic.ini')\n-newrelic_license_key = config('NEW_RELIC_LICENSE_KEY', default=None)\n-if newrelic_ini and newrelic_license_key:\n- newrelic.agent.initialize(newrelic_ini)\n- application = newrelic.agent.wsgi_application()(application)\n", "issue": "Fix NR reporting\nSee https://github.com/mozmeao/infra/issues/1106\n", "before_files": [{"content": "\"\"\"\nWSGI config for snippets project.\n\nIt exposes the WSGI callable as a module-level variable named ``application``.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/\n\"\"\"\nimport os\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'snippets.settings') # NOQA\n\nfrom django.core.wsgi import get_wsgi_application\n\nimport newrelic.agent\nfrom decouple import config\nfrom raven.contrib.django.raven_compat.middleware.wsgi import Sentry\n\napplication = get_wsgi_application()\n\napplication = Sentry(application)\n\n# Add NewRelic\nnewrelic_ini = config('NEW_RELIC_CONFIG_FILE', default='newrelic.ini')\nnewrelic_license_key = config('NEW_RELIC_LICENSE_KEY', default=None)\nif newrelic_ini and newrelic_license_key:\n newrelic.agent.initialize(newrelic_ini)\n application = newrelic.agent.wsgi_application()(application)\n", "path": "snippets/wsgi/app.py"}], "after_files": [{"content": "\"\"\"\nWSGI config for snippets project.\n\nIt exposes the WSGI callable as a module-level variable named ``application``.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/\n\"\"\"\nimport newrelic.agent\nnewrelic.agent.initialize('newrelic.ini')\n\nimport os # NOQA\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'snippets.settings') # NOQA\n\nfrom django.core.wsgi import get_wsgi_application # NOQA\napplication = get_wsgi_application()\n\nfrom raven.contrib.django.raven_compat.middleware.wsgi import Sentry # NOQA\napplication = Sentry(application)\n", "path": "snippets/wsgi/app.py"}]} | 540 | 311 |
gh_patches_debug_16876 | rasdani/github-patches | git_diff | chainer__chainer-1355 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Possibly wrong doc or code for deconvolution cover_all
The documentation says:
```
cover_all (bool): If True, all spatial locations are convoluted into
some output pixels. It may make the output size larger.
```
However, when I prepare a small toy example, the output is smaller when `cover_all=True`. I feel like either the code or the documentation needs to be inverted.
See a [gist notebook](https://gist.github.com/LukasDrude/8a9ebbaa3a6ba4ae0e2bef611afefd5a) for the toy example or the attached screenshot. I had set the weight matrices to ones and disabled normalization for clarity.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/functions/pooling/unpooling_2d.py`
Content:
```
1 from chainer import cuda
2 from chainer.functions.pooling import pooling_2d
3 from chainer.utils import conv
4 from chainer.utils import type_check
5
6
7 class Unpooling2D(pooling_2d.Pooling2D):
8
9 """Unpooling over a set of 2d planes."""
10
11 def __init__(self, ksize, stride=None, pad=0,
12 outsize=None, cover_all=True):
13 super(Unpooling2D, self).__init__(ksize, stride, pad, cover_all)
14 self.outh, self.outw = (None, None) if outsize is None else outsize
15
16 def check_type_forward(self, in_types):
17 n_in = in_types.size()
18 type_check.expect(n_in == 1)
19 x_type = in_types[0]
20
21 type_check.expect(
22 x_type.dtype.kind == 'f',
23 x_type.ndim == 4,
24 )
25
26 if self.outh is not None:
27 expected_h = conv.get_conv_outsize(
28 self.outh, self.kh, self.sy, self.ph, cover_all=self.cover_all)
29 type_check.expect(x_type.shape[2] == expected_h)
30 if self.outw is not None:
31 expected_w = conv.get_conv_outsize(
32 self.outw, self.kw, self.sx, self.pw, cover_all=self.cover_all)
33 type_check.expect(x_type.shape[3] == expected_w)
34
35 def forward(self, x):
36 h, w = x[0].shape[2:]
37 if self.outh is None:
38 self.outh = conv.get_deconv_outsize(
39 h, self.kh, self.sy, self.ph, cover_all=self.cover_all)
40 if self.outw is None:
41 self.outw = conv.get_deconv_outsize(
42 w, self.kw, self.sx, self.pw, cover_all=self.cover_all)
43 xp = cuda.get_array_module(*x)
44 col = xp.tile(x[0][:, :, None, None],
45 (1, 1, self.kh, self.kw, 1, 1))
46 if isinstance(x[0], cuda.ndarray):
47 y = conv.col2im_gpu(col, self.sy, self.sx, self.ph, self.pw,
48 self.outh, self.outw)
49 else:
50 y = conv.col2im_cpu(col, self.sy, self.sx, self.ph, self.pw,
51 self.outh, self.outw)
52 return y,
53
54 def backward(self, x, gy):
55 if isinstance(gy[0], cuda.ndarray):
56 gcol = conv.im2col_gpu(
57 gy[0], self.kh, self.kw, self.sy, self.sx, self.ph, self.pw,
58 cover_all=self.cover_all)
59 else:
60 gcol = conv.im2col_cpu(
61 gy[0], self.kh, self.kw, self.sy, self.sx, self.ph, self.pw,
62 cover_all=self.cover_all)
63 gx = gcol.sum(axis=(2, 3))
64 return gx,
65
66
67 def unpooling_2d(x, ksize, stride=None, pad=0, outsize=None, cover_all=True):
68 """Inverse operation of pooling for 2d array.
69
70 This function acts similarly to :class:`~functions.Deconvolution2D`, but
71 it spreads input 2d array's value without any parameter instead of
72 computing the inner products.
73
74 Args:
75 x (~chainer.Variable): Input variable.
76 ksize (int or pair of ints): Size of pooling window. ``ksize=k`` and
77 ``ksize=(k, k)`` are equivalent.
78 stride (int, pair of ints or None): Stride of pooling applications.
79 ``stride=s`` and ``stride=(s, s)`` are equivalent. If ``None`` is
80 specified, then it uses same stride as the pooling window size.
81 pad (int or pair of ints): Spatial padding width for the input array.
82 ``pad=p`` and ``pad=(p, p)`` are equivalent.
83 outsize (None or pair of ints): Expected output size (height, width)
84 of array after the operation. If ``None``, the size
85 (height or width) is estimated from the size of input array
86 in first batch with
87 :func:`~chainer.utils.conv.get_deconv_outsize`.
88 If outsize is not ``None``, the result of outsize applied to
89 :func:`~chainer.utils.conv.get_conv_outsize` must be equal to
90 the shape of the 2d array in the input batch ``x``.
91 cover_all (bool): If ``True``, all spatial locations are pooled
92 into some output pixels, and the output size is larger than that
93 when cover_all is ``False``.
94
95 Returns:
96 ~chainer.Variable: Output variable.
97
98 """
99 return Unpooling2D(ksize, stride, pad, outsize, cover_all)(x)
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/chainer/functions/pooling/unpooling_2d.py b/chainer/functions/pooling/unpooling_2d.py
--- a/chainer/functions/pooling/unpooling_2d.py
+++ b/chainer/functions/pooling/unpooling_2d.py
@@ -88,9 +88,12 @@
If outsize is not ``None``, the result of outsize applied to
:func:`~chainer.utils.conv.get_conv_outsize` must be equal to
the shape of the 2d array in the input batch ``x``.
- cover_all (bool): If ``True``, all spatial locations are pooled
- into some output pixels, and the output size is larger than that
- when cover_all is ``False``.
+ cover_all (bool): If ``True``, the output size may be smaller than
+ the size if ``cover_all`` is ``False``. This flag serves to
+ align behavior to the pooling functions which can cover all
+ input locations, see :func:`~chainer.functions.max_pooling_2d`
+ and :func:`~chainer.functions.convolution_2d`.
+
Returns:
~chainer.Variable: Output variable.
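The arithmetic behind the corrected wording, as a sketch. `get_deconv_outsize` is referenced in the listing above but its body is not shown there, so the formulas below are a plausible reconstruction, not a verbatim copy:

```python
def deconv_outsize(size, k, s, p, cover_all=False):
    # assumed to mirror chainer.utils.conv.get_deconv_outsize
    if cover_all:
        return s * (size - 1) + k - s + 1 - 2 * p
    return s * (size - 1) + k - 2 * p

# e.g. 4 input columns, kernel 2, stride 2, no padding:
print(deconv_outsize(4, 2, 2, 0, cover_all=False))  # 8
print(deconv_outsize(4, 2, 2, 0, cover_all=True))   # 7, smaller, as the new docstring says
```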
| {"golden_diff": "diff --git a/chainer/functions/pooling/unpooling_2d.py b/chainer/functions/pooling/unpooling_2d.py\n--- a/chainer/functions/pooling/unpooling_2d.py\n+++ b/chainer/functions/pooling/unpooling_2d.py\n@@ -88,9 +88,12 @@\n If outsize is not ``None``, the result of outsize applied to\n :func:`~chainer.utils.conv.get_conv_outsize` must be equal to\n the shape of the 2d array in the input batch ``x``.\n- cover_all (bool): If ``True``, all spatial locations are pooled\n- into some output pixels, and the output size is larger than that\n- when cover_all is ``False``.\n+ cover_all (bool): If ``True``, the output size may be smaller than\n+ the size if ``cover_all`` is ``False``. This flag serves to\n+ align behavior to the pooling functions which can cover all\n+ input locations, see :func:`~chainer.functions.max_pooling_2d`\n+ and :func:`~chainer.functions.convolution_2d`.\n+\n \n Returns:\n ~chainer.Variable: Output variable.\n", "issue": "Possibly wrong doc or code for deconvolution cover_all\nThe documentation says:\n\n```\ncover_all (bool): If True, all spatial locations are convoluted into\n some output pixels. It may make the output size larger.\n```\n\nHowever, when I prepare a small toy example, the output is larger when `cover_all=True`. I feel like either the code or the documentation needs to be inverted.\n\nSee an [gist notebook](https://gist.github.com/LukasDrude/8a9ebbaa3a6ba4ae0e2bef611afefd5a) for the toy example or the attached screenshot. I had set the weight matrices to ones and disabled normalization for clarity.\n\n\n\n", "before_files": [{"content": "from chainer import cuda\nfrom chainer.functions.pooling import pooling_2d\nfrom chainer.utils import conv\nfrom chainer.utils import type_check\n\n\nclass Unpooling2D(pooling_2d.Pooling2D):\n\n \"\"\"Unpooling over a set of 2d planes.\"\"\"\n\n def __init__(self, ksize, stride=None, pad=0,\n outsize=None, cover_all=True):\n super(Unpooling2D, self).__init__(ksize, stride, pad, cover_all)\n self.outh, self.outw = (None, None) if outsize is None else outsize\n\n def check_type_forward(self, in_types):\n n_in = in_types.size()\n type_check.expect(n_in == 1)\n x_type = in_types[0]\n\n type_check.expect(\n x_type.dtype.kind == 'f',\n x_type.ndim == 4,\n )\n\n if self.outh is not None:\n expected_h = conv.get_conv_outsize(\n self.outh, self.kh, self.sy, self.ph, cover_all=self.cover_all)\n type_check.expect(x_type.shape[2] == expected_h)\n if self.outw is not None:\n expected_w = conv.get_conv_outsize(\n self.outw, self.kw, self.sx, self.pw, cover_all=self.cover_all)\n type_check.expect(x_type.shape[3] == expected_w)\n\n def forward(self, x):\n h, w = x[0].shape[2:]\n if self.outh is None:\n self.outh = conv.get_deconv_outsize(\n h, self.kh, self.sy, self.ph, cover_all=self.cover_all)\n if self.outw is None:\n self.outw = conv.get_deconv_outsize(\n w, self.kw, self.sx, self.pw, cover_all=self.cover_all)\n xp = cuda.get_array_module(*x)\n col = xp.tile(x[0][:, :, None, None],\n (1, 1, self.kh, self.kw, 1, 1))\n if isinstance(x[0], cuda.ndarray):\n y = conv.col2im_gpu(col, self.sy, self.sx, self.ph, self.pw,\n self.outh, self.outw)\n else:\n y = conv.col2im_cpu(col, self.sy, self.sx, self.ph, self.pw,\n self.outh, self.outw)\n return y,\n\n def backward(self, x, gy):\n if isinstance(gy[0], cuda.ndarray):\n gcol = conv.im2col_gpu(\n gy[0], self.kh, self.kw, self.sy, self.sx, self.ph, self.pw,\n cover_all=self.cover_all)\n else:\n gcol = conv.im2col_cpu(\n gy[0], self.kh, self.kw, self.sy, self.sx, self.ph, self.pw,\n 
cover_all=self.cover_all)\n gx = gcol.sum(axis=(2, 3))\n return gx,\n\n\ndef unpooling_2d(x, ksize, stride=None, pad=0, outsize=None, cover_all=True):\n \"\"\"Inverse operation of pooling for 2d array.\n\n This function acts similarly to :class:`~functions.Deconvolution2D`, but\n it spreads input 2d array's value without any parameter instead of\n computing the inner products.\n\n Args:\n x (~chainer.Variable): Input variable.\n ksize (int or pair of ints): Size of pooling window. ``ksize=k`` and\n ``ksize=(k, k)`` are equivalent.\n stride (int, pair of ints or None): Stride of pooling applications.\n ``stride=s`` and ``stride=(s, s)`` are equivalent. If ``None`` is\n specified, then it uses same stride as the pooling window size.\n pad (int or pair of ints): Spatial padding width for the input array.\n ``pad=p`` and ``pad=(p, p)`` are equivalent.\n outsize (None or pair of ints): Expected output size (height, width)\n of array after the operation. If ``None``, the size\n (height or width) is estimated from the size of input array\n in first batch with\n :func:`~chainer.utils.conv.get_deconv_outsize`.\n If outsize is not ``None``, the result of outsize applied to\n :func:`~chainer.utils.conv.get_conv_outsize` must be equal to\n the shape of the 2d array in the input batch ``x``.\n cover_all (bool): If ``True``, all spatial locations are pooled\n into some output pixels, and the output size is larger than that\n when cover_all is ``False``.\n\n Returns:\n ~chainer.Variable: Output variable.\n\n \"\"\"\n return Unpooling2D(ksize, stride, pad, outsize, cover_all)(x)\n", "path": "chainer/functions/pooling/unpooling_2d.py"}], "after_files": [{"content": "from chainer import cuda\nfrom chainer.functions.pooling import pooling_2d\nfrom chainer.utils import conv\nfrom chainer.utils import type_check\n\n\nclass Unpooling2D(pooling_2d.Pooling2D):\n\n \"\"\"Unpooling over a set of 2d planes.\"\"\"\n\n def __init__(self, ksize, stride=None, pad=0,\n outsize=None, cover_all=True):\n super(Unpooling2D, self).__init__(ksize, stride, pad, cover_all)\n self.outh, self.outw = (None, None) if outsize is None else outsize\n\n def check_type_forward(self, in_types):\n n_in = in_types.size()\n type_check.expect(n_in == 1)\n x_type = in_types[0]\n\n type_check.expect(\n x_type.dtype.kind == 'f',\n x_type.ndim == 4,\n )\n\n if self.outh is not None:\n expected_h = conv.get_conv_outsize(\n self.outh, self.kh, self.sy, self.ph, cover_all=self.cover_all)\n type_check.expect(x_type.shape[2] == expected_h)\n if self.outw is not None:\n expected_w = conv.get_conv_outsize(\n self.outw, self.kw, self.sx, self.pw, cover_all=self.cover_all)\n type_check.expect(x_type.shape[3] == expected_w)\n\n def forward(self, x):\n h, w = x[0].shape[2:]\n if self.outh is None:\n self.outh = conv.get_deconv_outsize(\n h, self.kh, self.sy, self.ph, cover_all=self.cover_all)\n if self.outw is None:\n self.outw = conv.get_deconv_outsize(\n w, self.kw, self.sx, self.pw, cover_all=self.cover_all)\n xp = cuda.get_array_module(*x)\n col = xp.tile(x[0][:, :, None, None],\n (1, 1, self.kh, self.kw, 1, 1))\n if isinstance(x[0], cuda.ndarray):\n y = conv.col2im_gpu(col, self.sy, self.sx, self.ph, self.pw,\n self.outh, self.outw)\n else:\n y = conv.col2im_cpu(col, self.sy, self.sx, self.ph, self.pw,\n self.outh, self.outw)\n return y,\n\n def backward(self, x, gy):\n if isinstance(gy[0], cuda.ndarray):\n gcol = conv.im2col_gpu(\n gy[0], self.kh, self.kw, self.sy, self.sx, self.ph, self.pw,\n cover_all=self.cover_all)\n else:\n gcol = 
conv.im2col_cpu(\n gy[0], self.kh, self.kw, self.sy, self.sx, self.ph, self.pw,\n cover_all=self.cover_all)\n gx = gcol.sum(axis=(2, 3))\n return gx,\n\n\ndef unpooling_2d(x, ksize, stride=None, pad=0, outsize=None, cover_all=True):\n \"\"\"Inverse operation of pooling for 2d array.\n\n This function acts similarly to :class:`~functions.Deconvolution2D`, but\n it spreads input 2d array's value without any parameter instead of\n computing the inner products.\n\n Args:\n x (~chainer.Variable): Input variable.\n ksize (int or pair of ints): Size of pooling window. ``ksize=k`` and\n ``ksize=(k, k)`` are equivalent.\n stride (int, pair of ints or None): Stride of pooling applications.\n ``stride=s`` and ``stride=(s, s)`` are equivalent. If ``None`` is\n specified, then it uses same stride as the pooling window size.\n pad (int or pair of ints): Spatial padding width for the input array.\n ``pad=p`` and ``pad=(p, p)`` are equivalent.\n outsize (None or pair of ints): Expected output size (height, width)\n of array after the operation. If ``None``, the size\n (height or width) is estimated from the size of input array\n in first batch with\n :func:`~chainer.utils.conv.get_deconv_outsize`.\n If outsize is not ``None``, the result of outsize applied to\n :func:`~chainer.utils.conv.get_conv_outsize` must be equal to\n the shape of the 2d array in the input batch ``x``.\n cover_all (bool): If ``True``, the output size may be smaller than\n the size if ``cover_all`` is ``False``. This flag serves to\n align behavior to the pooling functions which can cover all\n input locations, see :func:`~chainer.functions.max_pooling_2d`\n and :func:`~chainer.functions.convolution_2d`.\n\n\n Returns:\n ~chainer.Variable: Output variable.\n\n \"\"\"\n return Unpooling2D(ksize, stride, pad, outsize, cover_all)(x)\n", "path": "chainer/functions/pooling/unpooling_2d.py"}]} | 1,780 | 271 |
gh_patches_debug_11166 | rasdani/github-patches | git_diff | DataDog__dd-agent-2443 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[marathon] Marathon plugin slows down agent when marathon has many apps running
We are using Datadog to monitor a Marathon framework that runs over 150 apps, and the marathon check seems to be slowing down the entire Datadog agent process.

After investigating what the plugin actually does, the problem seems to be this loop: https://github.com/DataDog/dd-agent/blob/5.4.4/checks.d/marathon.py#L46. The agent sequentially hits the API 150 times, which blocks metric reporting long enough to trigger some of our other alerts.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checks.d/marathon.py`
Content:
```
1 # (C) Datadog, Inc. 2014-2016
2 # (C) graemej <[email protected]> 2014
3 # All rights reserved
4 # Licensed under Simplified BSD License (see LICENSE)
5
6
7 # stdlib
8 from urlparse import urljoin
9
10 # 3rd party
11 import requests
12
13 # project
14 from checks import AgentCheck
15
16
17 class Marathon(AgentCheck):
18
19 DEFAULT_TIMEOUT = 5
20 SERVICE_CHECK_NAME = 'marathon.can_connect'
21
22 APP_METRICS = [
23 'backoffFactor',
24 'backoffSeconds',
25 'cpus',
26 'disk',
27 'instances',
28 'mem',
29 'taskRateLimit',
30 'tasksRunning',
31 'tasksStaged'
32 ]
33
34 def check(self, instance):
35 if 'url' not in instance:
36 raise Exception('Marathon instance missing "url" value.')
37
38 # Load values from the instance config
39 url = instance['url']
40 user = instance.get('user')
41 password = instance.get('password')
42 if user is not None and password is not None:
43 auth = (user,password)
44 else:
45 auth = None
46 instance_tags = instance.get('tags', [])
47 default_timeout = self.init_config.get('default_timeout', self.DEFAULT_TIMEOUT)
48 timeout = float(instance.get('timeout', default_timeout))
49
50 response = self.get_json(urljoin(url, "/v2/apps"), timeout, auth)
51 if response is not None:
52 self.gauge('marathon.apps', len(response['apps']), tags=instance_tags)
53 for app in response['apps']:
54 tags = ['app_id:' + app['id'], 'version:' + app['version']] + instance_tags
55 for attr in self.APP_METRICS:
56 if attr in app:
57 self.gauge('marathon.' + attr, app[attr], tags=tags)
58
59 query_url = urljoin(url, "/v2/apps/{0}/versions".format(app['id']))
60 versions_reply = self.get_json(query_url, timeout, auth)
61
62 if versions_reply is not None:
63 self.gauge('marathon.versions', len(versions_reply['versions']), tags=tags)
64
65 def get_json(self, url, timeout, auth):
66 try:
67 r = requests.get(url, timeout=timeout, auth=auth)
68 r.raise_for_status()
69 except requests.exceptions.Timeout:
70 # If there's a timeout
71 self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
72 message='%s timed out after %s seconds.' % (url, timeout),
73 tags = ["url:{0}".format(url)])
74 raise Exception("Timeout when hitting %s" % url)
75
76 except requests.exceptions.HTTPError:
77 self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
78 message='%s returned a status of %s' % (url, r.status_code),
79 tags = ["url:{0}".format(url)])
80 raise Exception("Got %s when hitting %s" % (r.status_code, url))
81
82 else:
83 self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK,
84 tags = ["url:{0}".format(url)]
85 )
86
87 return r.json()
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checks.d/marathon.py b/checks.d/marathon.py
--- a/checks.d/marathon.py
+++ b/checks.d/marathon.py
@@ -56,12 +56,6 @@
if attr in app:
self.gauge('marathon.' + attr, app[attr], tags=tags)
- query_url = urljoin(url, "/v2/apps/{0}/versions".format(app['id']))
- versions_reply = self.get_json(query_url, timeout, auth)
-
- if versions_reply is not None:
- self.gauge('marathon.versions', len(versions_reply['versions']), tags=tags)
-
def get_json(self, url, timeout, auth):
try:
r = requests.get(url, timeout=timeout, auth=auth)
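The effect on request volume is easy to see: the removed block issued one extra HTTP call per app inside the loop over `/v2/apps` (the per-app `marathon.versions` gauge goes away with it). With the 150 apps from the issue:

```python
n_apps = 150
requests_before = 1 + n_apps  # one /v2/apps call, then /v2/apps/<id>/versions per app
requests_after = 1            # only the single /v2/apps call survives the patch
print(requests_before, requests_after)  # 151 1
```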
| {"golden_diff": "diff --git a/checks.d/marathon.py b/checks.d/marathon.py\n--- a/checks.d/marathon.py\n+++ b/checks.d/marathon.py\n@@ -56,12 +56,6 @@\n if attr in app:\n self.gauge('marathon.' + attr, app[attr], tags=tags)\n \n- query_url = urljoin(url, \"/v2/apps/{0}/versions\".format(app['id']))\n- versions_reply = self.get_json(query_url, timeout, auth)\n-\n- if versions_reply is not None:\n- self.gauge('marathon.versions', len(versions_reply['versions']), tags=tags)\n-\n def get_json(self, url, timeout, auth):\n try:\n r = requests.get(url, timeout=timeout, auth=auth)\n", "issue": "[marathon] Marathon plugin slows down agent when marathon has many apps running\nWe are monitoring a marathon framework using datadog which has over 150 apps, and the marathon check seems to be slowing down the entire datadog process.\n\nAfter investigating what the plugin actually does, the problem seems to be this loop: https://github.com/DataDog/dd-agent/blob/5.4.4/checks.d/marathon.py#L46. It appears that the agent is sequentially hitting the API 150 times, which is enough to stop the agent from reporting metrics long enough to trigger some of our other alerts.\n\n", "before_files": [{"content": "# (C) Datadog, Inc. 2014-2016\n# (C) graemej <[email protected]> 2014\n# All rights reserved\n# Licensed under Simplified BSD License (see LICENSE)\n\n\n# stdlib\nfrom urlparse import urljoin\n\n# 3rd party\nimport requests\n\n# project\nfrom checks import AgentCheck\n\n\nclass Marathon(AgentCheck):\n\n DEFAULT_TIMEOUT = 5\n SERVICE_CHECK_NAME = 'marathon.can_connect'\n\n APP_METRICS = [\n 'backoffFactor',\n 'backoffSeconds',\n 'cpus',\n 'disk',\n 'instances',\n 'mem',\n 'taskRateLimit',\n 'tasksRunning',\n 'tasksStaged'\n ]\n\n def check(self, instance):\n if 'url' not in instance:\n raise Exception('Marathon instance missing \"url\" value.')\n\n # Load values from the instance config\n url = instance['url']\n user = instance.get('user')\n password = instance.get('password')\n if user is not None and password is not None:\n auth = (user,password)\n else:\n auth = None\n instance_tags = instance.get('tags', [])\n default_timeout = self.init_config.get('default_timeout', self.DEFAULT_TIMEOUT)\n timeout = float(instance.get('timeout', default_timeout))\n\n response = self.get_json(urljoin(url, \"/v2/apps\"), timeout, auth)\n if response is not None:\n self.gauge('marathon.apps', len(response['apps']), tags=instance_tags)\n for app in response['apps']:\n tags = ['app_id:' + app['id'], 'version:' + app['version']] + instance_tags\n for attr in self.APP_METRICS:\n if attr in app:\n self.gauge('marathon.' + attr, app[attr], tags=tags)\n\n query_url = urljoin(url, \"/v2/apps/{0}/versions\".format(app['id']))\n versions_reply = self.get_json(query_url, timeout, auth)\n\n if versions_reply is not None:\n self.gauge('marathon.versions', len(versions_reply['versions']), tags=tags)\n\n def get_json(self, url, timeout, auth):\n try:\n r = requests.get(url, timeout=timeout, auth=auth)\n r.raise_for_status()\n except requests.exceptions.Timeout:\n # If there's a timeout\n self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,\n message='%s timed out after %s seconds.' 
% (url, timeout),\n tags = [\"url:{0}\".format(url)])\n raise Exception(\"Timeout when hitting %s\" % url)\n\n except requests.exceptions.HTTPError:\n self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,\n message='%s returned a status of %s' % (url, r.status_code),\n tags = [\"url:{0}\".format(url)])\n raise Exception(\"Got %s when hitting %s\" % (r.status_code, url))\n\n else:\n self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK,\n tags = [\"url:{0}\".format(url)]\n )\n\n return r.json()\n", "path": "checks.d/marathon.py"}], "after_files": [{"content": "# (C) Datadog, Inc. 2014-2016\n# (C) graemej <[email protected]> 2014\n# All rights reserved\n# Licensed under Simplified BSD License (see LICENSE)\n\n\n# stdlib\nfrom urlparse import urljoin\n\n# 3rd party\nimport requests\n\n# project\nfrom checks import AgentCheck\n\n\nclass Marathon(AgentCheck):\n\n DEFAULT_TIMEOUT = 5\n SERVICE_CHECK_NAME = 'marathon.can_connect'\n\n APP_METRICS = [\n 'backoffFactor',\n 'backoffSeconds',\n 'cpus',\n 'disk',\n 'instances',\n 'mem',\n 'taskRateLimit',\n 'tasksRunning',\n 'tasksStaged'\n ]\n\n def check(self, instance):\n if 'url' not in instance:\n raise Exception('Marathon instance missing \"url\" value.')\n\n # Load values from the instance config\n url = instance['url']\n user = instance.get('user')\n password = instance.get('password')\n if user is not None and password is not None:\n auth = (user,password)\n else:\n auth = None\n instance_tags = instance.get('tags', [])\n default_timeout = self.init_config.get('default_timeout', self.DEFAULT_TIMEOUT)\n timeout = float(instance.get('timeout', default_timeout))\n\n response = self.get_json(urljoin(url, \"/v2/apps\"), timeout, auth)\n if response is not None:\n self.gauge('marathon.apps', len(response['apps']), tags=instance_tags)\n for app in response['apps']:\n tags = ['app_id:' + app['id'], 'version:' + app['version']] + instance_tags\n for attr in self.APP_METRICS:\n if attr in app:\n self.gauge('marathon.' + attr, app[attr], tags=tags)\n\n def get_json(self, url, timeout, auth):\n try:\n r = requests.get(url, timeout=timeout, auth=auth)\n r.raise_for_status()\n except requests.exceptions.Timeout:\n # If there's a timeout\n self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,\n message='%s timed out after %s seconds.' % (url, timeout),\n tags = [\"url:{0}\".format(url)])\n raise Exception(\"Timeout when hitting %s\" % url)\n\n except requests.exceptions.HTTPError:\n self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,\n message='%s returned a status of %s' % (url, r.status_code),\n tags = [\"url:{0}\".format(url)])\n raise Exception(\"Got %s when hitting %s\" % (r.status_code, url))\n\n else:\n self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK,\n tags = [\"url:{0}\".format(url)]\n )\n\n return r.json()\n", "path": "checks.d/marathon.py"}]} | 1,258 | 179 |
gh_patches_debug_17970 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-604 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
link formatting not working properly
I made a Markdown link, but the "<a href" part was trimmed and garbled HTML remained.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/views/status.py`
Content:
```
1 ''' what are we here for if not for posting '''
2 import re
3 from django.contrib.auth.decorators import login_required
4 from django.http import HttpResponseBadRequest
5 from django.shortcuts import get_object_or_404, redirect
6 from django.utils.decorators import method_decorator
7 from django.views import View
8 from markdown import markdown
9
10 from bookwyrm import forms, models
11 from bookwyrm.sanitize_html import InputHtmlParser
12 from bookwyrm.settings import DOMAIN
13 from bookwyrm.status import create_notification, delete_status
14 from bookwyrm.utils import regex
15 from .helpers import handle_remote_webfinger
16
17
18 # pylint: disable= no-self-use
19 @method_decorator(login_required, name='dispatch')
20 class CreateStatus(View):
21 ''' the view for *posting* '''
22 def post(self, request, status_type):
23 ''' create status of whatever type '''
24 status_type = status_type[0].upper() + status_type[1:]
25
26 try:
27 form = getattr(forms, '%sForm' % status_type)(request.POST)
28 except AttributeError:
29 return HttpResponseBadRequest()
30 if not form.is_valid():
31 return redirect(request.headers.get('Referer', '/'))
32
33 status = form.save(commit=False)
34 if not status.sensitive and status.content_warning:
35 # the cw text field remains populated when you click "remove"
36 status.content_warning = None
37 status.save(broadcast=False)
38
39 # inspect the text for user tags
40 content = status.content
41 for (mention_text, mention_user) in find_mentions(content):
42 # add them to status mentions fk
43 status.mention_users.add(mention_user)
44
45 # turn the mention into a link
46 content = re.sub(
47 r'%s([^@]|$)' % mention_text,
48 r'<a href="%s">%s</a>\g<1>' % \
49 (mention_user.remote_id, mention_text),
50 content)
51
52 # add reply parent to mentions and notify
53 if status.reply_parent:
54 status.mention_users.add(status.reply_parent.user)
55
56 if status.reply_parent.user.local:
57 create_notification(
58 status.reply_parent.user,
59 'REPLY',
60 related_user=request.user,
61 related_status=status
62 )
63
64 # deduplicate mentions
65 status.mention_users.set(set(status.mention_users.all()))
66 # create mention notifications
67 for mention_user in status.mention_users.all():
68 if status.reply_parent and mention_user == status.reply_parent.user:
69 continue
70 if mention_user.local:
71 create_notification(
72 mention_user,
73 'MENTION',
74 related_user=request.user,
75 related_status=status
76 )
77
78 # don't apply formatting to generated notes
79 if not isinstance(status, models.GeneratedNote):
80 status.content = to_markdown(content)
81 # do apply formatting to quotes
82 if hasattr(status, 'quote'):
83 status.quote = to_markdown(status.quote)
84
85 status.save(created=True)
86 return redirect(request.headers.get('Referer', '/'))
87
88
89 class DeleteStatus(View):
90 ''' tombstone that bad boy '''
91 def post(self, request, status_id):
92 ''' delete and tombstone a status '''
93 status = get_object_or_404(models.Status, id=status_id)
94
95 # don't let people delete other people's statuses
96 if status.user != request.user:
97 return HttpResponseBadRequest()
98
99 # perform deletion
100 delete_status(status)
101 return redirect(request.headers.get('Referer', '/'))
102
103 def find_mentions(content):
104 ''' detect @mentions in raw status content '''
105 for match in re.finditer(regex.strict_username, content):
106 username = match.group().strip().split('@')[1:]
107 if len(username) == 1:
108 # this looks like a local user (@user), fill in the domain
109 username.append(DOMAIN)
110 username = '@'.join(username)
111
112 mention_user = handle_remote_webfinger(username)
113 if not mention_user:
114 # we can ignore users we don't know about
115 continue
116 yield (match.group(), mention_user)
117
118
119 def format_links(content):
120 ''' detect and format links '''
121 return re.sub(
122 r'([^(href=")]|^|\()(https?:\/\/(%s([\w\.\-_\/+&\?=:;,])*))' % \
123 regex.domain,
124 r'\g<1><a href="\g<2>">\g<3></a>',
125 content)
126
127 def to_markdown(content):
128 ''' catch links and convert to markdown '''
129 content = format_links(content)
130 content = markdown(content)
131 # sanitize resulting html
132 sanitizer = InputHtmlParser()
133 sanitizer.feed(content)
134 return sanitizer.get_output()
135
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bookwyrm/views/status.py b/bookwyrm/views/status.py
--- a/bookwyrm/views/status.py
+++ b/bookwyrm/views/status.py
@@ -48,7 +48,6 @@
r'<a href="%s">%s</a>\g<1>' % \
(mention_user.remote_id, mention_text),
content)
-
# add reply parent to mentions and notify
if status.reply_parent:
status.mention_users.add(status.reply_parent.user)
@@ -126,8 +125,8 @@
def to_markdown(content):
''' catch links and convert to markdown '''
- content = format_links(content)
content = markdown(content)
+ content = format_links(content)
# sanitize resulting html
sanitizer = InputHtmlParser()
sanitizer.feed(content)
| {"golden_diff": "diff --git a/bookwyrm/views/status.py b/bookwyrm/views/status.py\n--- a/bookwyrm/views/status.py\n+++ b/bookwyrm/views/status.py\n@@ -48,7 +48,6 @@\n r'<a href=\"%s\">%s</a>\\g<1>' % \\\n (mention_user.remote_id, mention_text),\n content)\n-\n # add reply parent to mentions and notify\n if status.reply_parent:\n status.mention_users.add(status.reply_parent.user)\n@@ -126,8 +125,8 @@\n \n def to_markdown(content):\n ''' catch links and convert to markdown '''\n- content = format_links(content)\n content = markdown(content)\n+ content = format_links(content)\n # sanitize resulting html\n sanitizer = InputHtmlParser()\n sanitizer.feed(content)\n", "issue": "link formatting not working properly\nI made a markdown link, but the \"<a href\" part was trimmed and garbled html remained\n", "before_files": [{"content": "''' what are we here for if not for posting '''\nimport re\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseBadRequest\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\nfrom markdown import markdown\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.sanitize_html import InputHtmlParser\nfrom bookwyrm.settings import DOMAIN\nfrom bookwyrm.status import create_notification, delete_status\nfrom bookwyrm.utils import regex\nfrom .helpers import handle_remote_webfinger\n\n\n# pylint: disable= no-self-use\n@method_decorator(login_required, name='dispatch')\nclass CreateStatus(View):\n ''' the view for *posting* '''\n def post(self, request, status_type):\n ''' create status of whatever type '''\n status_type = status_type[0].upper() + status_type[1:]\n\n try:\n form = getattr(forms, '%sForm' % status_type)(request.POST)\n except AttributeError:\n return HttpResponseBadRequest()\n if not form.is_valid():\n return redirect(request.headers.get('Referer', '/'))\n\n status = form.save(commit=False)\n if not status.sensitive and status.content_warning:\n # the cw text field remains populated when you click \"remove\"\n status.content_warning = None\n status.save(broadcast=False)\n\n # inspect the text for user tags\n content = status.content\n for (mention_text, mention_user) in find_mentions(content):\n # add them to status mentions fk\n status.mention_users.add(mention_user)\n\n # turn the mention into a link\n content = re.sub(\n r'%s([^@]|$)' % mention_text,\n r'<a href=\"%s\">%s</a>\\g<1>' % \\\n (mention_user.remote_id, mention_text),\n content)\n\n # add reply parent to mentions and notify\n if status.reply_parent:\n status.mention_users.add(status.reply_parent.user)\n\n if status.reply_parent.user.local:\n create_notification(\n status.reply_parent.user,\n 'REPLY',\n related_user=request.user,\n related_status=status\n )\n\n # deduplicate mentions\n status.mention_users.set(set(status.mention_users.all()))\n # create mention notifications\n for mention_user in status.mention_users.all():\n if status.reply_parent and mention_user == status.reply_parent.user:\n continue\n if mention_user.local:\n create_notification(\n mention_user,\n 'MENTION',\n related_user=request.user,\n related_status=status\n )\n\n # don't apply formatting to generated notes\n if not isinstance(status, models.GeneratedNote):\n status.content = to_markdown(content)\n # do apply formatting to quotes\n if hasattr(status, 'quote'):\n status.quote = to_markdown(status.quote)\n\n status.save(created=True)\n return redirect(request.headers.get('Referer', 
'/'))\n\n\nclass DeleteStatus(View):\n ''' tombstone that bad boy '''\n def post(self, request, status_id):\n ''' delete and tombstone a status '''\n status = get_object_or_404(models.Status, id=status_id)\n\n # don't let people delete other people's statuses\n if status.user != request.user:\n return HttpResponseBadRequest()\n\n # perform deletion\n delete_status(status)\n return redirect(request.headers.get('Referer', '/'))\n\ndef find_mentions(content):\n ''' detect @mentions in raw status content '''\n for match in re.finditer(regex.strict_username, content):\n username = match.group().strip().split('@')[1:]\n if len(username) == 1:\n # this looks like a local user (@user), fill in the domain\n username.append(DOMAIN)\n username = '@'.join(username)\n\n mention_user = handle_remote_webfinger(username)\n if not mention_user:\n # we can ignore users we don't know about\n continue\n yield (match.group(), mention_user)\n\n\ndef format_links(content):\n ''' detect and format links '''\n return re.sub(\n r'([^(href=\")]|^|\\()(https?:\\/\\/(%s([\\w\\.\\-_\\/+&\\?=:;,])*))' % \\\n regex.domain,\n r'\\g<1><a href=\"\\g<2>\">\\g<3></a>',\n content)\n\ndef to_markdown(content):\n ''' catch links and convert to markdown '''\n content = format_links(content)\n content = markdown(content)\n # sanitize resulting html\n sanitizer = InputHtmlParser()\n sanitizer.feed(content)\n return sanitizer.get_output()\n", "path": "bookwyrm/views/status.py"}], "after_files": [{"content": "''' what are we here for if not for posting '''\nimport re\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseBadRequest\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\nfrom markdown import markdown\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.sanitize_html import InputHtmlParser\nfrom bookwyrm.settings import DOMAIN\nfrom bookwyrm.status import create_notification, delete_status\nfrom bookwyrm.utils import regex\nfrom .helpers import handle_remote_webfinger\n\n\n# pylint: disable= no-self-use\n@method_decorator(login_required, name='dispatch')\nclass CreateStatus(View):\n ''' the view for *posting* '''\n def post(self, request, status_type):\n ''' create status of whatever type '''\n status_type = status_type[0].upper() + status_type[1:]\n\n try:\n form = getattr(forms, '%sForm' % status_type)(request.POST)\n except AttributeError:\n return HttpResponseBadRequest()\n if not form.is_valid():\n return redirect(request.headers.get('Referer', '/'))\n\n status = form.save(commit=False)\n if not status.sensitive and status.content_warning:\n # the cw text field remains populated when you click \"remove\"\n status.content_warning = None\n status.save(broadcast=False)\n\n # inspect the text for user tags\n content = status.content\n for (mention_text, mention_user) in find_mentions(content):\n # add them to status mentions fk\n status.mention_users.add(mention_user)\n\n # turn the mention into a link\n content = re.sub(\n r'%s([^@]|$)' % mention_text,\n r'<a href=\"%s\">%s</a>\\g<1>' % \\\n (mention_user.remote_id, mention_text),\n content)\n # add reply parent to mentions and notify\n if status.reply_parent:\n status.mention_users.add(status.reply_parent.user)\n\n if status.reply_parent.user.local:\n create_notification(\n status.reply_parent.user,\n 'REPLY',\n related_user=request.user,\n related_status=status\n )\n\n # deduplicate mentions\n 
status.mention_users.set(set(status.mention_users.all()))\n # create mention notifications\n for mention_user in status.mention_users.all():\n if status.reply_parent and mention_user == status.reply_parent.user:\n continue\n if mention_user.local:\n create_notification(\n mention_user,\n 'MENTION',\n related_user=request.user,\n related_status=status\n )\n\n # don't apply formatting to generated notes\n if not isinstance(status, models.GeneratedNote):\n status.content = to_markdown(content)\n # do apply formatting to quotes\n if hasattr(status, 'quote'):\n status.quote = to_markdown(status.quote)\n\n status.save(created=True)\n return redirect(request.headers.get('Referer', '/'))\n\n\nclass DeleteStatus(View):\n ''' tombstone that bad boy '''\n def post(self, request, status_id):\n ''' delete and tombstone a status '''\n status = get_object_or_404(models.Status, id=status_id)\n\n # don't let people delete other people's statuses\n if status.user != request.user:\n return HttpResponseBadRequest()\n\n # perform deletion\n delete_status(status)\n return redirect(request.headers.get('Referer', '/'))\n\ndef find_mentions(content):\n ''' detect @mentions in raw status content '''\n for match in re.finditer(regex.strict_username, content):\n username = match.group().strip().split('@')[1:]\n if len(username) == 1:\n # this looks like a local user (@user), fill in the domain\n username.append(DOMAIN)\n username = '@'.join(username)\n\n mention_user = handle_remote_webfinger(username)\n if not mention_user:\n # we can ignore users we don't know about\n continue\n yield (match.group(), mention_user)\n\n\ndef format_links(content):\n ''' detect and format links '''\n return re.sub(\n r'([^(href=\")]|^|\\()(https?:\\/\\/(%s([\\w\\.\\-_\\/+&\\?=:;,])*))' % \\\n regex.domain,\n r'\\g<1><a href=\"\\g<2>\">\\g<3></a>',\n content)\n\ndef to_markdown(content):\n ''' catch links and convert to markdown '''\n content = markdown(content)\n content = format_links(content)\n # sanitize resulting html\n sanitizer = InputHtmlParser()\n sanitizer.feed(content)\n return sanitizer.get_output()\n", "path": "bookwyrm/views/status.py"}]} | 1,565 | 177 |
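For illustration, the ordering bug fixed in the row above can be reproduced with a short, simplified sketch. The regex and helper below are hypothetical stand-ins for bookwyrm's `format_links`, not the project's actual code:
```python
import re
from markdown import markdown  # same renderer the bookwyrm code imports

URL = re.compile(r'([^(href=")]|^|\()(https?://[^\s<)]+)')

def format_links(content):
    """Naive autolinker: wraps bare URLs in <a> tags."""
    return URL.sub(r'\g<1><a href="\g<2>">\g<2></a>', content)

text = "see [my site](https://example.com)"

broken = markdown(format_links(text))  # old order: autolink, then render
fixed = format_links(markdown(text))   # patched order: render, then autolink

# `broken`: the URL was wrapped in <a ...> before rendering, so the
# [text](url) syntax no longer parses and garbled HTML is left behind.
# `fixed`: the regex skips URLs preceded by a quote (as in href="..."),
# so the rendered link survives untouched.
assert '<a href="https://example.com">my site</a>' in fixed
```
Rendering markdown first and autolinking afterwards works because the autolinker's guard never matches a URL that already sits inside an `href="..."` attribute.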
gh_patches_debug_15384 | rasdani/github-patches | git_diff | bridgecrewio__checkov-2303 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CKV_GIT_3 should not be triggered on archived repositories
**Describe the issue**
_CKV_GIT_3_ is currently also triggered on archived GitHub repositories. When a repository is archived, its `vulnerability_alerts` configuration is automatically changed to `false`, and it is not possible to turn it on again while the repository stays archived. _CKV_GIT_3_ should therefore be changed to ignore archived repositories.
**Examples**
```terraform
resource "github_repository" "test" {
name = "test"
visibility = "private"
archived = true
vulnerability_alerts = false
}
```
**Version (please complete the following information):**
- Starting with Checkov Version 2.0.764
**Additional context**
See the [GitHub documentation](https://docs.github.com/en/code-security/supply-chain-security/managing-vulnerabilities-in-your-projects-dependencies/configuring-dependabot-security-updates#supported-repositories), which states that Dependabot is only supported on non-archived repositories.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/terraform/checks/resource/github/RepositoryEnableVulnerabilityAlerts.py`
Content:
```
1 from typing import Any
2
3 from checkov.common.models.enums import CheckCategories, CheckResult
4 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceCheck
5
6
7 class GithubRepositoryVulnerabilityAlerts(BaseResourceCheck):
8 def __init__(self) -> None:
9 name = "Ensure GitHub repository has vulnerability alerts enabled"
10 id = "CKV_GIT_3"
11 supported_resources = ["github_repository"]
12 categories = [CheckCategories.GENERAL_SECURITY]
13 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
14
15 def scan_resource_conf(self, conf) -> CheckResult:
16 # GitHub enables the alerts on public repos but disables them on private repos by default.
17 # is private repo
18 if conf.get("private") == [True] or conf.get("visibility") in [["private"], ["internal"]]:
19 if conf.get("vulnerability_alerts"):
20 return CheckResult.PASSED
21 return CheckResult.FAILED
22 # is public repo
23 if conf.get("vulnerability_alerts") == [False]:
24 return CheckResult.FAILED
25 return CheckResult.PASSED
26
27
28 check = GithubRepositoryVulnerabilityAlerts()
29
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/terraform/checks/resource/github/RepositoryEnableVulnerabilityAlerts.py b/checkov/terraform/checks/resource/github/RepositoryEnableVulnerabilityAlerts.py
--- a/checkov/terraform/checks/resource/github/RepositoryEnableVulnerabilityAlerts.py
+++ b/checkov/terraform/checks/resource/github/RepositoryEnableVulnerabilityAlerts.py
@@ -13,6 +13,9 @@
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def scan_resource_conf(self, conf) -> CheckResult:
+ # GitHub disables the alerts when archiving the repository without an option to turn them on again.
+ if conf.get("archived") == [True]:
+ return CheckResult.PASSED
# GitHub enables the alerts on public repos but disables them on private repos by default.
# is private repo
if conf.get("private") == [True] or conf.get("visibility") in [["private"], ["internal"]]:
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/github/RepositoryEnableVulnerabilityAlerts.py b/checkov/terraform/checks/resource/github/RepositoryEnableVulnerabilityAlerts.py\n--- a/checkov/terraform/checks/resource/github/RepositoryEnableVulnerabilityAlerts.py\n+++ b/checkov/terraform/checks/resource/github/RepositoryEnableVulnerabilityAlerts.py\n@@ -13,6 +13,9 @@\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n def scan_resource_conf(self, conf) -> CheckResult:\n+ # GitHub disables the alerts when archiving the repository without an option to turn them on again.\n+ if conf.get(\"archived\") == [True]:\n+ return CheckResult.PASSED\n # GitHub enables the alerts on public repos but disables them on private repos by default.\n # is private repo\n if conf.get(\"private\") == [True] or conf.get(\"visibility\") in [[\"private\"], [\"internal\"]]:\n", "issue": "CKV_GIT_3 should not be triggered on archived repositories\n**Describe the issue**\r\n_CKV_GIT_3_ currently gets triggered also on archived GitHub repositories. When archiving a repository the configuration `vulnerability_alerts` will get changed to `false` automatically. It's also not possible to turn it on again on an archived repository. _CKV_GIT_3_ should be changed to ignore archived repositories.\r\n\r\n**Examples**\r\n\r\n```terraform\r\nresource \"github_repository\" \"test\" {\r\n name = \"test\"\r\n visibility = \"private\"\r\n archived = true\r\n vulnerability_alerts = false\r\n}\r\n```\r\n\r\n**Version (please complete the following information):**\r\n - Starting with Checkov Version 2.0.764\r\n\r\n**Additional context**\r\nSee the [GitHub documentation](https://docs.github.com/en/code-security/supply-chain-security/managing-vulnerabilities-in-your-projects-dependencies/configuring-dependabot-security-updates#supported-repositories) that Dependabot is only supported on non-archived repositories.\r\n\n", "before_files": [{"content": "from typing import Any\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceCheck\n\n\nclass GithubRepositoryVulnerabilityAlerts(BaseResourceCheck):\n def __init__(self) -> None:\n name = \"Ensure GitHub repository has vulnerability alerts enabled\"\n id = \"CKV_GIT_3\"\n supported_resources = [\"github_repository\"]\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf) -> CheckResult:\n # GitHub enables the alerts on public repos but disables them on private repos by default.\n # is private repo\n if conf.get(\"private\") == [True] or conf.get(\"visibility\") in [[\"private\"], [\"internal\"]]:\n if conf.get(\"vulnerability_alerts\"):\n return CheckResult.PASSED\n return CheckResult.FAILED\n # is public repo\n if conf.get(\"vulnerability_alerts\") == [False]:\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n\ncheck = GithubRepositoryVulnerabilityAlerts()\n", "path": "checkov/terraform/checks/resource/github/RepositoryEnableVulnerabilityAlerts.py"}], "after_files": [{"content": "from typing import Any\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceCheck\n\n\nclass GithubRepositoryVulnerabilityAlerts(BaseResourceCheck):\n def __init__(self) -> None:\n name = \"Ensure 
GitHub repository has vulnerability alerts enabled\"\n id = \"CKV_GIT_3\"\n supported_resources = [\"github_repository\"]\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf) -> CheckResult:\n # GitHub disables the alerts when archiving the repository without an option to turn them on again.\n if conf.get(\"archived\") == [True]:\n return CheckResult.PASSED\n # GitHub enables the alerts on public repos but disables them on private repos by default.\n # is private repo\n if conf.get(\"private\") == [True] or conf.get(\"visibility\") in [[\"private\"], [\"internal\"]]:\n if conf.get(\"vulnerability_alerts\"):\n return CheckResult.PASSED\n return CheckResult.FAILED\n # is public repo\n if conf.get(\"vulnerability_alerts\") == [False]:\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n\ncheck = GithubRepositoryVulnerabilityAlerts()\n", "path": "checkov/terraform/checks/resource/github/RepositoryEnableVulnerabilityAlerts.py"}]} | 804 | 219 |
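For context, the patched check above can be exercised with a self-contained sketch like this one — a hypothetical mini-version of the Checkov check, using the list-wrapped values Checkov produces when parsing HCL:
```python
from enum import Enum

class CheckResult(Enum):
    PASSED = "PASSED"
    FAILED = "FAILED"

def scan(conf: dict) -> CheckResult:
    # Archived repos can never re-enable vulnerability alerts, so they
    # should not be reported as findings.
    if conf.get("archived") == [True]:
        return CheckResult.PASSED
    # Private/internal repos: alerts are off by default, so the check looks
    # for an explicit vulnerability_alerts entry.
    if conf.get("private") == [True] or conf.get("visibility") in [["private"], ["internal"]]:
        return CheckResult.PASSED if conf.get("vulnerability_alerts") else CheckResult.FAILED
    # Public repos: alerts are on by default, so only an explicit opt-out fails.
    return CheckResult.FAILED if conf.get("vulnerability_alerts") == [False] else CheckResult.PASSED

# The repository from the issue report now passes:
assert scan({"archived": [True], "visibility": ["private"],
             "vulnerability_alerts": [False]}) == CheckResult.PASSED
```
The early `archived` guard is exactly what makes the Terraform example from the issue pass instead of fail.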
gh_patches_debug_19462 | rasdani/github-patches | git_diff | sublimelsp__LSP-1997 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
goto commands don't restore selection when location picking is canceled
**Describe the bug**
When there's more than one location available for a `goto*` command, a quick panel is shown to pick one.
Highlighting entries modifies the selection, but canceling the operation doesn't restore the initial selection.
**Expected behavior**
It should restore the selection, like Sublime Text's built-in goto functionality does.
**Screenshots**

**Environment (please complete the following information):**
- OS: Windows 10
- Sublime Text version: 4126
- LSP version: 1.16.3
- Language servers used: LSP-rust-analyzer
**Additional context**
Add any other context about the problem here. For example, whether you're using a helper
package or your manual server configuration in LSP.sublime-settings. When using
a manual server configuration, please include it here if you believe it's applicable.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugin/locationpicker.py`
Content:
```
1 from .core.logging import debug
2 from .core.protocol import DocumentUri, Location, Position
3 from .core.protocol import LocationLink
4 from .core.sessions import Session
5 from .core.typing import Union, List, Optional, Tuple
6 from .core.views import get_uri_and_position_from_location
7 from .core.views import location_to_human_readable
8 from .core.views import to_encoded_filename
9 import functools
10 import sublime
11 import weakref
12
13
14 def open_location_async(
15 session: Session,
16 location: Union[Location, LocationLink],
17 side_by_side: bool,
18 force_group: bool
19 ) -> None:
20 flags = sublime.ENCODED_POSITION
21 if force_group:
22 flags |= sublime.FORCE_GROUP
23 if side_by_side:
24 flags |= sublime.ADD_TO_SELECTION | sublime.SEMI_TRANSIENT
25
26 def check_success_async(view: Optional[sublime.View]) -> None:
27 if not view:
28 sublime.error_message("Unable to open URI")
29
30 session.open_location_async(location, flags).then(check_success_async)
31
32
33 def open_basic_file(
34 session: Session,
35 uri: str,
36 position: Position,
37 flags: int = 0,
38 group: Optional[int] = None
39 ) -> sublime.View:
40 filename = session.config.map_server_uri_to_client_path(uri)
41 if group is None:
42 group = session.window.active_group()
43 return session.window.open_file(to_encoded_filename(filename, position), flags=flags, group=group)
44
45
46 class LocationPicker:
47
48 def __init__(
49 self,
50 view: sublime.View,
51 session: Session,
52 locations: Union[List[Location], List[LocationLink]],
53 side_by_side: bool
54 ) -> None:
55 self._view = view
56 window = view.window()
57 if not window:
58 raise ValueError("missing window")
59 self._window = window
60 self._weaksession = weakref.ref(session)
61 self._side_by_side = side_by_side
62 self._items = locations
63 self._highlighted_view = None # type: Optional[sublime.View]
64 manager = session.manager()
65 base_dir = manager.get_project_path(view.file_name() or "") if manager else None
66 self._window.show_quick_panel(
67 items=[location_to_human_readable(session.config, base_dir, location) for location in locations],
68 on_select=self._select_entry,
69 on_highlight=self._highlight_entry,
70 flags=sublime.KEEP_OPEN_ON_FOCUS_LOST
71 )
72
73 def _unpack(self, index: int) -> Tuple[Optional[Session], Union[Location, LocationLink], DocumentUri, Position]:
74 location = self._items[index]
75 uri, position = get_uri_and_position_from_location(location)
76 return self._weaksession(), location, uri, position
77
78 def _select_entry(self, index: int) -> None:
79 if index >= 0 and self._view.is_valid():
80 session, location, uri, position = self._unpack(index)
81 if not session:
82 return
83 # Note: this has to run on the main thread (and not via open_location_async)
84 # otherwise the bevior feels weird. It's the only reason why open_basic_file exists.
85 if uri.startswith("file:"):
86 flags = sublime.ENCODED_POSITION
87 if not self._side_by_side:
88 open_basic_file(session, uri, position, flags)
89 else:
90 sublime.set_timeout_async(
91 functools.partial(open_location_async, session, location, self._side_by_side, True))
92 else:
93 self._window.focus_view(self._view)
94 # When in side-by-side mode close the current highlighted
95 # sheet upon canceling if the sheet is semi-transient
96 if self._side_by_side and self._highlighted_view:
97 sheet = self._highlighted_view.sheet()
98 if sheet and sheet.is_semi_transient():
99 self._highlighted_view.close()
100
101 def _highlight_entry(self, index: int) -> None:
102 session, _, uri, position = self._unpack(index)
103 if not session:
104 return
105 if uri.startswith("file:"):
106 flags = sublime.ENCODED_POSITION | sublime.FORCE_GROUP
107 if self._side_by_side:
108 if self._highlighted_view and self._highlighted_view.is_valid():
109 # Replacing the MRU is done relative to the current highlighted sheet
110 self._window.focus_view(self._highlighted_view)
111 flags |= sublime.REPLACE_MRU | sublime.SEMI_TRANSIENT
112 else:
113 flags |= sublime.ADD_TO_SELECTION | sublime.SEMI_TRANSIENT
114 else:
115 flags |= sublime.TRANSIENT
116 self._highlighted_view = open_basic_file(session, uri, position, flags, self._window.active_group())
117 else:
118 # TODO: Preview non-file uris?
119 debug("no preview for", uri)
120
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugin/locationpicker.py b/plugin/locationpicker.py
--- a/plugin/locationpicker.py
+++ b/plugin/locationpicker.py
@@ -53,6 +53,7 @@
side_by_side: bool
) -> None:
self._view = view
+ self._view_states = ([r.to_tuple() for r in view.sel()], view.viewport_position())
window = view.window()
if not window:
raise ValueError("missing window")
@@ -76,6 +77,9 @@
return self._weaksession(), location, uri, position
def _select_entry(self, index: int) -> None:
+ if self._view.is_valid() and not self._side_by_side:
+ self._view.set_viewport_position(self._view_states[1])
+ self._view.run_command('lsp_selection_set', {'regions': self._view_states[0]})
if index >= 0 and self._view.is_valid():
session, location, uri, position = self._unpack(index)
if not session:
| {"golden_diff": "diff --git a/plugin/locationpicker.py b/plugin/locationpicker.py\n--- a/plugin/locationpicker.py\n+++ b/plugin/locationpicker.py\n@@ -53,6 +53,7 @@\n side_by_side: bool\n ) -> None:\n self._view = view\n+ self._view_states = ([r.to_tuple() for r in view.sel()], view.viewport_position())\n window = view.window()\n if not window:\n raise ValueError(\"missing window\")\n@@ -76,6 +77,9 @@\n return self._weaksession(), location, uri, position\n \n def _select_entry(self, index: int) -> None:\n+ if self._view.is_valid() and not self._side_by_side:\n+ self._view.set_viewport_position(self._view_states[1])\n+ self._view.run_command('lsp_selection_set', {'regions': self._view_states[0]})\n if index >= 0 and self._view.is_valid():\n session, location, uri, position = self._unpack(index)\n if not session:\n", "issue": "goto commands don't restore selection when location picking is canceled\n**Describe the bug**\r\nwhen there's more than one location available for a `goto*` command, a quick panel is shown to pick.\r\nhighlighting entries modifies the selection, canceling the operation doesn't restore the initial selection.\r\n\r\n**Expected behavior**\r\nit should restore the selection, like ST's built-in\r\n\r\n**Screenshots**\r\n\r\n\r\n**Environment (please complete the following information):**\r\n- OS: Windows 10\r\n- Sublime Text version: 4126\r\n- LSP version: 1.16.3\r\n- Language servers used: LSP-rust-analyzer\r\n\r\n**Additional context**\r\nAdd any other context about the problem here. For example, whether you're using a helper\r\npackage or your manual server configuration in LSP.sublime-settings. When using\r\na manual server configuration please include it here if you believe it's applicable.\r\n\n", "before_files": [{"content": "from .core.logging import debug\nfrom .core.protocol import DocumentUri, Location, Position\nfrom .core.protocol import LocationLink\nfrom .core.sessions import Session\nfrom .core.typing import Union, List, Optional, Tuple\nfrom .core.views import get_uri_and_position_from_location\nfrom .core.views import location_to_human_readable\nfrom .core.views import to_encoded_filename\nimport functools\nimport sublime\nimport weakref\n\n\ndef open_location_async(\n session: Session,\n location: Union[Location, LocationLink],\n side_by_side: bool,\n force_group: bool\n) -> None:\n flags = sublime.ENCODED_POSITION\n if force_group:\n flags |= sublime.FORCE_GROUP\n if side_by_side:\n flags |= sublime.ADD_TO_SELECTION | sublime.SEMI_TRANSIENT\n\n def check_success_async(view: Optional[sublime.View]) -> None:\n if not view:\n sublime.error_message(\"Unable to open URI\")\n\n session.open_location_async(location, flags).then(check_success_async)\n\n\ndef open_basic_file(\n session: Session,\n uri: str,\n position: Position,\n flags: int = 0,\n group: Optional[int] = None\n) -> sublime.View:\n filename = session.config.map_server_uri_to_client_path(uri)\n if group is None:\n group = session.window.active_group()\n return session.window.open_file(to_encoded_filename(filename, position), flags=flags, group=group)\n\n\nclass LocationPicker:\n\n def __init__(\n self,\n view: sublime.View,\n session: Session,\n locations: Union[List[Location], List[LocationLink]],\n side_by_side: bool\n ) -> None:\n self._view = view\n window = view.window()\n if not window:\n raise ValueError(\"missing window\")\n self._window = window\n self._weaksession = weakref.ref(session)\n self._side_by_side = side_by_side\n self._items = locations\n self._highlighted_view = None # type: 
Optional[sublime.View]\n manager = session.manager()\n base_dir = manager.get_project_path(view.file_name() or \"\") if manager else None\n self._window.show_quick_panel(\n items=[location_to_human_readable(session.config, base_dir, location) for location in locations],\n on_select=self._select_entry,\n on_highlight=self._highlight_entry,\n flags=sublime.KEEP_OPEN_ON_FOCUS_LOST\n )\n\n def _unpack(self, index: int) -> Tuple[Optional[Session], Union[Location, LocationLink], DocumentUri, Position]:\n location = self._items[index]\n uri, position = get_uri_and_position_from_location(location)\n return self._weaksession(), location, uri, position\n\n def _select_entry(self, index: int) -> None:\n if index >= 0 and self._view.is_valid():\n session, location, uri, position = self._unpack(index)\n if not session:\n return\n # Note: this has to run on the main thread (and not via open_location_async)\n # otherwise the bevior feels weird. It's the only reason why open_basic_file exists.\n if uri.startswith(\"file:\"):\n flags = sublime.ENCODED_POSITION\n if not self._side_by_side:\n open_basic_file(session, uri, position, flags)\n else:\n sublime.set_timeout_async(\n functools.partial(open_location_async, session, location, self._side_by_side, True))\n else:\n self._window.focus_view(self._view)\n # When in side-by-side mode close the current highlighted\n # sheet upon canceling if the sheet is semi-transient\n if self._side_by_side and self._highlighted_view:\n sheet = self._highlighted_view.sheet()\n if sheet and sheet.is_semi_transient():\n self._highlighted_view.close()\n\n def _highlight_entry(self, index: int) -> None:\n session, _, uri, position = self._unpack(index)\n if not session:\n return\n if uri.startswith(\"file:\"):\n flags = sublime.ENCODED_POSITION | sublime.FORCE_GROUP\n if self._side_by_side:\n if self._highlighted_view and self._highlighted_view.is_valid():\n # Replacing the MRU is done relative to the current highlighted sheet\n self._window.focus_view(self._highlighted_view)\n flags |= sublime.REPLACE_MRU | sublime.SEMI_TRANSIENT\n else:\n flags |= sublime.ADD_TO_SELECTION | sublime.SEMI_TRANSIENT\n else:\n flags |= sublime.TRANSIENT\n self._highlighted_view = open_basic_file(session, uri, position, flags, self._window.active_group())\n else:\n # TODO: Preview non-file uris?\n debug(\"no preview for\", uri)\n", "path": "plugin/locationpicker.py"}], "after_files": [{"content": "from .core.logging import debug\nfrom .core.protocol import DocumentUri, Location, Position\nfrom .core.protocol import LocationLink\nfrom .core.sessions import Session\nfrom .core.typing import Union, List, Optional, Tuple\nfrom .core.views import get_uri_and_position_from_location\nfrom .core.views import location_to_human_readable\nfrom .core.views import to_encoded_filename\nimport functools\nimport sublime\nimport weakref\n\n\ndef open_location_async(\n session: Session,\n location: Union[Location, LocationLink],\n side_by_side: bool,\n force_group: bool\n) -> None:\n flags = sublime.ENCODED_POSITION\n if force_group:\n flags |= sublime.FORCE_GROUP\n if side_by_side:\n flags |= sublime.ADD_TO_SELECTION | sublime.SEMI_TRANSIENT\n\n def check_success_async(view: Optional[sublime.View]) -> None:\n if not view:\n sublime.error_message(\"Unable to open URI\")\n\n session.open_location_async(location, flags).then(check_success_async)\n\n\ndef open_basic_file(\n session: Session,\n uri: str,\n position: Position,\n flags: int = 0,\n group: Optional[int] = None\n) -> sublime.View:\n filename = 
session.config.map_server_uri_to_client_path(uri)\n if group is None:\n group = session.window.active_group()\n return session.window.open_file(to_encoded_filename(filename, position), flags=flags, group=group)\n\n\nclass LocationPicker:\n\n def __init__(\n self,\n view: sublime.View,\n session: Session,\n locations: Union[List[Location], List[LocationLink]],\n side_by_side: bool\n ) -> None:\n self._view = view\n self._view_states = ([r.to_tuple() for r in view.sel()], view.viewport_position())\n window = view.window()\n if not window:\n raise ValueError(\"missing window\")\n self._window = window\n self._weaksession = weakref.ref(session)\n self._side_by_side = side_by_side\n self._items = locations\n self._highlighted_view = None # type: Optional[sublime.View]\n manager = session.manager()\n base_dir = manager.get_project_path(view.file_name() or \"\") if manager else None\n self._window.show_quick_panel(\n items=[location_to_human_readable(session.config, base_dir, location) for location in locations],\n on_select=self._select_entry,\n on_highlight=self._highlight_entry,\n flags=sublime.KEEP_OPEN_ON_FOCUS_LOST\n )\n\n def _unpack(self, index: int) -> Tuple[Optional[Session], Union[Location, LocationLink], DocumentUri, Position]:\n location = self._items[index]\n uri, position = get_uri_and_position_from_location(location)\n return self._weaksession(), location, uri, position\n\n def _select_entry(self, index: int) -> None:\n if self._view.is_valid() and not self._side_by_side:\n self._view.set_viewport_position(self._view_states[1])\n self._view.run_command('lsp_selection_set', {'regions': self._view_states[0]})\n if index >= 0 and self._view.is_valid():\n session, location, uri, position = self._unpack(index)\n if not session:\n return\n # Note: this has to run on the main thread (and not via open_location_async)\n # otherwise the bevior feels weird. It's the only reason why open_basic_file exists.\n if uri.startswith(\"file:\"):\n flags = sublime.ENCODED_POSITION\n if not self._side_by_side:\n open_basic_file(session, uri, position, flags)\n else:\n sublime.set_timeout_async(\n functools.partial(open_location_async, session, location, self._side_by_side, True))\n else:\n self._window.focus_view(self._view)\n # When in side-by-side mode close the current highlighted\n # sheet upon canceling if the sheet is semi-transient\n if self._side_by_side and self._highlighted_view:\n sheet = self._highlighted_view.sheet()\n if sheet and sheet.is_semi_transient():\n self._highlighted_view.close()\n\n def _highlight_entry(self, index: int) -> None:\n session, _, uri, position = self._unpack(index)\n if not session:\n return\n if uri.startswith(\"file:\"):\n flags = sublime.ENCODED_POSITION | sublime.FORCE_GROUP\n if self._side_by_side:\n if self._highlighted_view and self._highlighted_view.is_valid():\n # Replacing the MRU is done relative to the current highlighted sheet\n self._window.focus_view(self._highlighted_view)\n flags |= sublime.REPLACE_MRU | sublime.SEMI_TRANSIENT\n else:\n flags |= sublime.ADD_TO_SELECTION | sublime.SEMI_TRANSIENT\n else:\n flags |= sublime.TRANSIENT\n self._highlighted_view = open_basic_file(session, uri, position, flags, self._window.active_group())\n else:\n # TODO: Preview non-file uris?\n debug(\"no preview for\", uri)\n", "path": "plugin/locationpicker.py"}]} | 1,791 | 226 |
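For context, the heart of the fix above is a snapshot/restore pattern. A minimal sketch, assuming the Sublime Text 4 API and the `lsp_selection_set` command the patch itself invokes (the helper names here are illustrative, not part of the plugin):
```python
import sublime

def remember_view_state(view: sublime.View):
    """Snapshot the caret regions and scroll position before opening the panel."""
    return [r.to_tuple() for r in view.sel()], view.viewport_position()

def restore_view_state(view: sublime.View, state) -> None:
    """Put the view back the way it was when the quick panel is cancelled."""
    regions, viewport = state
    view.set_viewport_position(viewport)
    view.run_command("lsp_selection_set", {"regions": regions})
```
The snapshot corresponds to the `self._view_states` line added in `__init__`, and the restore mirrors the two lines added at the top of `_select_entry`.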
gh_patches_debug_17636 | rasdani/github-patches | git_diff | svthalia__concrexit-3528 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Albums pagination doesn't maintain search terms
### Describe the bug
If you open https://thalia.nu/members/photos/?keywords=borrel#photos-albums, then go to the second page using the pagination buttons, the search term is dropped.
### Expected behaviour
<!-- A clear and concise description of what you expected to happen. -->
The search term remains
### Additional context
<!-- Add any other context about the problem here. -->
This could be because we introduced the shared paginated view template, so it's quite likely this occurs for other paginated filterable/searchable views as well.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/thaliawebsite/views.py`
Content:
```
1 """General views for the website."""
2
3 from django.contrib.admin.views.decorators import staff_member_required
4 from django.contrib.auth.views import LoginView, PasswordResetView
5 from django.core.exceptions import PermissionDenied
6 from django.http import HttpResponse, HttpResponseForbidden
7 from django.shortcuts import redirect
8 from django.utils.decorators import method_decorator
9 from django.views.generic import ListView, TemplateView
10 from django.views.generic.base import View
11
12 from django_ratelimit.decorators import ratelimit
13
14
15 class IndexView(TemplateView):
16 template_name = "index.html"
17
18
19 @method_decorator(staff_member_required, "dispatch")
20 class TestCrashView(View):
21 """Test view to intentionally crash to test the error handling."""
22
23 def dispatch(self, request, *args, **kwargs) -> HttpResponse:
24 if not request.user.is_superuser:
25 return HttpResponseForbidden("This is not for you")
26 raise Exception("Test exception")
27
28
29 class PagedView(ListView):
30 """A ListView with automatic pagination."""
31
32 def get_context_data(self, **kwargs) -> dict:
33 context = super().get_context_data(**kwargs)
34 page = context["page_obj"].number
35 paginator = context["paginator"]
36
37 # Show the two pages before and after the current page
38 page_range_start = max(1, page - 2)
39 page_range_stop = min(page + 3, paginator.num_pages + 1)
40
41 # Add extra pages if we show less than 5 pages
42 page_range_start = min(page_range_start, page_range_stop - 5)
43 page_range_start = max(1, page_range_start)
44
45 # Add extra pages if we still show less than 5 pages
46 page_range_stop = max(page_range_stop, page_range_start + 5)
47 page_range_stop = min(page_range_stop, paginator.num_pages + 1)
48
49 page_range = range(page_range_start, page_range_stop)
50
51 context.update(
52 {
53 "page_range": page_range,
54 }
55 )
56
57 return context
58
59
60 class RateLimitedPasswordResetView(PasswordResetView):
61 @method_decorator(ratelimit(key="ip", rate="5/h"))
62 def post(self, request, *args, **kwargs):
63 return super().post(request, *args, **kwargs)
64
65
66 class RateLimitedLoginView(LoginView):
67 @method_decorator(ratelimit(key="ip", rate="30/h"))
68 @method_decorator(ratelimit(key="post:username", rate="30/h"))
69 def post(self, request, *args, **kwargs):
70 return super().post(request, *args, **kwargs)
71
72
73 def rate_limited_view(request, *args, **kwargs):
74 return HttpResponse("You are rate limited", status=429)
75
76
77 def admin_unauthorized_view(request):
78 if not request.member:
79 url = "/user/login"
80 args = request.META.get("QUERY_STRING", "")
81 if args:
82 url = f"{url}?{args}"
83 return redirect(url)
84 elif not request.member.is_staff and not request.member.is_superuser:
85 raise PermissionDenied("You are not allowed to access the administration page.")
86 else:
87 return redirect(request.GET.get("next", "/"))
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/thaliawebsite/views.py b/website/thaliawebsite/views.py
--- a/website/thaliawebsite/views.py
+++ b/website/thaliawebsite/views.py
@@ -31,6 +31,7 @@
def get_context_data(self, **kwargs) -> dict:
context = super().get_context_data(**kwargs)
+ print(kwargs)
page = context["page_obj"].number
paginator = context["paginator"]
@@ -48,9 +49,17 @@
page_range = range(page_range_start, page_range_stop)
+ querydict = self.request.GET.copy()
+
+ if "page" in querydict:
+ del querydict["page"]
+
context.update(
{
"page_range": page_range,
+ "base_url": f"{self.request.path}?{querydict.urlencode()}&"
+ if querydict
+ else f"{self.request.path}?",
}
)
| {"golden_diff": "diff --git a/website/thaliawebsite/views.py b/website/thaliawebsite/views.py\n--- a/website/thaliawebsite/views.py\n+++ b/website/thaliawebsite/views.py\n@@ -31,6 +31,7 @@\n \n def get_context_data(self, **kwargs) -> dict:\n context = super().get_context_data(**kwargs)\n+ print(kwargs)\n page = context[\"page_obj\"].number\n paginator = context[\"paginator\"]\n \n@@ -48,9 +49,17 @@\n \n page_range = range(page_range_start, page_range_stop)\n \n+ querydict = self.request.GET.copy()\n+\n+ if \"page\" in querydict:\n+ del querydict[\"page\"]\n+\n context.update(\n {\n \"page_range\": page_range,\n+ \"base_url\": f\"{self.request.path}?{querydict.urlencode()}&\"\n+ if querydict\n+ else f\"{self.request.path}?\",\n }\n )\n", "issue": "Albums pagination doesn't maintain search terms\n### Describe the bug\r\nIf you open https://thalia.nu/members/photos/?keywords=borrel#photos-albums, then go to the second page using the pagination buttons, the search term is dropped.\r\n\r\n### Expected behaviour\r\n<!-- A clear and concise description of what you expected to happen. -->\r\nThe search term remains\r\n\r\n### Additional context\r\n<!-- Add any other context about the problem here. -->\r\nCould be since we introduced the shared paginated view template? So it's quite likely this occurs for other paginated filterable/searchable views as well.\n", "before_files": [{"content": "\"\"\"General views for the website.\"\"\"\n\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.auth.views import LoginView, PasswordResetView\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import HttpResponse, HttpResponseForbidden\nfrom django.shortcuts import redirect\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic import ListView, TemplateView\nfrom django.views.generic.base import View\n\nfrom django_ratelimit.decorators import ratelimit\n\n\nclass IndexView(TemplateView):\n template_name = \"index.html\"\n\n\n@method_decorator(staff_member_required, \"dispatch\")\nclass TestCrashView(View):\n \"\"\"Test view to intentionally crash to test the error handling.\"\"\"\n\n def dispatch(self, request, *args, **kwargs) -> HttpResponse:\n if not request.user.is_superuser:\n return HttpResponseForbidden(\"This is not for you\")\n raise Exception(\"Test exception\")\n\n\nclass PagedView(ListView):\n \"\"\"A ListView with automatic pagination.\"\"\"\n\n def get_context_data(self, **kwargs) -> dict:\n context = super().get_context_data(**kwargs)\n page = context[\"page_obj\"].number\n paginator = context[\"paginator\"]\n\n # Show the two pages before and after the current page\n page_range_start = max(1, page - 2)\n page_range_stop = min(page + 3, paginator.num_pages + 1)\n\n # Add extra pages if we show less than 5 pages\n page_range_start = min(page_range_start, page_range_stop - 5)\n page_range_start = max(1, page_range_start)\n\n # Add extra pages if we still show less than 5 pages\n page_range_stop = max(page_range_stop, page_range_start + 5)\n page_range_stop = min(page_range_stop, paginator.num_pages + 1)\n\n page_range = range(page_range_start, page_range_stop)\n\n context.update(\n {\n \"page_range\": page_range,\n }\n )\n\n return context\n\n\nclass RateLimitedPasswordResetView(PasswordResetView):\n @method_decorator(ratelimit(key=\"ip\", rate=\"5/h\"))\n def post(self, request, *args, **kwargs):\n return super().post(request, *args, **kwargs)\n\n\nclass RateLimitedLoginView(LoginView):\n 
@method_decorator(ratelimit(key=\"ip\", rate=\"30/h\"))\n @method_decorator(ratelimit(key=\"post:username\", rate=\"30/h\"))\n def post(self, request, *args, **kwargs):\n return super().post(request, *args, **kwargs)\n\n\ndef rate_limited_view(request, *args, **kwargs):\n return HttpResponse(\"You are rate limited\", status=429)\n\n\ndef admin_unauthorized_view(request):\n if not request.member:\n url = \"/user/login\"\n args = request.META.get(\"QUERY_STRING\", \"\")\n if args:\n url = f\"{url}?{args}\"\n return redirect(url)\n elif not request.member.is_staff and not request.member.is_superuser:\n raise PermissionDenied(\"You are not allowed to access the administration page.\")\n else:\n return redirect(request.GET.get(\"next\", \"/\"))\n", "path": "website/thaliawebsite/views.py"}], "after_files": [{"content": "\"\"\"General views for the website.\"\"\"\n\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.auth.views import LoginView, PasswordResetView\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import HttpResponse, HttpResponseForbidden\nfrom django.shortcuts import redirect\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic import ListView, TemplateView\nfrom django.views.generic.base import View\n\nfrom django_ratelimit.decorators import ratelimit\n\n\nclass IndexView(TemplateView):\n template_name = \"index.html\"\n\n\n@method_decorator(staff_member_required, \"dispatch\")\nclass TestCrashView(View):\n \"\"\"Test view to intentionally crash to test the error handling.\"\"\"\n\n def dispatch(self, request, *args, **kwargs) -> HttpResponse:\n if not request.user.is_superuser:\n return HttpResponseForbidden(\"This is not for you\")\n raise Exception(\"Test exception\")\n\n\nclass PagedView(ListView):\n \"\"\"A ListView with automatic pagination.\"\"\"\n\n def get_context_data(self, **kwargs) -> dict:\n context = super().get_context_data(**kwargs)\n print(kwargs)\n page = context[\"page_obj\"].number\n paginator = context[\"paginator\"]\n\n # Show the two pages before and after the current page\n page_range_start = max(1, page - 2)\n page_range_stop = min(page + 3, paginator.num_pages + 1)\n\n # Add extra pages if we show less than 5 pages\n page_range_start = min(page_range_start, page_range_stop - 5)\n page_range_start = max(1, page_range_start)\n\n # Add extra pages if we still show less than 5 pages\n page_range_stop = max(page_range_stop, page_range_start + 5)\n page_range_stop = min(page_range_stop, paginator.num_pages + 1)\n\n page_range = range(page_range_start, page_range_stop)\n\n querydict = self.request.GET.copy()\n\n if \"page\" in querydict:\n del querydict[\"page\"]\n\n context.update(\n {\n \"page_range\": page_range,\n \"base_url\": f\"{self.request.path}?{querydict.urlencode()}&\"\n if querydict\n else f\"{self.request.path}?\",\n }\n )\n\n return context\n\n\nclass RateLimitedPasswordResetView(PasswordResetView):\n @method_decorator(ratelimit(key=\"ip\", rate=\"5/h\"))\n def post(self, request, *args, **kwargs):\n return super().post(request, *args, **kwargs)\n\n\nclass RateLimitedLoginView(LoginView):\n @method_decorator(ratelimit(key=\"ip\", rate=\"30/h\"))\n @method_decorator(ratelimit(key=\"post:username\", rate=\"30/h\"))\n def post(self, request, *args, **kwargs):\n return super().post(request, *args, **kwargs)\n\n\ndef rate_limited_view(request, *args, **kwargs):\n return HttpResponse(\"You are rate limited\", status=429)\n\n\ndef 
admin_unauthorized_view(request):\n if not request.member:\n url = \"/user/login\"\n args = request.META.get(\"QUERY_STRING\", \"\")\n if args:\n url = f\"{url}?{args}\"\n return redirect(url)\n elif not request.member.is_staff and not request.member.is_superuser:\n raise PermissionDenied(\"You are not allowed to access the administration page.\")\n else:\n return redirect(request.GET.get(\"next\", \"/\"))\n", "path": "website/thaliawebsite/views.py"}]} | 1,228 | 216 |
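For context, the `base_url` construction added above boils down to "keep every query parameter except `page`". A standard-library sketch of the same idea, using `urllib.parse` in place of Django's `QueryDict` (note that `dict()` collapses repeated parameters, which `QueryDict` would preserve):
```python
from urllib.parse import parse_qsl, urlencode

def base_url(path: str, query_string: str) -> str:
    params = dict(parse_qsl(query_string))
    params.pop("page", None)  # the template appends its own page number
    return f"{path}?{urlencode(params)}&" if params else f"{path}?"

assert base_url("/members/photos/", "keywords=borrel&page=2") == "/members/photos/?keywords=borrel&"
assert base_url("/members/photos/", "page=2") == "/members/photos/?"
```
A pagination template can then append its own page number, e.g. `f"{base_url}page=3"`, without dropping the active search terms.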
gh_patches_debug_23410 | rasdani/github-patches | git_diff | OCA__bank-payment-630 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[12.0][BUG] account_payment_sale
Hi
I have found a bug in module account_payment_sale, but I am not sure how to fix it nicely.
The payment_mode_id does not propagate from the sale order to the invoice.
I guess the tests are a bit too naive; that is why they pass anyway.
Here we try to propagate the payment mode: https://github.com/OCA/bank-payment/blob/12.0/account_payment_sale/models/sale_order.py#L35
Here, the invoice is created with the right value (coming from the SO): https://github.com/OCA/OCB/blob/12.0/addons/sale/models/sale.py#L521
And it is overridden here: https://github.com/OCA/OCB/blob/12.0/addons/sale/models/sale.py#L570
I really don't get why they have refactored it this way: they create the invoice and then override a lot of values...
And I do not really see a clean way to solve this.
Any idea?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `account_payment_sale/models/sale_order.py`
Content:
```
1 # Copyright 2014-2016 Akretion - Alexis de Lattre
2 # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
3
4 from odoo import models, fields, api
5
6
7 class SaleOrder(models.Model):
8 _inherit = "sale.order"
9
10 payment_mode_id = fields.Many2one(
11 'account.payment.mode', string='Payment Mode',
12 domain=[('payment_type', '=', 'inbound')])
13
14 def _get_payment_mode_vals(self, vals):
15 if self.payment_mode_id:
16 vals['payment_mode_id'] = self.payment_mode_id.id
17 if self.payment_mode_id.bank_account_link == 'fixed':
18 vals['partner_bank_id'] =\
19 self.payment_mode_id.fixed_journal_id.bank_account_id.id
20 return vals
21
22 @api.onchange('partner_id')
23 def onchange_partner_id(self):
24 res = super().onchange_partner_id()
25 if self.partner_id:
26 self.payment_mode_id = self.partner_id.customer_payment_mode_id
27 else:
28 self.payment_mode_id = False
29 return res
30
31 @api.multi
32 def _prepare_invoice(self):
33 """Copy bank partner from sale order to invoice"""
34 vals = super()._prepare_invoice()
35 return self._get_payment_mode_vals(vals)
36
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/account_payment_sale/models/sale_order.py b/account_payment_sale/models/sale_order.py
--- a/account_payment_sale/models/sale_order.py
+++ b/account_payment_sale/models/sale_order.py
@@ -33,3 +33,31 @@
"""Copy bank partner from sale order to invoice"""
vals = super()._prepare_invoice()
return self._get_payment_mode_vals(vals)
+
+ def _finalize_invoices(self, invoices, references):
+ """
+ Invoked after creating invoices at the end of action_invoice_create.
+
+ We must override this method since the onchange on partner is called by
+ the base method and therefore will change the specific payment_mode set
+ on the SO if one is defined on the partner..
+
+ :param invoices: {group_key: invoice}
+ :param references: {invoice: order}
+ """
+ payment_vals_by_invoice = {}
+ for invoice in invoices.values():
+ payment_vals_by_invoice[invoice] = {
+ 'payment_mode_id': invoice.payment_mode_id.id,
+ 'partner_bank_id': invoice.partner_bank_id.id
+ }
+ res = super()._finalize_invoices(invoices, references)
+ for invoice in invoices.values():
+ payment_vals = payment_vals_by_invoice[invoice]
+ if invoice.payment_mode_id.id == payment_vals['payment_mode_id']:
+ payment_vals.pop("payment_mode_id")
+ if invoice.partner_bank_id.id == payment_vals["partner_bank_id"]:
+ payment_vals.pop("partner_bank_id")
+ if payment_vals:
+ invoice.write(payment_vals)
+ return res
| {"golden_diff": "diff --git a/account_payment_sale/models/sale_order.py b/account_payment_sale/models/sale_order.py\n--- a/account_payment_sale/models/sale_order.py\n+++ b/account_payment_sale/models/sale_order.py\n@@ -33,3 +33,31 @@\n \"\"\"Copy bank partner from sale order to invoice\"\"\"\n vals = super()._prepare_invoice()\n return self._get_payment_mode_vals(vals)\n+\n+ def _finalize_invoices(self, invoices, references):\n+ \"\"\"\n+ Invoked after creating invoices at the end of action_invoice_create.\n+\n+ We must override this method since the onchange on partner is called by\n+ the base method and therefore will change the specific payment_mode set\n+ on the SO if one is defined on the partner..\n+\n+ :param invoices: {group_key: invoice}\n+ :param references: {invoice: order}\n+ \"\"\"\n+ payment_vals_by_invoice = {}\n+ for invoice in invoices.values():\n+ payment_vals_by_invoice[invoice] = {\n+ 'payment_mode_id': invoice.payment_mode_id.id,\n+ 'partner_bank_id': invoice.partner_bank_id.id\n+ }\n+ res = super()._finalize_invoices(invoices, references)\n+ for invoice in invoices.values():\n+ payment_vals = payment_vals_by_invoice[invoice]\n+ if invoice.payment_mode_id.id == payment_vals['payment_mode_id']:\n+ payment_vals.pop(\"payment_mode_id\")\n+ if invoice.partner_bank_id.id == payment_vals[\"partner_bank_id\"]:\n+ payment_vals.pop(\"partner_bank_id\")\n+ if payment_vals:\n+ invoice.write(payment_vals)\n+ return res\n", "issue": "[12.0][BUG] account_payment_sale\nHi\r\nI have found a bug in module account_payment_sale, but I am not sure how to fix it nicely.\r\nThe payment_mode_id does not propagate from the sale order to the invoice. \r\nI guess the tests are a bit to naive, that is why they pass anyway.\r\nHere we try to propagate the payment mode : https://github.com/OCA/bank-payment/blob/12.0/account_payment_sale/models/sale_order.py#L35\r\nHere, the invoice is created with the right value (coming from the SO) : https://github.com/OCA/OCB/blob/12.0/addons/sale/models/sale.py#L521\r\nAnd it is overriden here https://github.com/OCA/OCB/blob/12.0/addons/sale/models/sale.py#L570\r\n\r\nI really don't get why they have refactored it this way, they create the invoice and then they override a lot of values...\r\nAnd I do not really see a clean solution to solve this.\r\nAny idea?\n", "before_files": [{"content": "# Copyright 2014-2016 Akretion - Alexis de Lattre\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).\n\nfrom odoo import models, fields, api\n\n\nclass SaleOrder(models.Model):\n _inherit = \"sale.order\"\n\n payment_mode_id = fields.Many2one(\n 'account.payment.mode', string='Payment Mode',\n domain=[('payment_type', '=', 'inbound')])\n\n def _get_payment_mode_vals(self, vals):\n if self.payment_mode_id:\n vals['payment_mode_id'] = self.payment_mode_id.id\n if self.payment_mode_id.bank_account_link == 'fixed':\n vals['partner_bank_id'] =\\\n self.payment_mode_id.fixed_journal_id.bank_account_id.id\n return vals\n\n @api.onchange('partner_id')\n def onchange_partner_id(self):\n res = super().onchange_partner_id()\n if self.partner_id:\n self.payment_mode_id = self.partner_id.customer_payment_mode_id\n else:\n self.payment_mode_id = False\n return res\n\n @api.multi\n def _prepare_invoice(self):\n \"\"\"Copy bank partner from sale order to invoice\"\"\"\n vals = super()._prepare_invoice()\n return self._get_payment_mode_vals(vals)\n", "path": "account_payment_sale/models/sale_order.py"}], "after_files": [{"content": "# Copyright 2014-2016 Akretion - Alexis 
de Lattre\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).\n\nfrom odoo import models, fields, api\n\n\nclass SaleOrder(models.Model):\n _inherit = \"sale.order\"\n\n payment_mode_id = fields.Many2one(\n 'account.payment.mode', string='Payment Mode',\n domain=[('payment_type', '=', 'inbound')])\n\n def _get_payment_mode_vals(self, vals):\n if self.payment_mode_id:\n vals['payment_mode_id'] = self.payment_mode_id.id\n if self.payment_mode_id.bank_account_link == 'fixed':\n vals['partner_bank_id'] =\\\n self.payment_mode_id.fixed_journal_id.bank_account_id.id\n return vals\n\n @api.onchange('partner_id')\n def onchange_partner_id(self):\n res = super().onchange_partner_id()\n if self.partner_id:\n self.payment_mode_id = self.partner_id.customer_payment_mode_id\n else:\n self.payment_mode_id = False\n return res\n\n @api.multi\n def _prepare_invoice(self):\n \"\"\"Copy bank partner from sale order to invoice\"\"\"\n vals = super()._prepare_invoice()\n return self._get_payment_mode_vals(vals)\n\n def _finalize_invoices(self, invoices, references):\n \"\"\"\n Invoked after creating invoices at the end of action_invoice_create.\n\n We must override this method since the onchange on partner is called by\n the base method and therefore will change the specific payment_mode set\n on the SO if one is defined on the partner..\n\n :param invoices: {group_key: invoice}\n :param references: {invoice: order}\n \"\"\"\n payment_vals_by_invoice = {}\n for invoice in invoices.values():\n payment_vals_by_invoice[invoice] = {\n 'payment_mode_id': invoice.payment_mode_id.id,\n 'partner_bank_id': invoice.partner_bank_id.id\n }\n res = super()._finalize_invoices(invoices, references)\n for invoice in invoices.values():\n payment_vals = payment_vals_by_invoice[invoice]\n if invoice.payment_mode_id.id == payment_vals['payment_mode_id']:\n payment_vals.pop(\"payment_mode_id\")\n if invoice.partner_bank_id.id == payment_vals[\"partner_bank_id\"]:\n payment_vals.pop(\"partner_bank_id\")\n if payment_vals:\n invoice.write(payment_vals)\n return res\n", "path": "account_payment_sale/models/sale_order.py"}]} | 836 | 350 |
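For context, the `_finalize_invoices` override above follows a snapshot/write-back pattern: capture the values copied from the sale order, let the base method (and its partner onchange) run, then re-apply anything that was clobbered. A toy illustration in plain Python, with no Odoo dependency:
```python
def finalize(invoice: dict, run_onchanges) -> dict:
    # 1. snapshot the payment fields copied from the sale order
    snapshot = {k: invoice[k] for k in ("payment_mode_id", "partner_bank_id")}
    # 2. run the base finalization, whose partner onchange may clobber them
    run_onchanges(invoice)
    # 3. write back only the fields that actually changed
    invoice.update({k: v for k, v in snapshot.items() if invoice[k] != v})
    return invoice

inv = {"payment_mode_id": 7, "partner_bank_id": 3}
finalize(inv, lambda i: i.update(payment_mode_id=1))  # onchange resets the mode
assert inv == {"payment_mode_id": 7, "partner_bank_id": 3}
```
Writing back only the changed fields, as the real patch does, avoids redundant `invoice.write()` calls when the onchange happens to leave the values alone.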
gh_patches_debug_15077 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-1748 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
botbuilder-core library is missing the botframework-streaming dependency
## Version
4.14.0.20210616.dev252366
## Describe the bug
The botbuilder-core library is missing the botframework-streaming dependency.
When running a Python bot with only the botbuilder-core library installed, it won't start because the botframework-streaming dependency is missing.
The dependency reference is missing from the requirements.txt file, and this new library is not published to any of the regular package indexes ([test.pypi](https://test.pypi.org/), [pypi](https://pypi.org/) and [azure artifacts](https://dev.azure.com/ConversationalAI/BotFramework/_packaging?_a=feed&feed=SDK%40Local)), so it can't be installed manually.
When running the bots locally, it is possible to install the dependency from a local folder containing the code cloned from the repo.
## To Reproduce
1. Open a bot that uses the botbuilder-core library.
2. Install a preview version (4.14.x).
3. Run the bot.
## Expected behavior
Installing the package should pull in all required sub-dependencies, or at least make them available for manual installation.
## Screenshots

## Additional context
This issue is blocking the pipelines from the [BotFramework-FunctionalTests](https://github.com/microsoft/BotFramework-FunctionalTests/) repository from testing preview versions of the BotBuilder Python libraries.
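
A quick way to confirm the gap after installing a preview build (a minimal check sketch; the module names are taken from the `packages` lists in the `setup.py` files below, and the dependency relationship is an assumption based on this issue):

```python
import importlib.util

# botbuilder-core ships botbuilder.core.streaming, which presumably imports
# botframework.streaming; if pip did not pull that package in, the second
# lookup returns None.
for name in ("botbuilder.core", "botframework.streaming"):
    spec = importlib.util.find_spec(name)
    print(f"{name}: {'found' if spec else 'MISSING'}")
```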
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libraries/botframework-streaming/setup.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 import os
5 from setuptools import setup
6
7 VERSION = os.environ["packageVersion"] if "packageVersion" in os.environ else "4.12.0"
8 REQUIRES = [
9 "botbuilder-schema>=4.12.0",
10 "botframework-connector>=4.12.0",
11 "botbuilder-core>=4.12.0",
12 ]
13
14 root = os.path.abspath(os.path.dirname(__file__))
15
16 with open(os.path.join(root, "botframework", "streaming", "about.py")) as f:
17 package_info = {}
18 info = f.read()
19 exec(info, package_info)
20
21 with open(os.path.join(root, "README.rst"), encoding="utf-8") as f:
22 long_description = f.read()
23
24 setup(
25 name=package_info["__title__"],
26 version=package_info["__version__"],
27 url=package_info["__uri__"],
28 author=package_info["__author__"],
29 description=package_info["__description__"],
30 keywords=["BotFrameworkStreaming", "bots", "ai", "botframework", "botframework",],
31 long_description=long_description,
32 long_description_content_type="text/x-rst",
33 license=package_info["__license__"],
34 packages=[
35 "botframework.streaming",
36 "botframework.streaming.payloads",
37 "botframework.streaming.payloads.models",
38 "botframework.streaming.payload_transport",
39 "botframework.streaming.transport",
40 "botframework.streaming.transport.web_socket",
41 ],
42 install_requires=REQUIRES,
43 classifiers=[
44 "Programming Language :: Python :: 3.7",
45 "Intended Audience :: Developers",
46 "License :: OSI Approved :: MIT License",
47 "Operating System :: OS Independent",
48 "Development Status :: 5 - Production/Stable",
49 "Topic :: Scientific/Engineering :: Artificial Intelligence",
50 ],
51 )
52
```
Path: `libraries/botbuilder-core/setup.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 import os
5 from setuptools import setup
6
7 VERSION = os.environ["packageVersion"] if "packageVersion" in os.environ else "4.14.0"
8 REQUIRES = [
9 "botbuilder-schema==4.14.0",
10 "botframework-connector==4.14.0",
11 "jsonpickle>=1.2,<1.5",
12 ]
13
14 root = os.path.abspath(os.path.dirname(__file__))
15
16 with open(os.path.join(root, "botbuilder", "core", "about.py")) as f:
17 package_info = {}
18 info = f.read()
19 exec(info, package_info)
20
21 with open(os.path.join(root, "README.rst"), encoding="utf-8") as f:
22 long_description = f.read()
23
24 setup(
25 name=package_info["__title__"],
26 version=package_info["__version__"],
27 url=package_info["__uri__"],
28 author=package_info["__author__"],
29 description=package_info["__description__"],
30 keywords=["BotBuilderCore", "bots", "ai", "botframework", "botbuilder"],
31 long_description=long_description,
32 long_description_content_type="text/x-rst",
33 license=package_info["__license__"],
34 packages=[
35 "botbuilder.core",
36 "botbuilder.core.adapters",
37 "botbuilder.core.inspection",
38 "botbuilder.core.integration",
39 "botbuilder.core.skills",
40 "botbuilder.core.streaming",
41 "botbuilder.core.teams",
42 "botbuilder.core.oauth",
43 ],
44 install_requires=REQUIRES,
45 classifiers=[
46 "Programming Language :: Python :: 3.7",
47 "Intended Audience :: Developers",
48 "License :: OSI Approved :: MIT License",
49 "Operating System :: OS Independent",
50 "Development Status :: 5 - Production/Stable",
51 "Topic :: Scientific/Engineering :: Artificial Intelligence",
52 ],
53 )
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/libraries/botbuilder-core/setup.py b/libraries/botbuilder-core/setup.py
--- a/libraries/botbuilder-core/setup.py
+++ b/libraries/botbuilder-core/setup.py
@@ -8,6 +8,7 @@
REQUIRES = [
"botbuilder-schema==4.14.0",
"botframework-connector==4.14.0",
+ "botframework-streaming==4.14.0",
"jsonpickle>=1.2,<1.5",
]
diff --git a/libraries/botframework-streaming/setup.py b/libraries/botframework-streaming/setup.py
--- a/libraries/botframework-streaming/setup.py
+++ b/libraries/botframework-streaming/setup.py
@@ -4,11 +4,10 @@
import os
from setuptools import setup
-VERSION = os.environ["packageVersion"] if "packageVersion" in os.environ else "4.12.0"
+VERSION = os.environ["packageVersion"] if "packageVersion" in os.environ else "4.14.0"
REQUIRES = [
"botbuilder-schema>=4.12.0",
"botframework-connector>=4.12.0",
- "botbuilder-core>=4.12.0",
]
root = os.path.abspath(os.path.dirname(__file__))
| {"golden_diff": "diff --git a/libraries/botbuilder-core/setup.py b/libraries/botbuilder-core/setup.py\n--- a/libraries/botbuilder-core/setup.py\n+++ b/libraries/botbuilder-core/setup.py\n@@ -8,6 +8,7 @@\n REQUIRES = [\n \"botbuilder-schema==4.14.0\",\n \"botframework-connector==4.14.0\",\n+ \"botframework-streaming==4.14.0\",\n \"jsonpickle>=1.2,<1.5\",\n ]\n \ndiff --git a/libraries/botframework-streaming/setup.py b/libraries/botframework-streaming/setup.py\n--- a/libraries/botframework-streaming/setup.py\n+++ b/libraries/botframework-streaming/setup.py\n@@ -4,11 +4,10 @@\n import os\n from setuptools import setup\n \n-VERSION = os.environ[\"packageVersion\"] if \"packageVersion\" in os.environ else \"4.12.0\"\n+VERSION = os.environ[\"packageVersion\"] if \"packageVersion\" in os.environ else \"4.14.0\"\n REQUIRES = [\n \"botbuilder-schema>=4.12.0\",\n \"botframework-connector>=4.12.0\",\n- \"botbuilder-core>=4.12.0\",\n ]\n \n root = os.path.abspath(os.path.dirname(__file__))\n", "issue": "botbuilder-core library is missing the botframework-streaming dependency\n## Version\r\n4.14.0.20210616.dev252366\r\n\r\n## Describe the bug\r\nThe botbuilder-core library is missing the botframework-streaming dependency.\r\nWhen running a python bot with the botbuilder-core library installed, it won't run because it is missing the botframework-streaming dependency.\r\nThe dependency reference is missing from the requirements.txt file, and this new library is not published in any of the regular packages indexes ([test.pypi](https://test.pypi.org/), [pypi](https://pypi.org/) and [azure artifacts](https://dev.azure.com/ConversationalAI/BotFramework/_packaging?_a=feed&feed=SDK%40Local)), so it can't be installed manually.\r\nWhen running the bots locally it is possible to install the dependency from a local folder with the code cloned from the repo.\r\n\r\n## To Reproduce\r\n1. Open a bot that uses the botbuilder-core library.\r\n2. Install a preview version (4.14.x).\r\n3. Run the bot.\r\n\r\n## Expected behavior\r\nThe dependencies being installed should install all the required sub-dependencies or have them available for manual installation.\r\n\r\n## Screenshots\r\n\r\n\r\n## Additional context\r\nThis issue is blocking the pipelines from the [BotFramework-FunctionalTests](https://github.com/microsoft/BotFramework-FunctionalTests/) repository from testing preview versions of the BotBuilder Python libraries.\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nVERSION = os.environ[\"packageVersion\"] if \"packageVersion\" in os.environ else \"4.12.0\"\nREQUIRES = [\n \"botbuilder-schema>=4.12.0\",\n \"botframework-connector>=4.12.0\",\n \"botbuilder-core>=4.12.0\",\n]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"botframework\", \"streaming\", \"about.py\")) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=[\"BotFrameworkStreaming\", \"bots\", \"ai\", \"botframework\", \"botframework\",],\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\n \"botframework.streaming\",\n \"botframework.streaming.payloads\",\n \"botframework.streaming.payloads.models\",\n \"botframework.streaming.payload_transport\",\n \"botframework.streaming.transport\",\n \"botframework.streaming.transport.web_socket\",\n ],\n install_requires=REQUIRES,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botframework-streaming/setup.py"}, {"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nVERSION = os.environ[\"packageVersion\"] if \"packageVersion\" in os.environ else \"4.14.0\"\nREQUIRES = [\n \"botbuilder-schema==4.14.0\",\n \"botframework-connector==4.14.0\",\n \"jsonpickle>=1.2,<1.5\",\n]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"botbuilder\", \"core\", \"about.py\")) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=[\"BotBuilderCore\", \"bots\", \"ai\", \"botframework\", \"botbuilder\"],\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\n \"botbuilder.core\",\n \"botbuilder.core.adapters\",\n \"botbuilder.core.inspection\",\n \"botbuilder.core.integration\",\n \"botbuilder.core.skills\",\n \"botbuilder.core.streaming\",\n \"botbuilder.core.teams\",\n \"botbuilder.core.oauth\",\n ],\n install_requires=REQUIRES,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botbuilder-core/setup.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nVERSION = os.environ[\"packageVersion\"] if \"packageVersion\" in os.environ else \"4.14.0\"\nREQUIRES = [\n \"botbuilder-schema>=4.12.0\",\n \"botframework-connector>=4.12.0\",\n]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"botframework\", \"streaming\", \"about.py\")) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=[\"BotFrameworkStreaming\", \"bots\", \"ai\", \"botframework\", \"botframework\",],\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\n \"botframework.streaming\",\n \"botframework.streaming.payloads\",\n \"botframework.streaming.payloads.models\",\n \"botframework.streaming.payload_transport\",\n \"botframework.streaming.transport\",\n \"botframework.streaming.transport.web_socket\",\n ],\n install_requires=REQUIRES,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botframework-streaming/setup.py"}, {"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nVERSION = os.environ[\"packageVersion\"] if \"packageVersion\" in os.environ else \"4.14.0\"\nREQUIRES = [\n \"botbuilder-schema==4.14.0\",\n \"botframework-connector==4.14.0\",\n \"botframework-streaming==4.14.0\",\n \"jsonpickle>=1.2,<1.5\",\n]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"botbuilder\", \"core\", \"about.py\")) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=[\"BotBuilderCore\", \"bots\", \"ai\", \"botframework\", \"botbuilder\"],\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\n \"botbuilder.core\",\n \"botbuilder.core.adapters\",\n \"botbuilder.core.inspection\",\n \"botbuilder.core.integration\",\n \"botbuilder.core.skills\",\n \"botbuilder.core.streaming\",\n \"botbuilder.core.teams\",\n \"botbuilder.core.oauth\",\n ],\n install_requires=REQUIRES,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botbuilder-core/setup.py"}]} | 1,674 | 293 |
gh_patches_debug_18689 | rasdani/github-patches | git_diff | sanic-org__sanic-1553 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unable to start server -- Running run_async.py failed
**Describe the bug**
[2019-04-14 19:22:02 +0800] [21512] [INFO] Goin' Fast @ http://0.0.0.0:8000
[2019-04-14 19:22:02 +0800] [21512] [ERROR] Unable to start server
Traceback (most recent call last):
File "C:\ProgramData\Anaconda3\envs\venom\lib\site-packages\sanic\server.py", line 745, in serve
http_server = loop.run_until_complete(server_coroutine)
File "C:\ProgramData\Anaconda3\envs\venom\lib\asyncio\base_events.py", line 571, in run_until_complete
self.run_forever()
File "C:\ProgramData\Anaconda3\envs\venom\lib\asyncio\base_events.py", line 529, in run_forever
'Cannot run the event loop while another loop is running')
RuntimeError: Cannot run the event loop while another loop is running
**Code snippet**
Relevant source code, make sure to remove what is not necessary.
https://github.com/huge-success/sanic/blob/master/examples/run_async.py
**Expected behavior**
A clear and concise description of what you expected to happen.
**Environment (please complete the following information):**
- OS: [e.g. iOS]
- Version [e.g. 0.8.3]
Windows and Linux, Python 3.6 or 3.7 don't work
**Additional context**
Does this example still work?
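
For reference, the fix below passes `return_asyncio_server=True` so that `create_server` hands back an awaitable server instead of trying to drive the event loop itself — a minimal sketch of the corrected example:

```python
import asyncio
from signal import SIGINT, signal

import uvloop
from sanic import Sanic, response

app = Sanic(__name__)


@app.route("/")
async def test(request):
    return response.json({"answer": "42"})


asyncio.set_event_loop(uvloop.new_event_loop())
# return_asyncio_server=True makes create_server return an awaitable server
# rather than running its own event loop.
server = app.create_server(host="0.0.0.0", port=8000, return_asyncio_server=True)
loop = asyncio.get_event_loop()
task = asyncio.ensure_future(server)
signal(SIGINT, lambda s, f: loop.stop())
try:
    loop.run_forever()
finally:
    loop.stop()
```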
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/log_request_id.py`
Content:
```
1 '''
2 Based on example from https://github.com/Skyscanner/aiotask-context
3 and `examples/{override_logging,run_async}.py`.
4
5 Needs https://github.com/Skyscanner/aiotask-context/tree/52efbc21e2e1def2d52abb9a8e951f3ce5e6f690 or newer
6
7 $ pip install git+https://github.com/Skyscanner/aiotask-context.git
8 '''
9
10 import asyncio
11 import uuid
12 import logging
13 from signal import signal, SIGINT
14
15 from sanic import Sanic
16 from sanic import response
17
18 import uvloop
19 import aiotask_context as context
20
21 log = logging.getLogger(__name__)
22
23
24 class RequestIdFilter(logging.Filter):
25 def filter(self, record):
26 record.request_id = context.get('X-Request-ID')
27 return True
28
29
30 LOG_SETTINGS = {
31 'version': 1,
32 'disable_existing_loggers': False,
33 'handlers': {
34 'console': {
35 'class': 'logging.StreamHandler',
36 'level': 'DEBUG',
37 'formatter': 'default',
38 'filters': ['requestid'],
39 },
40 },
41 'filters': {
42 'requestid': {
43 '()': RequestIdFilter,
44 },
45 },
46 'formatters': {
47 'default': {
48 'format': '%(asctime)s %(levelname)s %(name)s:%(lineno)d %(request_id)s | %(message)s',
49 },
50 },
51 'loggers': {
52 '': {
53 'level': 'DEBUG',
54 'handlers': ['console'],
55 'propagate': True
56 },
57 }
58 }
59
60
61 app = Sanic(__name__, log_config=LOG_SETTINGS)
62
63
64 @app.middleware('request')
65 async def set_request_id(request):
66 request_id = request.headers.get('X-Request-ID') or str(uuid.uuid4())
67 context.set("X-Request-ID", request_id)
68
69
70 @app.route("/")
71 async def test(request):
72 log.debug('X-Request-ID: %s', context.get('X-Request-ID'))
73 log.info('Hello from test!')
74 return response.json({"test": True})
75
76
77 if __name__ == '__main__':
78 asyncio.set_event_loop(uvloop.new_event_loop())
79 server = app.create_server(host="0.0.0.0", port=8000)
80 loop = asyncio.get_event_loop()
81 loop.set_task_factory(context.task_factory)
82 task = asyncio.ensure_future(server)
83 try:
84 loop.run_forever()
85 except:
86 loop.stop()
87
```
Path: `examples/run_async.py`
Content:
```
1 from sanic import Sanic
2 from sanic import response
3 from signal import signal, SIGINT
4 import asyncio
5 import uvloop
6
7 app = Sanic(__name__)
8
9
10 @app.route("/")
11 async def test(request):
12 return response.json({"answer": "42"})
13
14 asyncio.set_event_loop(uvloop.new_event_loop())
15 server = app.create_server(host="0.0.0.0", port=8000)
16 loop = asyncio.get_event_loop()
17 task = asyncio.ensure_future(server)
18 signal(SIGINT, lambda s, f: loop.stop())
19 try:
20 loop.run_forever()
21 except:
22 loop.stop()
23
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/log_request_id.py b/examples/log_request_id.py
--- a/examples/log_request_id.py
+++ b/examples/log_request_id.py
@@ -76,7 +76,7 @@
if __name__ == '__main__':
asyncio.set_event_loop(uvloop.new_event_loop())
- server = app.create_server(host="0.0.0.0", port=8000)
+ server = app.create_server(host="0.0.0.0", port=8000, return_asyncio_server=True)
loop = asyncio.get_event_loop()
loop.set_task_factory(context.task_factory)
task = asyncio.ensure_future(server)
diff --git a/examples/run_async.py b/examples/run_async.py
--- a/examples/run_async.py
+++ b/examples/run_async.py
@@ -12,7 +12,7 @@
return response.json({"answer": "42"})
asyncio.set_event_loop(uvloop.new_event_loop())
-server = app.create_server(host="0.0.0.0", port=8000)
+server = app.create_server(host="0.0.0.0", port=8000, return_asyncio_server=True)
loop = asyncio.get_event_loop()
task = asyncio.ensure_future(server)
signal(SIGINT, lambda s, f: loop.stop())
| {"golden_diff": "diff --git a/examples/log_request_id.py b/examples/log_request_id.py\n--- a/examples/log_request_id.py\n+++ b/examples/log_request_id.py\n@@ -76,7 +76,7 @@\n \n if __name__ == '__main__':\n asyncio.set_event_loop(uvloop.new_event_loop())\n- server = app.create_server(host=\"0.0.0.0\", port=8000)\n+ server = app.create_server(host=\"0.0.0.0\", port=8000, return_asyncio_server=True)\n loop = asyncio.get_event_loop()\n loop.set_task_factory(context.task_factory)\n task = asyncio.ensure_future(server)\ndiff --git a/examples/run_async.py b/examples/run_async.py\n--- a/examples/run_async.py\n+++ b/examples/run_async.py\n@@ -12,7 +12,7 @@\n return response.json({\"answer\": \"42\"})\n \n asyncio.set_event_loop(uvloop.new_event_loop())\n-server = app.create_server(host=\"0.0.0.0\", port=8000)\n+server = app.create_server(host=\"0.0.0.0\", port=8000, return_asyncio_server=True)\n loop = asyncio.get_event_loop()\n task = asyncio.ensure_future(server)\n signal(SIGINT, lambda s, f: loop.stop())\n", "issue": "Unable to start server -- Running run_async.py failed\n**Describe the bug**\r\n[2019-04-14 19:22:02 +0800] [21512] [INFO] Goin' Fast @ http://0.0.0.0:8000\r\n[2019-04-14 19:22:02 +0800] [21512] [ERROR] Unable to start server\r\nTraceback (most recent call last):\r\n File \"C:\\ProgramData\\Anaconda3\\envs\\venom\\lib\\site-packages\\sanic\\server.py\", line 745, in serve\r\n http_server = loop.run_until_complete(server_coroutine)\r\n File \"C:\\ProgramData\\Anaconda3\\envs\\venom\\lib\\asyncio\\base_events.py\", line 571, in run_until_complete\r\n self.run_forever()\r\n File \"C:\\ProgramData\\Anaconda3\\envs\\venom\\lib\\asyncio\\base_events.py\", line 529, in run_forever\r\n 'Cannot run the event loop while another loop is running')\r\nRuntimeError: Cannot run the event loop while another loop is running\r\n\r\n**Code snippet**\r\nRelevant source code, make sure to remove what is not necessary.\r\n\r\nhttps://github.com/huge-success/sanic/blob/master/examples/run_async.py\r\n\r\n**Expected behavior**\r\nA clear and concise description of what you expected to happen.\r\n\r\n\r\n**Environment (please complete the following information):**\r\n - OS: [e.g. iOS]\r\n - Version [e.g. 
0.8.3]\r\nWindow and Linux, Python 3.6 or 3.7 don't work\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\r\n\r\nIs this example still work ?\n", "before_files": [{"content": "'''\nBased on example from https://github.com/Skyscanner/aiotask-context\nand `examples/{override_logging,run_async}.py`.\n\nNeeds https://github.com/Skyscanner/aiotask-context/tree/52efbc21e2e1def2d52abb9a8e951f3ce5e6f690 or newer\n\n$ pip install git+https://github.com/Skyscanner/aiotask-context.git\n'''\n\nimport asyncio\nimport uuid\nimport logging\nfrom signal import signal, SIGINT\n\nfrom sanic import Sanic\nfrom sanic import response\n\nimport uvloop\nimport aiotask_context as context\n\nlog = logging.getLogger(__name__)\n\n\nclass RequestIdFilter(logging.Filter):\n def filter(self, record):\n record.request_id = context.get('X-Request-ID')\n return True\n\n\nLOG_SETTINGS = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n 'level': 'DEBUG',\n 'formatter': 'default',\n 'filters': ['requestid'],\n },\n },\n 'filters': {\n 'requestid': {\n '()': RequestIdFilter,\n },\n },\n 'formatters': {\n 'default': {\n 'format': '%(asctime)s %(levelname)s %(name)s:%(lineno)d %(request_id)s | %(message)s',\n },\n },\n 'loggers': {\n '': {\n 'level': 'DEBUG',\n 'handlers': ['console'],\n 'propagate': True\n },\n }\n}\n\n\napp = Sanic(__name__, log_config=LOG_SETTINGS)\n\n\[email protected]('request')\nasync def set_request_id(request):\n request_id = request.headers.get('X-Request-ID') or str(uuid.uuid4())\n context.set(\"X-Request-ID\", request_id)\n\n\[email protected](\"/\")\nasync def test(request):\n log.debug('X-Request-ID: %s', context.get('X-Request-ID'))\n log.info('Hello from test!')\n return response.json({\"test\": True})\n\n\nif __name__ == '__main__':\n asyncio.set_event_loop(uvloop.new_event_loop())\n server = app.create_server(host=\"0.0.0.0\", port=8000)\n loop = asyncio.get_event_loop()\n loop.set_task_factory(context.task_factory)\n task = asyncio.ensure_future(server)\n try:\n loop.run_forever()\n except:\n loop.stop()\n", "path": "examples/log_request_id.py"}, {"content": "from sanic import Sanic\nfrom sanic import response\nfrom signal import signal, SIGINT\nimport asyncio\nimport uvloop\n\napp = Sanic(__name__)\n\n\[email protected](\"/\")\nasync def test(request):\n return response.json({\"answer\": \"42\"})\n\nasyncio.set_event_loop(uvloop.new_event_loop())\nserver = app.create_server(host=\"0.0.0.0\", port=8000)\nloop = asyncio.get_event_loop()\ntask = asyncio.ensure_future(server)\nsignal(SIGINT, lambda s, f: loop.stop())\ntry:\n loop.run_forever()\nexcept:\n loop.stop()\n", "path": "examples/run_async.py"}], "after_files": [{"content": "'''\nBased on example from https://github.com/Skyscanner/aiotask-context\nand `examples/{override_logging,run_async}.py`.\n\nNeeds https://github.com/Skyscanner/aiotask-context/tree/52efbc21e2e1def2d52abb9a8e951f3ce5e6f690 or newer\n\n$ pip install git+https://github.com/Skyscanner/aiotask-context.git\n'''\n\nimport asyncio\nimport uuid\nimport logging\nfrom signal import signal, SIGINT\n\nfrom sanic import Sanic\nfrom sanic import response\n\nimport uvloop\nimport aiotask_context as context\n\nlog = logging.getLogger(__name__)\n\n\nclass RequestIdFilter(logging.Filter):\n def filter(self, record):\n record.request_id = context.get('X-Request-ID')\n return True\n\n\nLOG_SETTINGS = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 
'console': {\n 'class': 'logging.StreamHandler',\n 'level': 'DEBUG',\n 'formatter': 'default',\n 'filters': ['requestid'],\n },\n },\n 'filters': {\n 'requestid': {\n '()': RequestIdFilter,\n },\n },\n 'formatters': {\n 'default': {\n 'format': '%(asctime)s %(levelname)s %(name)s:%(lineno)d %(request_id)s | %(message)s',\n },\n },\n 'loggers': {\n '': {\n 'level': 'DEBUG',\n 'handlers': ['console'],\n 'propagate': True\n },\n }\n}\n\n\napp = Sanic(__name__, log_config=LOG_SETTINGS)\n\n\[email protected]('request')\nasync def set_request_id(request):\n request_id = request.headers.get('X-Request-ID') or str(uuid.uuid4())\n context.set(\"X-Request-ID\", request_id)\n\n\[email protected](\"/\")\nasync def test(request):\n log.debug('X-Request-ID: %s', context.get('X-Request-ID'))\n log.info('Hello from test!')\n return response.json({\"test\": True})\n\n\nif __name__ == '__main__':\n asyncio.set_event_loop(uvloop.new_event_loop())\n server = app.create_server(host=\"0.0.0.0\", port=8000, return_asyncio_server=True)\n loop = asyncio.get_event_loop()\n loop.set_task_factory(context.task_factory)\n task = asyncio.ensure_future(server)\n try:\n loop.run_forever()\n except:\n loop.stop()\n", "path": "examples/log_request_id.py"}, {"content": "from sanic import Sanic\nfrom sanic import response\nfrom signal import signal, SIGINT\nimport asyncio\nimport uvloop\n\napp = Sanic(__name__)\n\n\[email protected](\"/\")\nasync def test(request):\n return response.json({\"answer\": \"42\"})\n\nasyncio.set_event_loop(uvloop.new_event_loop())\nserver = app.create_server(host=\"0.0.0.0\", port=8000, return_asyncio_server=True)\nloop = asyncio.get_event_loop()\ntask = asyncio.ensure_future(server)\nsignal(SIGINT, lambda s, f: loop.stop())\ntry:\n loop.run_forever()\nexcept:\n loop.stop()\n", "path": "examples/run_async.py"}]} | 1,564 | 284 |
gh_patches_debug_6849 | rasdani/github-patches | git_diff | WordPress__openverse-api-233 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug] A circular import prevents starting the project correctly
## Description
<!-- Concisely describe the bug. -->
There is a problem with models imports, run the project and see:
```
web_1 | Exception in thread django-main-thread:
web_1 | Traceback (most recent call last):
web_1 | File "/usr/local/lib/python3.9/threading.py", line 973, in _bootstrap_inner
web_1 | self.run()
web_1 | File "/usr/local/lib/python3.9/threading.py", line 910, in run
web_1 | self._target(*self._args, **self._kwargs)
web_1 | File "/usr/local/lib/python3.9/site-packages/django/utils/autoreload.py", line 64, in wrapper
web_1 | fn(*args, **kwargs)
web_1 | File "/usr/local/lib/python3.9/site-packages/django/core/management/commands/runserver.py", line 110, in inner_run
web_1 | autoreload.raise_last_exception()
web_1 | File "/usr/local/lib/python3.9/site-packages/django/utils/autoreload.py", line 87, in raise_last_exception
web_1 | raise _exception[1]
web_1 | File "/usr/local/lib/python3.9/site-packages/django/core/management/__init__.py", line 375, in execute
web_1 | autoreload.check_errors(django.setup)()
web_1 | File "/usr/local/lib/python3.9/site-packages/django/utils/autoreload.py", line 64, in wrapper
web_1 | fn(*args, **kwargs)
web_1 | File "/usr/local/lib/python3.9/site-packages/django/__init__.py", line 24, in setup
web_1 | apps.populate(settings.INSTALLED_APPS)
web_1 | File "/usr/local/lib/python3.9/site-packages/django/apps/registry.py", line 114, in populate
web_1 | app_config.import_models()
web_1 | File "/usr/local/lib/python3.9/site-packages/django/apps/config.py", line 301, in import_models
web_1 | self.models_module = import_module(models_module_name)
web_1 | File "/usr/local/lib/python3.9/importlib/__init__.py", line 127, in import_module
web_1 | return _bootstrap._gcd_import(name[level:], package, level)
web_1 | File "<frozen importlib._bootstrap>", line 1030, in _gcd_import
web_1 | File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
web_1 | File "<frozen importlib._bootstrap>", line 986, in _find_and_load_unlocked
web_1 | File "<frozen importlib._bootstrap>", line 680, in _load_unlocked
web_1 | File "<frozen importlib._bootstrap_external>", line 850, in exec_module
web_1 | File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
web_1 | File "/openverse-api/catalog/api/models/__init__.py", line 1, in <module>
web_1 | from catalog.api.models.audio import (
web_1 | File "/openverse-api/catalog/api/models/audio.py", line 2, in <module>
web_1 | from catalog.api.models import OpenLedgerModel
web_1 | ImportError: cannot import name 'OpenLedgerModel' from partially initialized module 'catalog.api.models' (most likely due to a circular import) (/openverse-api/catalog/api/models/__init__.py)
```
## Expectation
<!-- Concisely describe what you expected to happen. -->
The project should start without errors and run normally, passing tests.
## Additional context
<!-- Add any other context about the problem here; or delete the section entirely. -->
The wrong import order is introduced by the `isort` rules, so we should make an exception for these lines or for the whole file.
## Resolution
<!-- Replace the [ ] with [x] to check the box. -->
- [ ] 🙋 I would be interested in resolving this bug.
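
The cycle is `catalog.api.models.__init__` → `catalog.api.models.audio` → back to `catalog.api.models` for `OpenLedgerModel`, which is not yet bound when `audio` is imported first. The fix in the patch below binds the base class before any submodule import and exempts that line from `isort` — a sketch:

```python
# catalog/api/models/__init__.py
from catalog.api.models.base import OpenLedgerModel  # isort:skip  -- must run first
from catalog.api.models.audio import Audio  # audio.py reads OpenLedgerModel at import time
```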
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openverse-api/catalog/api/models/__init__.py`
Content:
```
1 from catalog.api.models.audio import (
2 AltAudioFile,
3 Audio,
4 AudioList,
5 AudioReport,
6 AudioSet,
7 DeletedAudio,
8 MatureAudio,
9 )
10 from catalog.api.models.base import OpenLedgerModel
11 from catalog.api.models.image import (
12 DeletedImage,
13 Image,
14 ImageList,
15 ImageReport,
16 MatureImage,
17 )
18 from catalog.api.models.media import (
19 DEINDEXED,
20 DMCA,
21 MATURE,
22 MATURE_FILTERED,
23 NO_ACTION,
24 OTHER,
25 PENDING,
26 )
27 from catalog.api.models.models import ContentProvider, ShortenedLink, SourceLogo, Tag
28 from catalog.api.models.oauth import (
29 OAuth2Registration,
30 OAuth2Verification,
31 ThrottledApplication,
32 )
33
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/openverse-api/catalog/api/models/__init__.py b/openverse-api/catalog/api/models/__init__.py
--- a/openverse-api/catalog/api/models/__init__.py
+++ b/openverse-api/catalog/api/models/__init__.py
@@ -1,3 +1,4 @@
+from catalog.api.models.base import OpenLedgerModel # isort:skip
from catalog.api.models.audio import (
AltAudioFile,
Audio,
@@ -7,7 +8,6 @@
DeletedAudio,
MatureAudio,
)
-from catalog.api.models.base import OpenLedgerModel
from catalog.api.models.image import (
DeletedImage,
Image,
| {"golden_diff": "diff --git a/openverse-api/catalog/api/models/__init__.py b/openverse-api/catalog/api/models/__init__.py\n--- a/openverse-api/catalog/api/models/__init__.py\n+++ b/openverse-api/catalog/api/models/__init__.py\n@@ -1,3 +1,4 @@\n+from catalog.api.models.base import OpenLedgerModel # isort:skip\n from catalog.api.models.audio import (\n AltAudioFile,\n Audio,\n@@ -7,7 +8,6 @@\n DeletedAudio,\n MatureAudio,\n )\n-from catalog.api.models.base import OpenLedgerModel\n from catalog.api.models.image import (\n DeletedImage,\n Image,\n", "issue": "[Bug] A circular import prevents starting the project correctly\n## Description\r\n<!-- Concisely describe the bug. -->\r\nThere is a problem with models imports, run the project and see:\r\n\r\n```\r\nweb_1 | Exception in thread django-main-thread:\r\nweb_1 | Traceback (most recent call last):\r\nweb_1 | File \"/usr/local/lib/python3.9/threading.py\", line 973, in _bootstrap_inner\r\nweb_1 | self.run()\r\nweb_1 | File \"/usr/local/lib/python3.9/threading.py\", line 910, in run\r\nweb_1 | self._target(*self._args, **self._kwargs)\r\nweb_1 | File \"/usr/local/lib/python3.9/site-packages/django/utils/autoreload.py\", line 64, in wrapper\r\nweb_1 | fn(*args, **kwargs)\r\nweb_1 | File \"/usr/local/lib/python3.9/site-packages/django/core/management/commands/runserver.py\", line 110, in inner_run\r\nweb_1 | autoreload.raise_last_exception()\r\nweb_1 | File \"/usr/local/lib/python3.9/site-packages/django/utils/autoreload.py\", line 87, in raise_last_exception\r\nweb_1 | raise _exception[1]\r\nweb_1 | File \"/usr/local/lib/python3.9/site-packages/django/core/management/__init__.py\", line 375, in execute\r\nweb_1 | autoreload.check_errors(django.setup)()\r\nweb_1 | File \"/usr/local/lib/python3.9/site-packages/django/utils/autoreload.py\", line 64, in wrapper\r\nweb_1 | fn(*args, **kwargs)\r\nweb_1 | File \"/usr/local/lib/python3.9/site-packages/django/__init__.py\", line 24, in setup\r\nweb_1 | apps.populate(settings.INSTALLED_APPS)\r\nweb_1 | File \"/usr/local/lib/python3.9/site-packages/django/apps/registry.py\", line 114, in populate\r\nweb_1 | app_config.import_models()\r\nweb_1 | File \"/usr/local/lib/python3.9/site-packages/django/apps/config.py\", line 301, in import_models\r\nweb_1 | self.models_module = import_module(models_module_name)\r\nweb_1 | File \"/usr/local/lib/python3.9/importlib/__init__.py\", line 127, in import_module\r\nweb_1 | return _bootstrap._gcd_import(name[level:], package, level)\r\nweb_1 | File \"<frozen importlib._bootstrap>\", line 1030, in _gcd_import\r\nweb_1 | File \"<frozen importlib._bootstrap>\", line 1007, in _find_and_load\r\nweb_1 | File \"<frozen importlib._bootstrap>\", line 986, in _find_and_load_unlocked\r\nweb_1 | File \"<frozen importlib._bootstrap>\", line 680, in _load_unlocked\r\nweb_1 | File \"<frozen importlib._bootstrap_external>\", line 850, in exec_module\r\nweb_1 | File \"<frozen importlib._bootstrap>\", line 228, in _call_with_frames_removed\r\nweb_1 | File \"/openverse-api/catalog/api/models/__init__.py\", line 1, in <module>\r\nweb_1 | from catalog.api.models.audio import (\r\nweb_1 | File \"/openverse-api/catalog/api/models/audio.py\", line 2, in <module>\r\nweb_1 | from catalog.api.models import OpenLedgerModel\r\nweb_1 | ImportError: cannot import name 'OpenLedgerModel' from partially initialized module 'catalog.api.models' (most likely due to a circular import) (/openverse-api/catalog/api/models/__init__.py)\r\n```\r\n\r\n## Expectation\r\n<!-- Concisely describe what you expected 
to happen. -->\r\nThe project should start without errors and run normally, passing tests.\r\n\r\n## Additional context\r\n<!-- Add any other context about the problem here; or delete the section entirely. -->\r\nThe wrong order is introduced due to the `isort` rules so we should make an exception for these lines or the file.\r\n\r\n## Resolution\r\n<!-- Replace the [ ] with [x] to check the box. -->\r\n- [ ] \ud83d\ude4b I would be interested in resolving this bug.\r\n\n", "before_files": [{"content": "from catalog.api.models.audio import (\n AltAudioFile,\n Audio,\n AudioList,\n AudioReport,\n AudioSet,\n DeletedAudio,\n MatureAudio,\n)\nfrom catalog.api.models.base import OpenLedgerModel\nfrom catalog.api.models.image import (\n DeletedImage,\n Image,\n ImageList,\n ImageReport,\n MatureImage,\n)\nfrom catalog.api.models.media import (\n DEINDEXED,\n DMCA,\n MATURE,\n MATURE_FILTERED,\n NO_ACTION,\n OTHER,\n PENDING,\n)\nfrom catalog.api.models.models import ContentProvider, ShortenedLink, SourceLogo, Tag\nfrom catalog.api.models.oauth import (\n OAuth2Registration,\n OAuth2Verification,\n ThrottledApplication,\n)\n", "path": "openverse-api/catalog/api/models/__init__.py"}], "after_files": [{"content": "from catalog.api.models.base import OpenLedgerModel # isort:skip\nfrom catalog.api.models.audio import (\n AltAudioFile,\n Audio,\n AudioList,\n AudioReport,\n AudioSet,\n DeletedAudio,\n MatureAudio,\n)\nfrom catalog.api.models.image import (\n DeletedImage,\n Image,\n ImageList,\n ImageReport,\n MatureImage,\n)\nfrom catalog.api.models.media import (\n DEINDEXED,\n DMCA,\n MATURE,\n MATURE_FILTERED,\n NO_ACTION,\n OTHER,\n PENDING,\n)\nfrom catalog.api.models.models import ContentProvider, ShortenedLink, SourceLogo, Tag\nfrom catalog.api.models.oauth import (\n OAuth2Registration,\n OAuth2Verification,\n ThrottledApplication,\n)\n", "path": "openverse-api/catalog/api/models/__init__.py"}]} | 1,466 | 139 |
gh_patches_debug_25787 | rasdani/github-patches | git_diff | pypa__setuptools-1905 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TestDepends.testRequire regression in 41.6.0 (py3)
While trying to build the new release of setuptools, I get the following test failure:
```
==================================================================== FAILURES =====================================================================
_____________________________________________________________ TestDepends.testRequire _____________________________________________________________
self = <setuptools.tests.test_setuptools.TestDepends object at 0x7fbfae31d710>
@needs_bytecode
def testRequire(self):
req = Require('Json', '1.0.3', 'json')
assert req.name == 'Json'
assert req.module == 'json'
assert req.requested_version == '1.0.3'
assert req.attribute == '__version__'
assert req.full_name() == 'Json-1.0.3'
from json import __version__
assert req.get_version() == __version__
assert req.version_ok('1.0.9')
assert not req.version_ok('0.9.1')
assert not req.version_ok('unknown')
assert req.is_present()
assert req.is_current()
req = Require('Json 3000', '03000', 'json', format=LooseVersion)
assert req.is_present()
assert not req.is_current()
assert not req.version_ok('unknown')
req = Require('Do-what-I-mean', '1.0', 'd-w-i-m')
assert not req.is_present()
assert not req.is_current()
req = Require('Tests', None, 'tests', homepage="http://example.com")
assert req.format is None
assert req.attribute is None
assert req.requested_version is None
assert req.full_name() == 'Tests'
assert req.homepage == 'http://example.com'
from setuptools.tests import __path__
paths = [os.path.dirname(p) for p in __path__]
> assert req.is_present(paths)
E AssertionError: assert False
E + where False = <bound method Require.is_present of <setuptools.depends.Require object at 0x7fbfae0d0b38>>(['/tmp/portage/dev-python/setuptools-41.6.0/work/setuptools-41.6.0-python3_5/setuptools'])
E + where <bound method Require.is_present of <setuptools.depends.Require object at 0x7fbfae0d0b38>> = <setuptools.depends.Require object at 0x7fbfae0d0b38>.is_present
setuptools/tests/test_setuptools.py:120: AssertionError
```
I can reproduce it reliably with at least pypy3.6 (7.2.0) & python3.5 (3.5.7). I haven't tested other versions yet.
Full build log: [dev-python:setuptools-41.6.0:20191030-083347.log](https://github.com/pypa/setuptools/files/3787797/dev-python.setuptools-41.6.0.20191030-083347.log)
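
The root cause is an API mismatch: `importlib.util.find_spec(name, package=None)` takes a *package* name (used only for resolving relative imports) as its second argument, not a search-path list, so `find_module(module, paths)` ended up ignoring the custom paths. `importlib.machinery.PathFinder.find_spec(name, path)` does accept a path list, which is what the fix below switches to — a sketch with a small usage check:

```python
import importlib.machinery
import importlib.util
import json
import os


def find_spec(module, paths):
    # importlib.util.find_spec's second argument is a package name for
    # relative imports, not a search path; a list has to go through
    # PathFinder, which does take a path list.
    finder = (
        importlib.machinery.PathFinder().find_spec
        if isinstance(paths, list)
        else importlib.util.find_spec
    )
    return finder(module, paths)


# Locate the stdlib json package via an explicit path list:
stdlib_dir = os.path.dirname(os.path.dirname(json.__file__))
print(find_spec("json", [stdlib_dir]).origin)
```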
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setuptools/_imp.py`
Content:
```
1 """
2 Re-implementation of find_module and get_frozen_object
3 from the deprecated imp module.
4 """
5
6 import os
7 import importlib.util
8 import importlib.machinery
9
10 from .py34compat import module_from_spec
11
12
13 PY_SOURCE = 1
14 PY_COMPILED = 2
15 C_EXTENSION = 3
16 C_BUILTIN = 6
17 PY_FROZEN = 7
18
19
20 def find_module(module, paths=None):
21 """Just like 'imp.find_module()', but with package support"""
22 spec = importlib.util.find_spec(module, paths)
23 if spec is None:
24 raise ImportError("Can't find %s" % module)
25 if not spec.has_location and hasattr(spec, 'submodule_search_locations'):
26 spec = importlib.util.spec_from_loader('__init__.py', spec.loader)
27
28 kind = -1
29 file = None
30 static = isinstance(spec.loader, type)
31 if spec.origin == 'frozen' or static and issubclass(
32 spec.loader, importlib.machinery.FrozenImporter):
33 kind = PY_FROZEN
34 path = None # imp compabilty
35 suffix = mode = '' # imp compability
36 elif spec.origin == 'built-in' or static and issubclass(
37 spec.loader, importlib.machinery.BuiltinImporter):
38 kind = C_BUILTIN
39 path = None # imp compabilty
40 suffix = mode = '' # imp compability
41 elif spec.has_location:
42 path = spec.origin
43 suffix = os.path.splitext(path)[1]
44 mode = 'r' if suffix in importlib.machinery.SOURCE_SUFFIXES else 'rb'
45
46 if suffix in importlib.machinery.SOURCE_SUFFIXES:
47 kind = PY_SOURCE
48 elif suffix in importlib.machinery.BYTECODE_SUFFIXES:
49 kind = PY_COMPILED
50 elif suffix in importlib.machinery.EXTENSION_SUFFIXES:
51 kind = C_EXTENSION
52
53 if kind in {PY_SOURCE, PY_COMPILED}:
54 file = open(path, mode)
55 else:
56 path = None
57 suffix = mode = ''
58
59 return file, path, (suffix, mode, kind)
60
61
62 def get_frozen_object(module, paths=None):
63 spec = importlib.util.find_spec(module, paths)
64 if not spec:
65 raise ImportError("Can't find %s" % module)
66 return spec.loader.get_code(module)
67
68
69 def get_module(module, paths, info):
70 spec = importlib.util.find_spec(module, paths)
71 if not spec:
72 raise ImportError("Can't find %s" % module)
73 return module_from_spec(spec)
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setuptools/_imp.py b/setuptools/_imp.py
--- a/setuptools/_imp.py
+++ b/setuptools/_imp.py
@@ -17,9 +17,18 @@
PY_FROZEN = 7
+def find_spec(module, paths):
+ finder = (
+ importlib.machinery.PathFinder().find_spec
+ if isinstance(paths, list) else
+ importlib.util.find_spec
+ )
+ return finder(module, paths)
+
+
def find_module(module, paths=None):
"""Just like 'imp.find_module()', but with package support"""
- spec = importlib.util.find_spec(module, paths)
+ spec = find_spec(module, paths)
if spec is None:
raise ImportError("Can't find %s" % module)
if not spec.has_location and hasattr(spec, 'submodule_search_locations'):
@@ -60,14 +69,14 @@
def get_frozen_object(module, paths=None):
- spec = importlib.util.find_spec(module, paths)
+ spec = find_spec(module, paths)
if not spec:
raise ImportError("Can't find %s" % module)
return spec.loader.get_code(module)
def get_module(module, paths, info):
- spec = importlib.util.find_spec(module, paths)
+ spec = find_spec(module, paths)
if not spec:
raise ImportError("Can't find %s" % module)
return module_from_spec(spec)
| {"golden_diff": "diff --git a/setuptools/_imp.py b/setuptools/_imp.py\n--- a/setuptools/_imp.py\n+++ b/setuptools/_imp.py\n@@ -17,9 +17,18 @@\n PY_FROZEN = 7\n \n \n+def find_spec(module, paths):\n+ finder = (\n+ importlib.machinery.PathFinder().find_spec\n+ if isinstance(paths, list) else\n+ importlib.util.find_spec\n+ )\n+ return finder(module, paths)\n+\n+\n def find_module(module, paths=None):\n \"\"\"Just like 'imp.find_module()', but with package support\"\"\"\n- spec = importlib.util.find_spec(module, paths)\n+ spec = find_spec(module, paths)\n if spec is None:\n raise ImportError(\"Can't find %s\" % module)\n if not spec.has_location and hasattr(spec, 'submodule_search_locations'):\n@@ -60,14 +69,14 @@\n \n \n def get_frozen_object(module, paths=None):\n- spec = importlib.util.find_spec(module, paths)\n+ spec = find_spec(module, paths)\n if not spec:\n raise ImportError(\"Can't find %s\" % module)\n return spec.loader.get_code(module)\n \n \n def get_module(module, paths, info):\n- spec = importlib.util.find_spec(module, paths)\n+ spec = find_spec(module, paths)\n if not spec:\n raise ImportError(\"Can't find %s\" % module)\n return module_from_spec(spec)\n", "issue": "TestDepends.testRequire regression in 41.6.0 (py3)\nWhile trying to build the new release of setuptools, I get the following test failure:\r\n\r\n```\r\n==================================================================== FAILURES =====================================================================\r\n_____________________________________________________________ TestDepends.testRequire _____________________________________________________________\r\n\r\nself = <setuptools.tests.test_setuptools.TestDepends object at 0x7fbfae31d710>\r\n\r\n @needs_bytecode\r\n def testRequire(self):\r\n req = Require('Json', '1.0.3', 'json')\r\n \r\n assert req.name == 'Json'\r\n assert req.module == 'json'\r\n assert req.requested_version == '1.0.3'\r\n assert req.attribute == '__version__'\r\n assert req.full_name() == 'Json-1.0.3'\r\n \r\n from json import __version__\r\n assert req.get_version() == __version__\r\n assert req.version_ok('1.0.9')\r\n assert not req.version_ok('0.9.1')\r\n assert not req.version_ok('unknown')\r\n \r\n assert req.is_present()\r\n assert req.is_current()\r\n \r\n req = Require('Json 3000', '03000', 'json', format=LooseVersion)\r\n assert req.is_present()\r\n assert not req.is_current()\r\n assert not req.version_ok('unknown')\r\n \r\n req = Require('Do-what-I-mean', '1.0', 'd-w-i-m')\r\n assert not req.is_present()\r\n assert not req.is_current()\r\n \r\n req = Require('Tests', None, 'tests', homepage=\"http://example.com\")\r\n assert req.format is None\r\n assert req.attribute is None\r\n assert req.requested_version is None\r\n assert req.full_name() == 'Tests'\r\n assert req.homepage == 'http://example.com'\r\n \r\n from setuptools.tests import __path__\r\n paths = [os.path.dirname(p) for p in __path__]\r\n> assert req.is_present(paths)\r\nE AssertionError: assert False\r\nE + where False = <bound method Require.is_present of <setuptools.depends.Require object at 0x7fbfae0d0b38>>(['/tmp/portage/dev-python/setuptools-41.6.0/work/setuptools-41.6.0-python3_5/setuptools'])\r\nE + where <bound method Require.is_present of <setuptools.depends.Require object at 0x7fbfae0d0b38>> = <setuptools.depends.Require object at 0x7fbfae0d0b38>.is_present\r\n\r\nsetuptools/tests/test_setuptools.py:120: AssertionError\r\n```\r\n\r\nI can reproduce it reliably with at least pypy3.6 (7.2.0) & python3.5 (3.5.7). 
I haven't tested other versions yet.\r\n\r\nFull build log: [dev-python:setuptools-41.6.0:20191030-083347.log](https://github.com/pypa/setuptools/files/3787797/dev-python.setuptools-41.6.0.20191030-083347.log)\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nRe-implementation of find_module and get_frozen_object\nfrom the deprecated imp module.\n\"\"\"\n\nimport os\nimport importlib.util\nimport importlib.machinery\n\nfrom .py34compat import module_from_spec\n\n\nPY_SOURCE = 1\nPY_COMPILED = 2\nC_EXTENSION = 3\nC_BUILTIN = 6\nPY_FROZEN = 7\n\n\ndef find_module(module, paths=None):\n \"\"\"Just like 'imp.find_module()', but with package support\"\"\"\n spec = importlib.util.find_spec(module, paths)\n if spec is None:\n raise ImportError(\"Can't find %s\" % module)\n if not spec.has_location and hasattr(spec, 'submodule_search_locations'):\n spec = importlib.util.spec_from_loader('__init__.py', spec.loader)\n\n kind = -1\n file = None\n static = isinstance(spec.loader, type)\n if spec.origin == 'frozen' or static and issubclass(\n spec.loader, importlib.machinery.FrozenImporter):\n kind = PY_FROZEN\n path = None # imp compabilty\n suffix = mode = '' # imp compability\n elif spec.origin == 'built-in' or static and issubclass(\n spec.loader, importlib.machinery.BuiltinImporter):\n kind = C_BUILTIN\n path = None # imp compabilty\n suffix = mode = '' # imp compability\n elif spec.has_location:\n path = spec.origin\n suffix = os.path.splitext(path)[1]\n mode = 'r' if suffix in importlib.machinery.SOURCE_SUFFIXES else 'rb'\n\n if suffix in importlib.machinery.SOURCE_SUFFIXES:\n kind = PY_SOURCE\n elif suffix in importlib.machinery.BYTECODE_SUFFIXES:\n kind = PY_COMPILED\n elif suffix in importlib.machinery.EXTENSION_SUFFIXES:\n kind = C_EXTENSION\n\n if kind in {PY_SOURCE, PY_COMPILED}:\n file = open(path, mode)\n else:\n path = None\n suffix = mode = ''\n\n return file, path, (suffix, mode, kind)\n\n\ndef get_frozen_object(module, paths=None):\n spec = importlib.util.find_spec(module, paths)\n if not spec:\n raise ImportError(\"Can't find %s\" % module)\n return spec.loader.get_code(module)\n\n\ndef get_module(module, paths, info):\n spec = importlib.util.find_spec(module, paths)\n if not spec:\n raise ImportError(\"Can't find %s\" % module)\n return module_from_spec(spec)\n", "path": "setuptools/_imp.py"}], "after_files": [{"content": "\"\"\"\nRe-implementation of find_module and get_frozen_object\nfrom the deprecated imp module.\n\"\"\"\n\nimport os\nimport importlib.util\nimport importlib.machinery\n\nfrom .py34compat import module_from_spec\n\n\nPY_SOURCE = 1\nPY_COMPILED = 2\nC_EXTENSION = 3\nC_BUILTIN = 6\nPY_FROZEN = 7\n\n\ndef find_spec(module, paths):\n finder = (\n importlib.machinery.PathFinder().find_spec\n if isinstance(paths, list) else\n importlib.util.find_spec\n )\n return finder(module, paths)\n\n\ndef find_module(module, paths=None):\n \"\"\"Just like 'imp.find_module()', but with package support\"\"\"\n spec = find_spec(module, paths)\n if spec is None:\n raise ImportError(\"Can't find %s\" % module)\n if not spec.has_location and hasattr(spec, 'submodule_search_locations'):\n spec = importlib.util.spec_from_loader('__init__.py', spec.loader)\n\n kind = -1\n file = None\n static = isinstance(spec.loader, type)\n if spec.origin == 'frozen' or static and issubclass(\n spec.loader, importlib.machinery.FrozenImporter):\n kind = PY_FROZEN\n path = None # imp compabilty\n suffix = mode = '' # imp compability\n elif spec.origin == 'built-in' or static and issubclass(\n spec.loader, 
importlib.machinery.BuiltinImporter):\n kind = C_BUILTIN\n path = None # imp compabilty\n suffix = mode = '' # imp compability\n elif spec.has_location:\n path = spec.origin\n suffix = os.path.splitext(path)[1]\n mode = 'r' if suffix in importlib.machinery.SOURCE_SUFFIXES else 'rb'\n\n if suffix in importlib.machinery.SOURCE_SUFFIXES:\n kind = PY_SOURCE\n elif suffix in importlib.machinery.BYTECODE_SUFFIXES:\n kind = PY_COMPILED\n elif suffix in importlib.machinery.EXTENSION_SUFFIXES:\n kind = C_EXTENSION\n\n if kind in {PY_SOURCE, PY_COMPILED}:\n file = open(path, mode)\n else:\n path = None\n suffix = mode = ''\n\n return file, path, (suffix, mode, kind)\n\n\ndef get_frozen_object(module, paths=None):\n spec = find_spec(module, paths)\n if not spec:\n raise ImportError(\"Can't find %s\" % module)\n return spec.loader.get_code(module)\n\n\ndef get_module(module, paths, info):\n spec = find_spec(module, paths)\n if not spec:\n raise ImportError(\"Can't find %s\" % module)\n return module_from_spec(spec)\n", "path": "setuptools/_imp.py"}]} | 1,659 | 323 |
gh_patches_debug_7219 | rasdani/github-patches | git_diff | spack__spack-18478 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
nn-c uses invalid self.compiler.pic_flag? (breaks nn-c build, via elmerfem build)
These lines fail because there is no such member; looking at other packages, it seems that flags like
```
self.compiler.cc_pic_flag
self.compiler.cxx_pic_flag
self.compiler.fc_pic_flag
#or ?
self.compiler.f77_pic_flag
```
would be appropriate.
https://github.com/spack/spack/blob/601f97d8a50b1840df9b056a34256b6dd2b54ce3/var/spack/repos/builtin/packages/nn-c/package.py#L29-L31
I triggered this on recent `devel` (today) by
```
spack install --test=root elmerfem@devel +mpi +hypre +lua +mumps +openmp +scatt2d +trilinos +zoltan
```
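
For reference, the corrected `configure_args` (matching the fix below) uses the per-language attributes:

```python
def configure_args(self):
    args = []
    if '+pic' in self.spec:
        # Spack compilers expose per-language PIC flags rather than a
        # single generic pic_flag attribute.
        args.extend([
            'CFLAGS={0}'.format(self.compiler.cc_pic_flag),
            'CXXFLAGS={0}'.format(self.compiler.cxx_pic_flag),
            'FFLAGS={0}'.format(self.compiler.fc_pic_flag),
        ])
    return args
```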
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `var/spack/repos/builtin/packages/nn-c/package.py`
Content:
```
1 # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
2 # Spack Project Developers. See the top-level COPYRIGHT file for details.
3 #
4 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
5
6 from spack import *
7
8
9 class NnC(AutotoolsPackage):
10 """nn: Natural Neighbours interpolation. nn is a C code
11 for Natural Neighbours interpolation of 2D scattered data.
12 It provides a C library and a command line utility nnbathy."""
13
14 homepage = "https://github.com/sakov/nn-c"
15 git = "https://github.com/sakov/nn-c.git"
16
17 version('master', branch='master')
18 version('1.86.2', commit='343c7784d38d3270d75d450569fc0b64767c37e9')
19
20 variant('pic', default=True,
21 description='Produce position-independent code (for shared libs)')
22
23 configure_directory = 'nn'
24
25 def configure_args(self):
26 args = []
27 if '+pic' in self.spec:
28 args.extend([
29 'CFLAGS={0}'.format(self.compiler.pic_flag),
30 'CXXFLAGS={0}'.format(self.compiler.pic_flag),
31 'FFLAGS={0}'.format(self.compiler.pic_flag)
32 ])
33 return args
34
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/var/spack/repos/builtin/packages/nn-c/package.py b/var/spack/repos/builtin/packages/nn-c/package.py
--- a/var/spack/repos/builtin/packages/nn-c/package.py
+++ b/var/spack/repos/builtin/packages/nn-c/package.py
@@ -26,8 +26,8 @@
args = []
if '+pic' in self.spec:
args.extend([
- 'CFLAGS={0}'.format(self.compiler.pic_flag),
- 'CXXFLAGS={0}'.format(self.compiler.pic_flag),
- 'FFLAGS={0}'.format(self.compiler.pic_flag)
+ 'CFLAGS={0}'.format(self.compiler.cc_pic_flag),
+ 'CXXFLAGS={0}'.format(self.compiler.cxx_pic_flag),
+ 'FFLAGS={0}'.format(self.compiler.fc_pic_flag)
])
return args
| {"golden_diff": "diff --git a/var/spack/repos/builtin/packages/nn-c/package.py b/var/spack/repos/builtin/packages/nn-c/package.py\n--- a/var/spack/repos/builtin/packages/nn-c/package.py\n+++ b/var/spack/repos/builtin/packages/nn-c/package.py\n@@ -26,8 +26,8 @@\n args = []\n if '+pic' in self.spec:\n args.extend([\n- 'CFLAGS={0}'.format(self.compiler.pic_flag),\n- 'CXXFLAGS={0}'.format(self.compiler.pic_flag),\n- 'FFLAGS={0}'.format(self.compiler.pic_flag)\n+ 'CFLAGS={0}'.format(self.compiler.cc_pic_flag),\n+ 'CXXFLAGS={0}'.format(self.compiler.cxx_pic_flag),\n+ 'FFLAGS={0}'.format(self.compiler.fc_pic_flag)\n ])\n return args\n", "issue": "nn-c uses invalid self.compiler.pic_flag? (breaks nn-c build, via elmerfem build)\nThese lines fail, because there is no such member, and looking at other packages, it seems that flags like\r\n```\r\nself.compiler.cc_pic_flag\r\nself.compiler.cxx_pic_flag\r\nself.compiler.fc_pic_flag\r\n#or ?\r\nself.compiler.f77_pic_flag\r\n```\r\nwould be appropriate.\r\n\r\nhttps://github.com/spack/spack/blob/601f97d8a50b1840df9b056a34256b6dd2b54ce3/var/spack/repos/builtin/packages/nn-c/package.py#L29-L31\r\n\r\nI triggered this on recent `devel` (today) by\r\n```\r\nspack install --test=root elmerfem@devel +mpi +hypre +lua +mumps +openmp +scatt2d +trilinos +zoltan\r\n```\n", "before_files": [{"content": "# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass NnC(AutotoolsPackage):\n \"\"\"nn: Natural Neighbours interpolation. nn is a C code\n for Natural Neighbours interpolation of 2D scattered data.\n It provides a C library and a command line utility nnbathy.\"\"\"\n\n homepage = \"https://github.com/sakov/nn-c\"\n git = \"https://github.com/sakov/nn-c.git\"\n\n version('master', branch='master')\n version('1.86.2', commit='343c7784d38d3270d75d450569fc0b64767c37e9')\n\n variant('pic', default=True,\n description='Produce position-independent code (for shared libs)')\n\n configure_directory = 'nn'\n\n def configure_args(self):\n args = []\n if '+pic' in self.spec:\n args.extend([\n 'CFLAGS={0}'.format(self.compiler.pic_flag),\n 'CXXFLAGS={0}'.format(self.compiler.pic_flag),\n 'FFLAGS={0}'.format(self.compiler.pic_flag)\n ])\n return args\n", "path": "var/spack/repos/builtin/packages/nn-c/package.py"}], "after_files": [{"content": "# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass NnC(AutotoolsPackage):\n \"\"\"nn: Natural Neighbours interpolation. 
nn is a C code\n for Natural Neighbours interpolation of 2D scattered data.\n It provides a C library and a command line utility nnbathy.\"\"\"\n\n homepage = \"https://github.com/sakov/nn-c\"\n git = \"https://github.com/sakov/nn-c.git\"\n\n version('master', branch='master')\n version('1.86.2', commit='343c7784d38d3270d75d450569fc0b64767c37e9')\n\n variant('pic', default=True,\n description='Produce position-independent code (for shared libs)')\n\n configure_directory = 'nn'\n\n def configure_args(self):\n args = []\n if '+pic' in self.spec:\n args.extend([\n 'CFLAGS={0}'.format(self.compiler.cc_pic_flag),\n 'CXXFLAGS={0}'.format(self.compiler.cxx_pic_flag),\n 'FFLAGS={0}'.format(self.compiler.fc_pic_flag)\n ])\n return args\n", "path": "var/spack/repos/builtin/packages/nn-c/package.py"}]} | 843 | 187 |
gh_patches_debug_392 | rasdani/github-patches | git_diff | Nitrate__Nitrate-527 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove dependency mock
Use `unittest.mock` instead.
--- END ISSUE ---
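For context, the replacement is mechanical: `unittest.mock` has been part of the standard library since Python 3.3, and this project already requires Python >= 3.6, so test code changes roughly like this (illustrative sketch, not taken from the Nitrate test suite):

```python
# Before: third-party backport, pulled in via extras_require['tests']
# from mock import patch

# After: standard library, no extra dependency needed
from unittest.mock import patch


@patch('os.getcwd', return_value='/tmp')
def test_example(mock_getcwd):
    import os
    assert os.getcwd() == '/tmp'
```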
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 from setuptools import setup, find_packages
4
5
6 with open('VERSION.txt', 'r') as f:
7 pkg_version = f.read().strip()
8
9
10 def get_long_description():
11 with open('README.rst', 'r') as f:
12 return f.read()
13
14
15 install_requires = [
16 'beautifulsoup4 >= 4.1.1',
17 'django >= 2.0,<3.0',
18 'django-contrib-comments == 1.9.1',
19 'django-tinymce == 2.7.0',
20 'django-uuslug == 1.1.8',
21 'html2text',
22 'odfpy >= 0.9.6',
23 'python-bugzilla',
24 'xmltodict',
25 'kobo == 0.9.0'
26 ]
27
28 extras_require = {
29 'mysql': ['mysqlclient >= 1.2.3'],
30 'pgsql': ['psycopg2 == 2.7.5'],
31
32 # Required for tcms.auth.backends.KerberosBackend
33 'krbauth': [
34 'kerberos == 1.2.5'
35 ],
36
37 # Packages for building documentation
38 'docs': [
39 'Sphinx >= 1.1.2',
40 'sphinx_rtd_theme',
41 ],
42
43 # Necessary packages for running tests
44 'tests': [
45 'beautifulsoup4',
46 'coverage',
47 'factory_boy',
48 'flake8',
49 'mock',
50 'pytest',
51 'pytest-cov',
52 'pytest-django',
53 ],
54
55 # Contain tools that assists the development
56 'devtools': [
57 'django-debug-toolbar',
58 'tox',
59 'django-extensions',
60 'pygraphviz',
61 ],
62
63 # Required packages required to run async tasks
64 'async': [
65 'celery == 4.2.0',
66 ],
67
68 'multiauth': [
69 'social-auth-app-django == 3.1.0',
70 ]
71 }
72
73 setup(
74 name='nitrate-tcms',
75 version=pkg_version,
76 description='A full-featured Test Case Management System',
77 long_description=get_long_description(),
78 author='Nitrate Team',
79 maintainer='Chenxiong Qi',
80 maintainer_email='[email protected]',
81 url='https://github.com/Nitrate/Nitrate/',
82 license='GPLv2+',
83 keywords='test case',
84 install_requires=install_requires,
85 extras_require=extras_require,
86 python_requires='>=3.6',
87 package_dir={'': 'src'},
88 packages=find_packages('src', exclude=['test*']),
89 include_package_data=True,
90 zip_safe=False,
91 classifiers=[
92 'Framework :: Django',
93 'Framework :: Django :: 2.0',
94 'Framework :: Django :: 2.1',
95 'Framework :: Django :: 2.2',
96 'Intended Audience :: Developers',
97 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
98 'Programming Language :: Python :: 3',
99 'Programming Language :: Python :: 3.6',
100 'Programming Language :: Python :: 3.7',
101 'Programming Language :: Python :: 3 :: Only',
102 'Topic :: Software Development :: Quality Assurance',
103 'Topic :: Software Development :: Testing',
104 ],
105 project_urls={
106 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues',
107 'Source Code': 'https://github.com/Nitrate/Nitrate',
108 'Documentation': 'https://nitrate.readthedocs.io/',
109 },
110 )
111
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -46,7 +46,6 @@
'coverage',
'factory_boy',
'flake8',
- 'mock',
'pytest',
'pytest-cov',
'pytest-django',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -46,7 +46,6 @@\n 'coverage',\n 'factory_boy',\n 'flake8',\n- 'mock',\n 'pytest',\n 'pytest-cov',\n 'pytest-django',\n", "issue": "Remove dependency mock\nUse `unittest.mock` instead.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\ninstall_requires = [\n 'beautifulsoup4 >= 4.1.1',\n 'django >= 2.0,<3.0',\n 'django-contrib-comments == 1.9.1',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n 'html2text',\n 'odfpy >= 0.9.6',\n 'python-bugzilla',\n 'xmltodict',\n 'kobo == 0.9.0'\n]\n\nextras_require = {\n 'mysql': ['mysqlclient >= 1.2.3'],\n 'pgsql': ['psycopg2 == 2.7.5'],\n\n # Required for tcms.auth.backends.KerberosBackend\n 'krbauth': [\n 'kerberos == 1.2.5'\n ],\n\n # Packages for building documentation\n 'docs': [\n 'Sphinx >= 1.1.2',\n 'sphinx_rtd_theme',\n ],\n\n # Necessary packages for running tests\n 'tests': [\n 'beautifulsoup4',\n 'coverage',\n 'factory_boy',\n 'flake8',\n 'mock',\n 'pytest',\n 'pytest-cov',\n 'pytest-django',\n ],\n\n # Contain tools that assists the development\n 'devtools': [\n 'django-debug-toolbar',\n 'tox',\n 'django-extensions',\n 'pygraphviz',\n ],\n\n # Required packages required to run async tasks\n 'async': [\n 'celery == 4.2.0',\n ],\n\n 'multiauth': [\n 'social-auth-app-django == 3.1.0',\n ]\n}\n\nsetup(\n name='nitrate-tcms',\n version=pkg_version,\n description='A full-featured Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n install_requires=install_requires,\n extras_require=extras_require,\n python_requires='>=3.6',\n package_dir={'': 'src'},\n packages=find_packages('src', exclude=['test*']),\n include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Framework :: Django',\n 'Framework :: Django :: 2.0',\n 'Framework :: Django :: 2.1',\n 'Framework :: Django :: 2.2',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n project_urls={\n 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues',\n 'Source Code': 'https://github.com/Nitrate/Nitrate',\n 'Documentation': 'https://nitrate.readthedocs.io/',\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\ninstall_requires = [\n 'beautifulsoup4 >= 4.1.1',\n 'django >= 2.0,<3.0',\n 'django-contrib-comments == 1.9.1',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n 'html2text',\n 'odfpy >= 0.9.6',\n 'python-bugzilla',\n 'xmltodict',\n 'kobo == 0.9.0'\n]\n\nextras_require = {\n 'mysql': ['mysqlclient >= 1.2.3'],\n 'pgsql': ['psycopg2 == 2.7.5'],\n\n # Required for 
tcms.auth.backends.KerberosBackend\n 'krbauth': [\n 'kerberos == 1.2.5'\n ],\n\n # Packages for building documentation\n 'docs': [\n 'Sphinx >= 1.1.2',\n 'sphinx_rtd_theme',\n ],\n\n # Necessary packages for running tests\n 'tests': [\n 'beautifulsoup4',\n 'coverage',\n 'factory_boy',\n 'flake8',\n 'pytest',\n 'pytest-cov',\n 'pytest-django',\n ],\n\n # Contain tools that assists the development\n 'devtools': [\n 'django-debug-toolbar',\n 'tox',\n 'django-extensions',\n 'pygraphviz',\n ],\n\n # Required packages required to run async tasks\n 'async': [\n 'celery == 4.2.0',\n ],\n\n 'multiauth': [\n 'social-auth-app-django == 3.1.0',\n ]\n}\n\nsetup(\n name='nitrate-tcms',\n version=pkg_version,\n description='A full-featured Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n install_requires=install_requires,\n extras_require=extras_require,\n python_requires='>=3.6',\n package_dir={'': 'src'},\n packages=find_packages('src', exclude=['test*']),\n include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Framework :: Django',\n 'Framework :: Django :: 2.0',\n 'Framework :: Django :: 2.1',\n 'Framework :: Django :: 2.2',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n project_urls={\n 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues',\n 'Source Code': 'https://github.com/Nitrate/Nitrate',\n 'Documentation': 'https://nitrate.readthedocs.io/',\n },\n)\n", "path": "setup.py"}]} | 1,278 | 68 |
gh_patches_debug_21464 | rasdani/github-patches | git_diff | netbox-community__netbox-9547 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Provide Markdown help with links to local documentation pages
### NetBox version
v3.2.4
### Feature type
New functionality
### Proposed functionality
Currently, NetBox ships a documentation package as part of the main release (see https://github.com/netbox-community/netbox/issues/6328).

I propose to change the Markdown help link available in some text areas (for example, in comment fields), which currently points to "https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet", to a URL served from the offline documentation package.

I propose that a new section be created in the documentation package, based on the GitHub page above, and that the corresponding link be used in these help fields.

If this section is placed under references, the final URL could be something like:
https://netboxfqdn/static/docs/reference/markdown/
### Use case
The following use cases are applicable:
Provide documentation that always matches the specific version being used, instead of the online version, which refers to the latest version.
Provide access to the documentation for systems installed in an isolated management environment without internet access.
### Database changes
none
### External dependencies
none
--- END ISSUE ---
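For illustration, a minimal sketch of how such a link can be built so that it always resolves against the documentation bundled with the running instance (this uses Django's standard `static()` helper; `docs/reference/markdown/` is the path proposed above):

```python
from django.templatetags.static import static

# static() resolves against STATIC_URL, so the link targets the docs
# shipped with this NetBox version and works without internet access.
markdown_help_url = static('docs/reference/markdown/')

help_text = (
    f'<a href="{markdown_help_url}" target="_blank" tabindex="-1">'
    f'Markdown</a> syntax is supported'
)
```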
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netbox/utilities/forms/fields/fields.py`
Content:
```
1 import json
2
3 from django import forms
4 from django.db.models import Count
5 from django.forms.fields import JSONField as _JSONField, InvalidJSONInput
6 from netaddr import AddrFormatError, EUI
7
8 from utilities.forms import widgets
9 from utilities.validators import EnhancedURLValidator
10
11 __all__ = (
12 'ChoiceField',
13 'ColorField',
14 'CommentField',
15 'JSONField',
16 'LaxURLField',
17 'MACAddressField',
18 'MultipleChoiceField',
19 'SlugField',
20 'TagFilterField',
21 )
22
23
24 class CommentField(forms.CharField):
25 """
26 A textarea with support for Markdown rendering. Exists mostly just to add a standard `help_text`.
27 """
28 widget = forms.Textarea
29 # TODO: Port Markdown cheat sheet to internal documentation
30 help_text = """
31 <i class="mdi mdi-information-outline"></i>
32 <a href="https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet" target="_blank" tabindex="-1">
33 Markdown</a> syntax is supported
34 """
35
36 def __init__(self, *, label='', help_text=help_text, required=False, **kwargs):
37 super().__init__(label=label, help_text=help_text, required=required, **kwargs)
38
39
40 class SlugField(forms.SlugField):
41 """
42 Extend Django's built-in SlugField to automatically populate from a field called `name` unless otherwise specified.
43
44 Parameters:
45 slug_source: Name of the form field from which the slug value will be derived
46 """
47 widget = widgets.SlugWidget
48 help_text = "URL-friendly unique shorthand"
49
50 def __init__(self, *, slug_source='name', help_text=help_text, **kwargs):
51 super().__init__(help_text=help_text, **kwargs)
52
53 self.widget.attrs['slug-source'] = slug_source
54
55
56 class ColorField(forms.CharField):
57 """
58 A field which represents a color value in hexadecimal `RRGGBB` format. Utilizes NetBox's `ColorSelect` widget to
59 render choices.
60 """
61 widget = widgets.ColorSelect
62
63
64 class TagFilterField(forms.MultipleChoiceField):
65 """
66 A filter field for the tags of a model. Only the tags used by a model are displayed.
67
68 :param model: The model of the filter
69 """
70 widget = widgets.StaticSelectMultiple
71
72 def __init__(self, model, *args, **kwargs):
73 def get_choices():
74 tags = model.tags.annotate(
75 count=Count('extras_taggeditem_items')
76 ).order_by('name')
77 return [
78 (str(tag.slug), '{} ({})'.format(tag.name, tag.count)) for tag in tags
79 ]
80
81 # Choices are fetched each time the form is initialized
82 super().__init__(label='Tags', choices=get_choices, required=False, *args, **kwargs)
83
84
85 class LaxURLField(forms.URLField):
86 """
87 Modifies Django's built-in URLField to remove the requirement for fully-qualified domain names
88 (e.g. http://myserver/ is valid)
89 """
90 default_validators = [EnhancedURLValidator()]
91
92
93 class JSONField(_JSONField):
94 """
95 Custom wrapper around Django's built-in JSONField to avoid presenting "null" as the default text.
96 """
97 def __init__(self, *args, **kwargs):
98 super().__init__(*args, **kwargs)
99 if not self.help_text:
100 self.help_text = 'Enter context data in <a href="https://json.org/">JSON</a> format.'
101 self.widget.attrs['placeholder'] = ''
102
103 def prepare_value(self, value):
104 if isinstance(value, InvalidJSONInput):
105 return value
106 if value is None:
107 return ''
108 return json.dumps(value, sort_keys=True, indent=4)
109
110
111 class MACAddressField(forms.Field):
112 """
113 Validates a 48-bit MAC address.
114 """
115 widget = forms.CharField
116 default_error_messages = {
117 'invalid': 'MAC address must be in EUI-48 format',
118 }
119
120 def to_python(self, value):
121 value = super().to_python(value)
122
123 # Validate MAC address format
124 try:
125 value = EUI(value.strip())
126 except AddrFormatError:
127 raise forms.ValidationError(self.error_messages['invalid'], code='invalid')
128
129 return value
130
131
132 #
133 # Choice fields
134 #
135
136 class ChoiceField(forms.ChoiceField):
137 """
138 Overrides Django's built-in `ChoiceField` to use NetBox's `StaticSelect` widget
139 """
140 widget = widgets.StaticSelect
141
142
143 class MultipleChoiceField(forms.MultipleChoiceField):
144 """
145 Overrides Django's built-in `MultipleChoiceField` to use NetBox's `StaticSelectMultiple` widget
146 """
147 widget = widgets.StaticSelectMultiple
148
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/netbox/utilities/forms/fields/fields.py b/netbox/utilities/forms/fields/fields.py
--- a/netbox/utilities/forms/fields/fields.py
+++ b/netbox/utilities/forms/fields/fields.py
@@ -3,6 +3,7 @@
from django import forms
from django.db.models import Count
from django.forms.fields import JSONField as _JSONField, InvalidJSONInput
+from django.templatetags.static import static
from netaddr import AddrFormatError, EUI
from utilities.forms import widgets
@@ -26,10 +27,9 @@
A textarea with support for Markdown rendering. Exists mostly just to add a standard `help_text`.
"""
widget = forms.Textarea
- # TODO: Port Markdown cheat sheet to internal documentation
- help_text = """
+ help_text = f"""
<i class="mdi mdi-information-outline"></i>
- <a href="https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet" target="_blank" tabindex="-1">
+ <a href="{static('docs/reference/markdown/')}" target="_blank" tabindex="-1">
Markdown</a> syntax is supported
"""
| {"golden_diff": "diff --git a/netbox/utilities/forms/fields/fields.py b/netbox/utilities/forms/fields/fields.py\n--- a/netbox/utilities/forms/fields/fields.py\n+++ b/netbox/utilities/forms/fields/fields.py\n@@ -3,6 +3,7 @@\n from django import forms\n from django.db.models import Count\n from django.forms.fields import JSONField as _JSONField, InvalidJSONInput\n+from django.templatetags.static import static\n from netaddr import AddrFormatError, EUI\n \n from utilities.forms import widgets\n@@ -26,10 +27,9 @@\n A textarea with support for Markdown rendering. Exists mostly just to add a standard `help_text`.\n \"\"\"\n widget = forms.Textarea\n- # TODO: Port Markdown cheat sheet to internal documentation\n- help_text = \"\"\"\n+ help_text = f\"\"\"\n <i class=\"mdi mdi-information-outline\"></i>\n- <a href=\"https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet\" target=\"_blank\" tabindex=\"-1\">\n+ <a href=\"{static('docs/reference/markdown/')}\" target=\"_blank\" tabindex=\"-1\">\n Markdown</a> syntax is supported\n \"\"\"\n", "issue": "Provide Markdown help with links to local documentation pages\n### NetBox version\n\nv3.2.4\n\n### Feature type\n\nNew functionality\n\n### Proposed functionality\n\nCurrently netbox supports a documentation package as part of the main release due to https://github.com/netbox-community/netbox/issues/6328\r\n\r\nI propose to change the Markdown assistance available in some text areas ( for example in comments fields) that is currently going to \"https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet\" to another URL as part of the offline documentation package \r\n\r\n\r\nI propose that a new section in the documentation package is created, based in the github url above, and use the corresponding link within this assistance fields. \r\n\r\nThe final url could be something like, if this section is placed under references. \r\nhttps://netboxfqdn/static/docs/reference/markdown/\n\n### Use case\n\n\r\n\r\nThe following use cases are applicable:\r\n\r\n Provide the correct documentation that is always related to the specific version being used, instead of the online version that refers the latest version.\r\n Provide access to the documentation to system installed in a isolated management environment that do not have internet access.\r\n\n\n### Database changes\n\nnone\n\n### External dependencies\n\nnone\n", "before_files": [{"content": "import json\n\nfrom django import forms\nfrom django.db.models import Count\nfrom django.forms.fields import JSONField as _JSONField, InvalidJSONInput\nfrom netaddr import AddrFormatError, EUI\n\nfrom utilities.forms import widgets\nfrom utilities.validators import EnhancedURLValidator\n\n__all__ = (\n 'ChoiceField',\n 'ColorField',\n 'CommentField',\n 'JSONField',\n 'LaxURLField',\n 'MACAddressField',\n 'MultipleChoiceField',\n 'SlugField',\n 'TagFilterField',\n)\n\n\nclass CommentField(forms.CharField):\n \"\"\"\n A textarea with support for Markdown rendering. 
Exists mostly just to add a standard `help_text`.\n \"\"\"\n widget = forms.Textarea\n # TODO: Port Markdown cheat sheet to internal documentation\n help_text = \"\"\"\n <i class=\"mdi mdi-information-outline\"></i>\n <a href=\"https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet\" target=\"_blank\" tabindex=\"-1\">\n Markdown</a> syntax is supported\n \"\"\"\n\n def __init__(self, *, label='', help_text=help_text, required=False, **kwargs):\n super().__init__(label=label, help_text=help_text, required=required, **kwargs)\n\n\nclass SlugField(forms.SlugField):\n \"\"\"\n Extend Django's built-in SlugField to automatically populate from a field called `name` unless otherwise specified.\n\n Parameters:\n slug_source: Name of the form field from which the slug value will be derived\n \"\"\"\n widget = widgets.SlugWidget\n help_text = \"URL-friendly unique shorthand\"\n\n def __init__(self, *, slug_source='name', help_text=help_text, **kwargs):\n super().__init__(help_text=help_text, **kwargs)\n\n self.widget.attrs['slug-source'] = slug_source\n\n\nclass ColorField(forms.CharField):\n \"\"\"\n A field which represents a color value in hexadecimal `RRGGBB` format. Utilizes NetBox's `ColorSelect` widget to\n render choices.\n \"\"\"\n widget = widgets.ColorSelect\n\n\nclass TagFilterField(forms.MultipleChoiceField):\n \"\"\"\n A filter field for the tags of a model. Only the tags used by a model are displayed.\n\n :param model: The model of the filter\n \"\"\"\n widget = widgets.StaticSelectMultiple\n\n def __init__(self, model, *args, **kwargs):\n def get_choices():\n tags = model.tags.annotate(\n count=Count('extras_taggeditem_items')\n ).order_by('name')\n return [\n (str(tag.slug), '{} ({})'.format(tag.name, tag.count)) for tag in tags\n ]\n\n # Choices are fetched each time the form is initialized\n super().__init__(label='Tags', choices=get_choices, required=False, *args, **kwargs)\n\n\nclass LaxURLField(forms.URLField):\n \"\"\"\n Modifies Django's built-in URLField to remove the requirement for fully-qualified domain names\n (e.g. 
http://myserver/ is valid)\n \"\"\"\n default_validators = [EnhancedURLValidator()]\n\n\nclass JSONField(_JSONField):\n \"\"\"\n Custom wrapper around Django's built-in JSONField to avoid presenting \"null\" as the default text.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if not self.help_text:\n self.help_text = 'Enter context data in <a href=\"https://json.org/\">JSON</a> format.'\n self.widget.attrs['placeholder'] = ''\n\n def prepare_value(self, value):\n if isinstance(value, InvalidJSONInput):\n return value\n if value is None:\n return ''\n return json.dumps(value, sort_keys=True, indent=4)\n\n\nclass MACAddressField(forms.Field):\n \"\"\"\n Validates a 48-bit MAC address.\n \"\"\"\n widget = forms.CharField\n default_error_messages = {\n 'invalid': 'MAC address must be in EUI-48 format',\n }\n\n def to_python(self, value):\n value = super().to_python(value)\n\n # Validate MAC address format\n try:\n value = EUI(value.strip())\n except AddrFormatError:\n raise forms.ValidationError(self.error_messages['invalid'], code='invalid')\n\n return value\n\n\n#\n# Choice fields\n#\n\nclass ChoiceField(forms.ChoiceField):\n \"\"\"\n Overrides Django's built-in `ChoiceField` to use NetBox's `StaticSelect` widget\n \"\"\"\n widget = widgets.StaticSelect\n\n\nclass MultipleChoiceField(forms.MultipleChoiceField):\n \"\"\"\n Overrides Django's built-in `MultipleChoiceField` to use NetBox's `StaticSelectMultiple` widget\n \"\"\"\n widget = widgets.StaticSelectMultiple\n", "path": "netbox/utilities/forms/fields/fields.py"}], "after_files": [{"content": "import json\n\nfrom django import forms\nfrom django.db.models import Count\nfrom django.forms.fields import JSONField as _JSONField, InvalidJSONInput\nfrom django.templatetags.static import static\nfrom netaddr import AddrFormatError, EUI\n\nfrom utilities.forms import widgets\nfrom utilities.validators import EnhancedURLValidator\n\n__all__ = (\n 'ChoiceField',\n 'ColorField',\n 'CommentField',\n 'JSONField',\n 'LaxURLField',\n 'MACAddressField',\n 'MultipleChoiceField',\n 'SlugField',\n 'TagFilterField',\n)\n\n\nclass CommentField(forms.CharField):\n \"\"\"\n A textarea with support for Markdown rendering. Exists mostly just to add a standard `help_text`.\n \"\"\"\n widget = forms.Textarea\n help_text = f\"\"\"\n <i class=\"mdi mdi-information-outline\"></i>\n <a href=\"{static('docs/reference/markdown/')}\" target=\"_blank\" tabindex=\"-1\">\n Markdown</a> syntax is supported\n \"\"\"\n\n def __init__(self, *, label='', help_text=help_text, required=False, **kwargs):\n super().__init__(label=label, help_text=help_text, required=required, **kwargs)\n\n\nclass SlugField(forms.SlugField):\n \"\"\"\n Extend Django's built-in SlugField to automatically populate from a field called `name` unless otherwise specified.\n\n Parameters:\n slug_source: Name of the form field from which the slug value will be derived\n \"\"\"\n widget = widgets.SlugWidget\n help_text = \"URL-friendly unique shorthand\"\n\n def __init__(self, *, slug_source='name', help_text=help_text, **kwargs):\n super().__init__(help_text=help_text, **kwargs)\n\n self.widget.attrs['slug-source'] = slug_source\n\n\nclass ColorField(forms.CharField):\n \"\"\"\n A field which represents a color value in hexadecimal `RRGGBB` format. Utilizes NetBox's `ColorSelect` widget to\n render choices.\n \"\"\"\n widget = widgets.ColorSelect\n\n\nclass TagFilterField(forms.MultipleChoiceField):\n \"\"\"\n A filter field for the tags of a model. 
Only the tags used by a model are displayed.\n\n :param model: The model of the filter\n \"\"\"\n widget = widgets.StaticSelectMultiple\n\n def __init__(self, model, *args, **kwargs):\n def get_choices():\n tags = model.tags.annotate(\n count=Count('extras_taggeditem_items')\n ).order_by('name')\n return [\n (str(tag.slug), '{} ({})'.format(tag.name, tag.count)) for tag in tags\n ]\n\n # Choices are fetched each time the form is initialized\n super().__init__(label='Tags', choices=get_choices, required=False, *args, **kwargs)\n\n\nclass LaxURLField(forms.URLField):\n \"\"\"\n Modifies Django's built-in URLField to remove the requirement for fully-qualified domain names\n (e.g. http://myserver/ is valid)\n \"\"\"\n default_validators = [EnhancedURLValidator()]\n\n\nclass JSONField(_JSONField):\n \"\"\"\n Custom wrapper around Django's built-in JSONField to avoid presenting \"null\" as the default text.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if not self.help_text:\n self.help_text = 'Enter context data in <a href=\"https://json.org/\">JSON</a> format.'\n self.widget.attrs['placeholder'] = ''\n\n def prepare_value(self, value):\n if isinstance(value, InvalidJSONInput):\n return value\n if value is None:\n return ''\n return json.dumps(value, sort_keys=True, indent=4)\n\n\nclass MACAddressField(forms.Field):\n \"\"\"\n Validates a 48-bit MAC address.\n \"\"\"\n widget = forms.CharField\n default_error_messages = {\n 'invalid': 'MAC address must be in EUI-48 format',\n }\n\n def to_python(self, value):\n value = super().to_python(value)\n\n # Validate MAC address format\n try:\n value = EUI(value.strip())\n except AddrFormatError:\n raise forms.ValidationError(self.error_messages['invalid'], code='invalid')\n\n return value\n\n\n#\n# Choice fields\n#\n\nclass ChoiceField(forms.ChoiceField):\n \"\"\"\n Overrides Django's built-in `ChoiceField` to use NetBox's `StaticSelect` widget\n \"\"\"\n widget = widgets.StaticSelect\n\n\nclass MultipleChoiceField(forms.MultipleChoiceField):\n \"\"\"\n Overrides Django's built-in `MultipleChoiceField` to use NetBox's `StaticSelectMultiple` widget\n \"\"\"\n widget = widgets.StaticSelectMultiple\n", "path": "netbox/utilities/forms/fields/fields.py"}]} | 1,945 | 265 |
gh_patches_debug_9398 | rasdani/github-patches | git_diff | saulpw__visidata-1890 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
fixed width saver truncates data if columns are not fully expanded
**Small description**
If you save or syscopy a table as `fixed` format, and the visible column width is less than the width of the data in the column, the data is truncated. Also, the resulting file is not a valid fixed width format file as the columns are not aligned with the headers.
**Expected result**
Saving or copying a table when the columns are not fully expanded should yield the same result as when the columns are expanded.
**Actual result with screenshot**

**Steps to reproduce with sample data and a .vd**
[test-vd-fixed.zip](https://github.com/saulpw/visidata/files/11217144/test-vd-fixed.zip)
**Additional context**
saul.pw/VisiData v2.11
--- END ISSUE ---
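To make the failure mode concrete, here is a small self-contained illustration (hypothetical data, not VisiData code) of the difference between sizing a saved column to its visible width and sizing it to the width of the data:

```python
# Hypothetical table: one column whose data is wider than its
# on-screen width of 4 cells.
header = 'name'
values = ['abcdefghij', 'klm']

visible_width = 4
data_width = max(len(v) for v in [header, *values])  # what a fixed-width file needs

# Sizing to the visible width truncates values and misaligns the file:
print(f'{header:<{visible_width}.{visible_width}}')
for v in values:
    print(f'{v:<{visible_width}.{visible_width}}')
# -> name / abcd / klm   (data truncated)

# Sizing to the widest value preserves the data and the alignment:
print(f'{header:<{data_width}}')
for v in values:
    print(f'{v:<{data_width}}')
```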
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `visidata/loaders/fixed_width.py`
Content:
```
1
2 from visidata import VisiData, vd, Sheet, Column, Progress, SequenceSheet
3
4
5 vd.option('fixed_rows', 1000, 'number of rows to check for fixed width columns')
6 vd.option('fixed_maxcols', 0, 'max number of fixed-width columns to create (0 is no max)')
7
8 @VisiData.api
9 def open_fixed(vd, p):
10 return FixedWidthColumnsSheet(p.name, source=p, headerlines=[])
11
12 class FixedWidthColumn(Column):
13 def __init__(self, name, i, j, **kwargs):
14 super().__init__(name, **kwargs)
15 self.i, self.j = i, j
16
17 def calcValue(self, row):
18 return row[0][self.i:self.j]
19
20 def putValue(self, row, value):
21 value = str(value)[:self.j-self.i]
22 j = self.j or len(row)
23 row[0] = row[0][:self.i] + '%-*s' % (j-self.i, value) + row[0][self.j:]
24
25 def columnize(rows):
26 'Generate (i,j) indexes for fixed-width columns found in rows'
27
28 ## find all character columns that are not spaces ever
29 allNonspaces = set()
30 for r in rows:
31 for i, ch in enumerate(r):
32 if not ch.isspace():
33 allNonspaces.add(i)
34
35 colstart = 0
36 prev = 0
37
38 # collapse fields
39 for i in allNonspaces:
40 if i > prev+1:
41 yield colstart, i
42 colstart = i
43 prev = i
44
45 yield colstart, prev+1 # final column gets rest of line
46
47
48 class FixedWidthColumnsSheet(SequenceSheet):
49 rowtype = 'lines' # rowdef: [line] (wrapping in list makes it unique and modifiable)
50 def addRow(self, row, index=None):
51 Sheet.addRow(self, row, index=index)
52
53 def iterload(self):
54 itsource = iter(self.source)
55
56 # compute fixed width columns from first fixed_rows lines
57 maxcols = self.options.fixed_maxcols
58 self.columns = []
59 fixedRows = list([x] for x in self.optlines(itsource, 'fixed_rows'))
60 for i, j in columnize(list(r[0] for r in fixedRows)):
61 if maxcols and self.nCols >= maxcols-1:
62 self.addColumn(FixedWidthColumn('', i, None))
63 break
64 else:
65 self.addColumn(FixedWidthColumn('', i, j))
66
67 yield from fixedRows
68
69 self.setColNames(self.headerlines)
70
71 yield from ([line] for line in itsource)
72
73 def setCols(self, headerlines):
74 self.headerlines = headerlines
75
76
77 @VisiData.api
78 def save_fixed(vd, p, *vsheets):
79 with p.open(mode='w', encoding=vsheets[0].options.save_encoding) as fp:
80 for sheet in vsheets:
81 if len(vsheets) > 1:
82 fp.write('%s\n\n' % sheet.name)
83
84 widths = {} # Column -> width:int
85 # headers
86 for col in Progress(sheet.visibleCols, gerund='sizing'):
87 maxWidth = col.getMaxWidth(sheet.rows)
88 widths[col] = col.width if col.width >= maxWidth else sheet.options.default_width or maxWidth
89 fp.write(('{0:%s} ' % widths[col]).format(col.name))
90 fp.write('\n')
91
92 # rows
93 with Progress(gerund='saving'):
94 for dispvals in sheet.iterdispvals(format=True):
95 for col, val in dispvals.items():
96 fp.write(('{0:%s%s.%s} ' % ('>' if vd.isNumeric(col) else '<', widths[col], widths[col])).format(val))
97 fp.write('\n')
98
99 vd.status('%s save finished' % p)
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/visidata/loaders/fixed_width.py b/visidata/loaders/fixed_width.py
--- a/visidata/loaders/fixed_width.py
+++ b/visidata/loaders/fixed_width.py
@@ -84,8 +84,7 @@
widths = {} # Column -> width:int
# headers
for col in Progress(sheet.visibleCols, gerund='sizing'):
- maxWidth = col.getMaxWidth(sheet.rows)
- widths[col] = col.width if col.width >= maxWidth else sheet.options.default_width or maxWidth
+ widths[col] = col.getMaxWidth(sheet.rows) #1849
fp.write(('{0:%s} ' % widths[col]).format(col.name))
fp.write('\n')
| {"golden_diff": "diff --git a/visidata/loaders/fixed_width.py b/visidata/loaders/fixed_width.py\n--- a/visidata/loaders/fixed_width.py\n+++ b/visidata/loaders/fixed_width.py\n@@ -84,8 +84,7 @@\n widths = {} # Column -> width:int\n # headers\n for col in Progress(sheet.visibleCols, gerund='sizing'):\n- maxWidth = col.getMaxWidth(sheet.rows)\n- widths[col] = col.width if col.width >= maxWidth else sheet.options.default_width or maxWidth\n+ widths[col] = col.getMaxWidth(sheet.rows) #1849 \n fp.write(('{0:%s} ' % widths[col]).format(col.name))\n fp.write('\\n')\n", "issue": "fixed width saver truncates data if columns are not fully expanded\n**Small description**\r\n\r\nIf you save or syscopy a table as `fixed` format, and the visible column width is less than the width of the data in the column, the data is truncated. Also, the resulting file is not a valid fixed width format file as the columns are not aligned with the headers.\r\n\r\n**Expected result**\r\n\r\nSaving or copying a table when the columns are not fully expanded should yield the same result as when the columns are expanded.\r\n\r\n**Actual result with screenshot**\r\n\r\n\r\n**Steps to reproduce with sample data and a .vd**\r\n[test-vd-fixed.zip](https://github.com/saulpw/visidata/files/11217144/test-vd-fixed.zip)\r\n\r\n**Additional context**\r\nsaul.pw/VisiData v2.11\r\n\n", "before_files": [{"content": "\nfrom visidata import VisiData, vd, Sheet, Column, Progress, SequenceSheet\n\n\nvd.option('fixed_rows', 1000, 'number of rows to check for fixed width columns')\nvd.option('fixed_maxcols', 0, 'max number of fixed-width columns to create (0 is no max)')\n\[email protected]\ndef open_fixed(vd, p):\n return FixedWidthColumnsSheet(p.name, source=p, headerlines=[])\n\nclass FixedWidthColumn(Column):\n def __init__(self, name, i, j, **kwargs):\n super().__init__(name, **kwargs)\n self.i, self.j = i, j\n\n def calcValue(self, row):\n return row[0][self.i:self.j]\n\n def putValue(self, row, value):\n value = str(value)[:self.j-self.i]\n j = self.j or len(row)\n row[0] = row[0][:self.i] + '%-*s' % (j-self.i, value) + row[0][self.j:]\n\ndef columnize(rows):\n 'Generate (i,j) indexes for fixed-width columns found in rows'\n\n ## find all character columns that are not spaces ever\n allNonspaces = set()\n for r in rows:\n for i, ch in enumerate(r):\n if not ch.isspace():\n allNonspaces.add(i)\n\n colstart = 0\n prev = 0\n\n # collapse fields\n for i in allNonspaces:\n if i > prev+1:\n yield colstart, i\n colstart = i\n prev = i\n\n yield colstart, prev+1 # final column gets rest of line\n\n\nclass FixedWidthColumnsSheet(SequenceSheet):\n rowtype = 'lines' # rowdef: [line] (wrapping in list makes it unique and modifiable)\n def addRow(self, row, index=None):\n Sheet.addRow(self, row, index=index)\n\n def iterload(self):\n itsource = iter(self.source)\n\n # compute fixed width columns from first fixed_rows lines\n maxcols = self.options.fixed_maxcols\n self.columns = []\n fixedRows = list([x] for x in self.optlines(itsource, 'fixed_rows'))\n for i, j in columnize(list(r[0] for r in fixedRows)):\n if maxcols and self.nCols >= maxcols-1:\n self.addColumn(FixedWidthColumn('', i, None))\n break\n else:\n self.addColumn(FixedWidthColumn('', i, j))\n\n yield from fixedRows\n\n self.setColNames(self.headerlines)\n\n yield from ([line] for line in itsource)\n\n def setCols(self, headerlines):\n self.headerlines = headerlines\n\n\[email protected]\ndef save_fixed(vd, p, *vsheets):\n with p.open(mode='w', 
encoding=vsheets[0].options.save_encoding) as fp:\n for sheet in vsheets:\n if len(vsheets) > 1:\n fp.write('%s\\n\\n' % sheet.name)\n\n widths = {} # Column -> width:int\n # headers\n for col in Progress(sheet.visibleCols, gerund='sizing'):\n maxWidth = col.getMaxWidth(sheet.rows)\n widths[col] = col.width if col.width >= maxWidth else sheet.options.default_width or maxWidth\n fp.write(('{0:%s} ' % widths[col]).format(col.name))\n fp.write('\\n')\n\n # rows\n with Progress(gerund='saving'):\n for dispvals in sheet.iterdispvals(format=True):\n for col, val in dispvals.items():\n fp.write(('{0:%s%s.%s} ' % ('>' if vd.isNumeric(col) else '<', widths[col], widths[col])).format(val))\n fp.write('\\n')\n\n vd.status('%s save finished' % p)\n", "path": "visidata/loaders/fixed_width.py"}], "after_files": [{"content": "\nfrom visidata import VisiData, vd, Sheet, Column, Progress, SequenceSheet\n\n\nvd.option('fixed_rows', 1000, 'number of rows to check for fixed width columns')\nvd.option('fixed_maxcols', 0, 'max number of fixed-width columns to create (0 is no max)')\n\[email protected]\ndef open_fixed(vd, p):\n return FixedWidthColumnsSheet(p.name, source=p, headerlines=[])\n\nclass FixedWidthColumn(Column):\n def __init__(self, name, i, j, **kwargs):\n super().__init__(name, **kwargs)\n self.i, self.j = i, j\n\n def calcValue(self, row):\n return row[0][self.i:self.j]\n\n def putValue(self, row, value):\n value = str(value)[:self.j-self.i]\n j = self.j or len(row)\n row[0] = row[0][:self.i] + '%-*s' % (j-self.i, value) + row[0][self.j:]\n\ndef columnize(rows):\n 'Generate (i,j) indexes for fixed-width columns found in rows'\n\n ## find all character columns that are not spaces ever\n allNonspaces = set()\n for r in rows:\n for i, ch in enumerate(r):\n if not ch.isspace():\n allNonspaces.add(i)\n\n colstart = 0\n prev = 0\n\n # collapse fields\n for i in allNonspaces:\n if i > prev+1:\n yield colstart, i\n colstart = i\n prev = i\n\n yield colstart, prev+1 # final column gets rest of line\n\n\nclass FixedWidthColumnsSheet(SequenceSheet):\n rowtype = 'lines' # rowdef: [line] (wrapping in list makes it unique and modifiable)\n def addRow(self, row, index=None):\n Sheet.addRow(self, row, index=index)\n\n def iterload(self):\n itsource = iter(self.source)\n\n # compute fixed width columns from first fixed_rows lines\n maxcols = self.options.fixed_maxcols\n self.columns = []\n fixedRows = list([x] for x in self.optlines(itsource, 'fixed_rows'))\n for i, j in columnize(list(r[0] for r in fixedRows)):\n if maxcols and self.nCols >= maxcols-1:\n self.addColumn(FixedWidthColumn('', i, None))\n break\n else:\n self.addColumn(FixedWidthColumn('', i, j))\n\n yield from fixedRows\n\n self.setColNames(self.headerlines)\n\n yield from ([line] for line in itsource)\n\n def setCols(self, headerlines):\n self.headerlines = headerlines\n\n\[email protected]\ndef save_fixed(vd, p, *vsheets):\n with p.open(mode='w', encoding=vsheets[0].options.save_encoding) as fp:\n for sheet in vsheets:\n if len(vsheets) > 1:\n fp.write('%s\\n\\n' % sheet.name)\n\n widths = {} # Column -> width:int\n # headers\n for col in Progress(sheet.visibleCols, gerund='sizing'):\n widths[col] = col.getMaxWidth(sheet.rows) #1849 \n fp.write(('{0:%s} ' % widths[col]).format(col.name))\n fp.write('\\n')\n\n # rows\n with Progress(gerund='saving'):\n for dispvals in sheet.iterdispvals(format=True):\n for col, val in dispvals.items():\n fp.write(('{0:%s%s.%s} ' % ('>' if vd.isNumeric(col) else '<', widths[col], widths[col])).format(val))\n 
fp.write('\\n')\n\n vd.status('%s save finished' % p)\n", "path": "visidata/loaders/fixed_width.py"}]} | 1,544 | 164 |
gh_patches_debug_4240 | rasdani/github-patches | git_diff | liqd__adhocracy4-210 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Keep HTML time field optional even if a DateTimeField is set to be required
Time is optional in the backend, but the HTML input field still gets the `required` attribute if the DateTimeField is initialized with `required=True`.
The time widget should always be rendered without the `required` attribute.
--- END ISSUE ---
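For illustration, a generic, self-contained way to keep an HTML time input from rendering `required` in Django (the actual fix below achieves the same thing by putting `'required': False` into the time widget's attrs):

```python
from django import forms


class OptionalTimeInput(forms.TimeInput):
    """Time input that never renders the HTML required attribute,
    even when the parent form field is required."""

    def get_context(self, name, value, attrs):
        context = super().get_context(name, value, attrs)
        # Django skips boolean attrs whose value is False, so the
        # rendered <input> carries no required attribute.
        context['widget']['attrs']['required'] = False
        return context
```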
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `adhocracy4/forms/widgets.py`
Content:
```
1 import datetime
2
3 from django.contrib.staticfiles.storage import staticfiles_storage
4 from django.forms import widgets as form_widgets
5 from django.template.loader import render_to_string
6 from django.utils.timezone import localtime
7
8
9 class DateTimeInput(form_widgets.SplitDateTimeWidget):
10 def __init__(self, time_label='', time_default=None, *args, **kwargs):
11 super().__init__(*args, **kwargs)
12 self.time_label = time_label
13 self.time_default = time_default or datetime.time(hour=0, minute=0)
14
15 class Media:
16 js = (
17 staticfiles_storage.url('datepicker.js'),
18 )
19 css = {'all': [
20 staticfiles_storage.url('datepicker.css'),
21 ]}
22
23 def render(self, name, value, attrs=None):
24 date_attrs = self.build_attrs(attrs)
25 date_attrs.update({
26 'class': 'datepicker',
27 'placeholder': self.widgets[0].format_value(datetime.date.today()),
28 'id': attrs['id'] + '_date'
29 })
30 time_attrs = self.build_attrs(attrs)
31 time_attrs.update({
32 'class': 'timepicker',
33 'placeholder': self.widgets[1].format_value(
34 self.get_default_time()),
35 'id': attrs['id'] + '_time'
36 })
37
38 if isinstance(value, datetime.datetime):
39 value = localtime(value)
40 date = value.date()
41 time = value.time()
42 else:
43 # value's just a list in case of an error
44 date = value[0] if value else None
45 time = value[1] if value else None
46
47 return render_to_string(
48 'a4forms/datetime_input.html', {
49 'date': self.widgets[0].render(
50 name + '_0',
51 date,
52 date_attrs
53 ),
54 'time': self.widgets[1].render(
55 name + '_1',
56 time,
57 time_attrs
58 ),
59 'time_label': {
60 'label': self.time_label,
61 'id_for_label': attrs['id'] + '_time'
62 },
63 })
64
65 def id_for_label(self, id_):
66 if id_:
67 id_ += '_date'
68 return id_
69
70 def get_default_time(self):
71 time_widget = self.widgets[1]
72
73 if not self.time_default:
74 return time_widget.format_value(datetime.time(hour=0, minute=0))
75 elif isinstance(self.time_default, (datetime.time, datetime.datetime)):
76 return time_widget.format_value(self.time_default)
77 else:
78 return self.time_default
79
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/adhocracy4/forms/widgets.py b/adhocracy4/forms/widgets.py
--- a/adhocracy4/forms/widgets.py
+++ b/adhocracy4/forms/widgets.py
@@ -32,7 +32,8 @@
'class': 'timepicker',
'placeholder': self.widgets[1].format_value(
self.get_default_time()),
- 'id': attrs['id'] + '_time'
+ 'id': attrs['id'] + '_time',
+ 'required': False
})
if isinstance(value, datetime.datetime):
| {"golden_diff": "diff --git a/adhocracy4/forms/widgets.py b/adhocracy4/forms/widgets.py\n--- a/adhocracy4/forms/widgets.py\n+++ b/adhocracy4/forms/widgets.py\n@@ -32,7 +32,8 @@\n 'class': 'timepicker',\n 'placeholder': self.widgets[1].format_value(\n self.get_default_time()),\n- 'id': attrs['id'] + '_time'\n+ 'id': attrs['id'] + '_time',\n+ 'required': False\n })\n \n if isinstance(value, datetime.datetime):\n", "issue": "Keep html time field optional even if a DateTimeField is set to be required\nTime is optional in the backend but the html input field still gets the required attribute if the the DateTimeField is initializes with `required=True`\r\nThe time Widget should always be initialized without required.\n", "before_files": [{"content": "import datetime\n\nfrom django.contrib.staticfiles.storage import staticfiles_storage\nfrom django.forms import widgets as form_widgets\nfrom django.template.loader import render_to_string\nfrom django.utils.timezone import localtime\n\n\nclass DateTimeInput(form_widgets.SplitDateTimeWidget):\n def __init__(self, time_label='', time_default=None, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.time_label = time_label\n self.time_default = time_default or datetime.time(hour=0, minute=0)\n\n class Media:\n js = (\n staticfiles_storage.url('datepicker.js'),\n )\n css = {'all': [\n staticfiles_storage.url('datepicker.css'),\n ]}\n\n def render(self, name, value, attrs=None):\n date_attrs = self.build_attrs(attrs)\n date_attrs.update({\n 'class': 'datepicker',\n 'placeholder': self.widgets[0].format_value(datetime.date.today()),\n 'id': attrs['id'] + '_date'\n })\n time_attrs = self.build_attrs(attrs)\n time_attrs.update({\n 'class': 'timepicker',\n 'placeholder': self.widgets[1].format_value(\n self.get_default_time()),\n 'id': attrs['id'] + '_time'\n })\n\n if isinstance(value, datetime.datetime):\n value = localtime(value)\n date = value.date()\n time = value.time()\n else:\n # value's just a list in case of an error\n date = value[0] if value else None\n time = value[1] if value else None\n\n return render_to_string(\n 'a4forms/datetime_input.html', {\n 'date': self.widgets[0].render(\n name + '_0',\n date,\n date_attrs\n ),\n 'time': self.widgets[1].render(\n name + '_1',\n time,\n time_attrs\n ),\n 'time_label': {\n 'label': self.time_label,\n 'id_for_label': attrs['id'] + '_time'\n },\n })\n\n def id_for_label(self, id_):\n if id_:\n id_ += '_date'\n return id_\n\n def get_default_time(self):\n time_widget = self.widgets[1]\n\n if not self.time_default:\n return time_widget.format_value(datetime.time(hour=0, minute=0))\n elif isinstance(self.time_default, (datetime.time, datetime.datetime)):\n return time_widget.format_value(self.time_default)\n else:\n return self.time_default\n", "path": "adhocracy4/forms/widgets.py"}], "after_files": [{"content": "import datetime\n\nfrom django.contrib.staticfiles.storage import staticfiles_storage\nfrom django.forms import widgets as form_widgets\nfrom django.template.loader import render_to_string\nfrom django.utils.timezone import localtime\n\n\nclass DateTimeInput(form_widgets.SplitDateTimeWidget):\n def __init__(self, time_label='', time_default=None, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.time_label = time_label\n self.time_default = time_default or datetime.time(hour=0, minute=0)\n\n class Media:\n js = (\n staticfiles_storage.url('datepicker.js'),\n )\n css = {'all': [\n staticfiles_storage.url('datepicker.css'),\n ]}\n\n def render(self, name, value, attrs=None):\n 
date_attrs = self.build_attrs(attrs)\n date_attrs.update({\n 'class': 'datepicker',\n 'placeholder': self.widgets[0].format_value(datetime.date.today()),\n 'id': attrs['id'] + '_date'\n })\n time_attrs = self.build_attrs(attrs)\n time_attrs.update({\n 'class': 'timepicker',\n 'placeholder': self.widgets[1].format_value(\n self.get_default_time()),\n 'id': attrs['id'] + '_time',\n 'required': False\n })\n\n if isinstance(value, datetime.datetime):\n value = localtime(value)\n date = value.date()\n time = value.time()\n else:\n # value's just a list in case of an error\n date = value[0] if value else None\n time = value[1] if value else None\n\n return render_to_string(\n 'a4forms/datetime_input.html', {\n 'date': self.widgets[0].render(\n name + '_0',\n date,\n date_attrs\n ),\n 'time': self.widgets[1].render(\n name + '_1',\n time,\n time_attrs\n ),\n 'time_label': {\n 'label': self.time_label,\n 'id_for_label': attrs['id'] + '_time'\n },\n })\n\n def id_for_label(self, id_):\n if id_:\n id_ += '_date'\n return id_\n\n def get_default_time(self):\n time_widget = self.widgets[1]\n\n if not self.time_default:\n return time_widget.format_value(datetime.time(hour=0, minute=0))\n elif isinstance(self.time_default, (datetime.time, datetime.datetime)):\n return time_widget.format_value(self.time_default)\n else:\n return self.time_default\n", "path": "adhocracy4/forms/widgets.py"}]} | 995 | 122 |
gh_patches_debug_24120 | rasdani/github-patches | git_diff | conan-io__conan-center-index-11233 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[request] kcov/40
### Package Details
* Package Name/Version: **kcov/40**
* Changelog: **https://github.com/SimonKagstrom/kcov/blob/master/ChangeLog**
Hello,
Currently conan-center provides only release 38; I would like the latest release (40) to be available as well.
I'll provide a pull request.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `recipes/kcov/all/conanfile.py`
Content:
```
1 import os
2 from conans import ConanFile, CMake, tools
3 from conans.errors import ConanInvalidConfiguration
4
5
6 class KcovConan(ConanFile):
7 name = "kcov"
8 license = "GPL-2.0"
9 url = "https://github.com/conan-io/conan-center-index/"
10 homepage = "http://simonkagstrom.github.io/kcov/index.html"
11 description = "Code coverage tool for compiled programs, Python and Bash\
12 which uses debugging information to collect and report data without\
13 special compilation options"
14 topics = ("coverage", "linux", "debug")
15 settings = "os", "compiler", "build_type", "arch"
16 exports_sources = "CMakeLists.txt", "patches/**"
17 requires = ["zlib/1.2.11",
18 "libiberty/9.1.0",
19 "libcurl/7.64.1",
20 "elfutils/0.180"]
21 generators = "cmake"
22 _cmake = None
23 _source_subfolder = "source_subfolder"
24 _build_subfolder = "build_subfolder"
25
26 def configure(self):
27 if self.settings.os == "Windows":
28 raise ConanInvalidConfiguration(
29 "kcov can not be built on windows.")
30
31 def source(self):
32 tools.get(**self.conan_data["sources"][self.version])
33 extracted_dir = self.name + "-" + self.version
34 os.rename(extracted_dir, self._source_subfolder)
35
36 def _patch_sources(self):
37 for patch in self.conan_data["patches"][self.version]:
38 tools.patch(**patch)
39
40 def _configure_cmake(self):
41 if self._cmake is not None:
42 return self._cmake
43 self._cmake = CMake(self)
44 self._cmake.configure(build_folder=self._build_subfolder)
45 return self._cmake
46
47 def build(self):
48 self._patch_sources()
49 cmake = self._configure_cmake()
50 cmake.build()
51
52 def package(self):
53 cmake = self._configure_cmake()
54 cmake.install()
55 tools.rmdir(os.path.join(self.package_folder, "share"))
56 self.copy("COPYING*", dst="licenses", src=self._source_subfolder)
57
58 def package_info(self):
59 bindir = os.path.join(self.package_folder, "bin")
60 self.output.info("Appending PATH environment variable: {}"
61 .format(bindir))
62 self.env_info.PATH.append(bindir)
63
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/recipes/kcov/all/conanfile.py b/recipes/kcov/all/conanfile.py
--- a/recipes/kcov/all/conanfile.py
+++ b/recipes/kcov/all/conanfile.py
@@ -1,8 +1,8 @@
import os
-from conans import ConanFile, CMake, tools
+from conan import ConanFile
+from conans import CMake, tools
from conans.errors import ConanInvalidConfiguration
-
class KcovConan(ConanFile):
name = "kcov"
license = "GPL-2.0"
@@ -14,9 +14,9 @@
topics = ("coverage", "linux", "debug")
settings = "os", "compiler", "build_type", "arch"
exports_sources = "CMakeLists.txt", "patches/**"
- requires = ["zlib/1.2.11",
+ requires = ["zlib/1.2.12",
"libiberty/9.1.0",
- "libcurl/7.64.1",
+ "libcurl/7.83.1",
"elfutils/0.180"]
generators = "cmake"
_cmake = None
@@ -60,3 +60,4 @@
self.output.info("Appending PATH environment variable: {}"
.format(bindir))
self.env_info.PATH.append(bindir)
+ self.cpp_info.includedirs = []
| {"golden_diff": "diff --git a/recipes/kcov/all/conanfile.py b/recipes/kcov/all/conanfile.py\n--- a/recipes/kcov/all/conanfile.py\n+++ b/recipes/kcov/all/conanfile.py\n@@ -1,8 +1,8 @@\n import os\n-from conans import ConanFile, CMake, tools\n+from conan import ConanFile\n+from conans import CMake, tools\n from conans.errors import ConanInvalidConfiguration\n \n-\n class KcovConan(ConanFile):\n name = \"kcov\"\n license = \"GPL-2.0\"\n@@ -14,9 +14,9 @@\n topics = (\"coverage\", \"linux\", \"debug\")\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n exports_sources = \"CMakeLists.txt\", \"patches/**\"\n- requires = [\"zlib/1.2.11\",\n+ requires = [\"zlib/1.2.12\",\n \"libiberty/9.1.0\",\n- \"libcurl/7.64.1\",\n+ \"libcurl/7.83.1\",\n \"elfutils/0.180\"]\n generators = \"cmake\"\n _cmake = None\n@@ -60,3 +60,4 @@\n self.output.info(\"Appending PATH environment variable: {}\"\n .format(bindir))\n self.env_info.PATH.append(bindir)\n+ self.cpp_info.includedirs = []\n", "issue": "[request] kcov/40\n### Package Details\r\n * Package Name/Version: **kcov/40**\r\n * Changelog: **https://github.com/SimonKagstrom/kcov/blob/master/ChangeLog**\r\n\r\nHello,\r\n\r\nCurrently conan-center provides only 38 release, I would like to have latest release (40) also available.\r\nI'll provides a pull request.\r\n\n", "before_files": [{"content": "import os\nfrom conans import ConanFile, CMake, tools\nfrom conans.errors import ConanInvalidConfiguration\n\n\nclass KcovConan(ConanFile):\n name = \"kcov\"\n license = \"GPL-2.0\"\n url = \"https://github.com/conan-io/conan-center-index/\"\n homepage = \"http://simonkagstrom.github.io/kcov/index.html\"\n description = \"Code coverage tool for compiled programs, Python and Bash\\\n which uses debugging information to collect and report data without\\\n special compilation options\"\n topics = (\"coverage\", \"linux\", \"debug\")\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n exports_sources = \"CMakeLists.txt\", \"patches/**\"\n requires = [\"zlib/1.2.11\",\n \"libiberty/9.1.0\",\n \"libcurl/7.64.1\",\n \"elfutils/0.180\"]\n generators = \"cmake\"\n _cmake = None\n _source_subfolder = \"source_subfolder\"\n _build_subfolder = \"build_subfolder\"\n\n def configure(self):\n if self.settings.os == \"Windows\":\n raise ConanInvalidConfiguration(\n \"kcov can not be built on windows.\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = self.name + \"-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n def _patch_sources(self):\n for patch in self.conan_data[\"patches\"][self.version]:\n tools.patch(**patch)\n\n def _configure_cmake(self):\n if self._cmake is not None:\n return self._cmake\n self._cmake = CMake(self)\n self._cmake.configure(build_folder=self._build_subfolder)\n return self._cmake\n\n def build(self):\n self._patch_sources()\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n cmake = self._configure_cmake()\n cmake.install()\n tools.rmdir(os.path.join(self.package_folder, \"share\"))\n self.copy(\"COPYING*\", dst=\"licenses\", src=self._source_subfolder)\n\n def package_info(self):\n bindir = os.path.join(self.package_folder, \"bin\")\n self.output.info(\"Appending PATH environment variable: {}\"\n .format(bindir))\n self.env_info.PATH.append(bindir)\n", "path": "recipes/kcov/all/conanfile.py"}], "after_files": [{"content": "import os\nfrom conan import ConanFile\nfrom conans import CMake, tools\nfrom conans.errors import 
ConanInvalidConfiguration\n\nclass KcovConan(ConanFile):\n name = \"kcov\"\n license = \"GPL-2.0\"\n url = \"https://github.com/conan-io/conan-center-index/\"\n homepage = \"http://simonkagstrom.github.io/kcov/index.html\"\n description = \"Code coverage tool for compiled programs, Python and Bash\\\n which uses debugging information to collect and report data without\\\n special compilation options\"\n topics = (\"coverage\", \"linux\", \"debug\")\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n exports_sources = \"CMakeLists.txt\", \"patches/**\"\n requires = [\"zlib/1.2.12\",\n \"libiberty/9.1.0\",\n \"libcurl/7.83.1\",\n \"elfutils/0.180\"]\n generators = \"cmake\"\n _cmake = None\n _source_subfolder = \"source_subfolder\"\n _build_subfolder = \"build_subfolder\"\n\n def configure(self):\n if self.settings.os == \"Windows\":\n raise ConanInvalidConfiguration(\n \"kcov can not be built on windows.\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = self.name + \"-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n def _patch_sources(self):\n for patch in self.conan_data[\"patches\"][self.version]:\n tools.patch(**patch)\n\n def _configure_cmake(self):\n if self._cmake is not None:\n return self._cmake\n self._cmake = CMake(self)\n self._cmake.configure(build_folder=self._build_subfolder)\n return self._cmake\n\n def build(self):\n self._patch_sources()\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n cmake = self._configure_cmake()\n cmake.install()\n tools.rmdir(os.path.join(self.package_folder, \"share\"))\n self.copy(\"COPYING*\", dst=\"licenses\", src=self._source_subfolder)\n\n def package_info(self):\n bindir = os.path.join(self.package_folder, \"bin\")\n self.output.info(\"Appending PATH environment variable: {}\"\n .format(bindir))\n self.env_info.PATH.append(bindir)\n self.cpp_info.includedirs = []\n", "path": "recipes/kcov/all/conanfile.py"}]} | 999 | 325 |
gh_patches_debug_8416 | rasdani/github-patches | git_diff | optuna__optuna-449 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ValueError when calling suggest_categorical with int and str
**Conditions**
- Optuna version: 0.13.0
- Python version: 3.7.3
- OS: Windows 10 Education
- Machine Learning library to be optimized: none
**Code to reproduce**
```
def objective(trial: optuna.Trial):
x = trial.suggest_categorical("x", [1, "0"])
print(x)
optuna.create_study( study_name="test_" + now_string(), storage="sqlite:///tmp/example.db").optimize(objective, n_trials=10)
```
**Error messages, stack traces, or logs**
```
Traceback (most recent call last):
File "C:\Users\imri\github\scoring-model\venv\lib\site-packages\optuna\study.py", line 468, in _run_trial
result = func(trial)
File "~\github\scoring-model\tests\TestOptuna.py", line 12, in objective
x = trial.suggest_categorical("x", [1, "0"])
File "~\github\scoring-model\venv\lib\site-packages\optuna\trial.py", line 337, in suggest_categorical
return self._suggest(name, distributions.CategoricalDistribution(choices=choices))
File "~\github\scoring-model\venv\lib\site-packages\optuna\trial.py", line 457, in _suggest
return self._set_new_param_or_get_existing(name, param_value, distribution)
File "~\github\scoring-model\venv\lib\site-packages\optuna\trial.py", line 462, in _set_new_param_or_get_existing
param_value_in_internal_repr = distribution.to_internal_repr(param_value)
File "~\github\scoring-model\venv\lib\site-packages\optuna\distributions.py", line 236, in to_internal_repr
return self.choices.index(param_value_in_external_repr)
ValueError: tuple.index(x): x not in tuple
```
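
For what it's worth, the coercion can be reproduced without optuna (a minimal sketch, assuming NumPy's usual casting of mixed-type sequences):

```
import numpy

rng = numpy.random.RandomState(0)
choices = (1, "0")  # mixed int and str, as in the failing call

# NumPy casts the mixed tuple to a unicode array, so the sampled value
# comes back as numpy.str_ ('1' or '0'), never the original int 1.
sampled = rng.choice(choices)
print(type(sampled), repr(sampled))

# choices.index(sampled) then raises ValueError whenever the int was
# drawn, since numpy.str_('1') != 1. Selecting by index instead keeps
# the original Python object intact:
index = rng.randint(0, len(choices))
print(repr(choices[index]))  # 1 or '0', with its original type
```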
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `optuna/samplers/random.py`
Content:
```
1 import numpy
2
3 from optuna import distributions
4 from optuna.samplers.base import BaseSampler
5 from optuna import types
6
7 if types.TYPE_CHECKING:
8 from typing import Any # NOQA
9 from typing import Dict # NOQA
10 from typing import Optional # NOQA
11
12 from optuna.distributions import BaseDistribution # NOQA
13 from optuna.structs import FrozenTrial # NOQA
14 from optuna.study import InTrialStudy # NOQA
15
16
17 class RandomSampler(BaseSampler):
18 """Sampler using random sampling.
19
20 Example:
21
22 .. code::
23
24 >>> study = optuna.create_study(sampler=RandomSampler())
25 >>> study.optimize(objective, direction='minimize')
26
27 Args:
28 seed: Seed for random number generator.
29 """
30
31 def __init__(self, seed=None):
32 # type: (Optional[int]) -> None
33
34 self.seed = seed
35 self.rng = numpy.random.RandomState(seed)
36
37 def infer_relative_search_space(self, study, trial):
38 # type: (InTrialStudy, FrozenTrial) -> Dict[str, BaseDistribution]
39
40 return {}
41
42 def sample_relative(self, study, trial, search_space):
43 # type: (InTrialStudy, FrozenTrial, Dict[str, BaseDistribution]) -> Dict[str, Any]
44
45 return {}
46
47 def sample_independent(self, study, trial, param_name, param_distribution):
48 # type: (InTrialStudy, FrozenTrial, str, distributions.BaseDistribution) -> Any
49 """Please consult the documentation for :func:`BaseSampler.sample_independent`."""
50
51 if isinstance(param_distribution, distributions.UniformDistribution):
52 return self.rng.uniform(param_distribution.low, param_distribution.high)
53 elif isinstance(param_distribution, distributions.LogUniformDistribution):
54 log_low = numpy.log(param_distribution.low)
55 log_high = numpy.log(param_distribution.high)
56 return float(numpy.exp(self.rng.uniform(log_low, log_high)))
57 elif isinstance(param_distribution, distributions.DiscreteUniformDistribution):
58 q = param_distribution.q
59 r = param_distribution.high - param_distribution.low
60 # [low, high] is shifted to [0, r] to align sampled values at regular intervals.
61 low = 0 - 0.5 * q
62 high = r + 0.5 * q
63 s = self.rng.uniform(low, high)
64 v = numpy.round(s / q) * q + param_distribution.low
65 # v may slightly exceed range due to round-off errors.
66 return float(min(max(v, param_distribution.low), param_distribution.high))
67 elif isinstance(param_distribution, distributions.IntUniformDistribution):
68 # numpy.random.randint includes low but excludes high.
69 return self.rng.randint(param_distribution.low, param_distribution.high + 1)
70 elif isinstance(param_distribution, distributions.CategoricalDistribution):
71 choices = param_distribution.choices
72 return self.rng.choice(choices)
73 else:
74 raise NotImplementedError
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/optuna/samplers/random.py b/optuna/samplers/random.py
--- a/optuna/samplers/random.py
+++ b/optuna/samplers/random.py
@@ -69,6 +69,7 @@
return self.rng.randint(param_distribution.low, param_distribution.high + 1)
elif isinstance(param_distribution, distributions.CategoricalDistribution):
choices = param_distribution.choices
- return self.rng.choice(choices)
+ index = self.rng.randint(0, len(choices))
+ return choices[index]
else:
raise NotImplementedError
| {"golden_diff": "diff --git a/optuna/samplers/random.py b/optuna/samplers/random.py\n--- a/optuna/samplers/random.py\n+++ b/optuna/samplers/random.py\n@@ -69,6 +69,7 @@\n return self.rng.randint(param_distribution.low, param_distribution.high + 1)\n elif isinstance(param_distribution, distributions.CategoricalDistribution):\n choices = param_distribution.choices\n- return self.rng.choice(choices)\n+ index = self.rng.randint(0, len(choices))\n+ return choices[index]\n else:\n raise NotImplementedError\n", "issue": "ValueError when calling suggest_categorical with int and str\n**Conditions**\r\n- Optuna version: 0.13.0\r\n- Python version: 3.7.3\r\n- OS: Windows 10 Education\r\n- Machine Learning library to be optimized: none\r\n\r\n**Code to reproduce**\r\n```\r\ndef objective(trial: optuna.Trial):\r\n x = trial.suggest_categorical(\"x\", [1, \"0\"])\r\n print(x)\r\noptuna.create_study( study_name=\"test_\" + now_string(), storage=\"sqlite:///tmp/example.db\").optimize(objective, n_trials=10)\r\n```\r\n\r\n**Error messages, stack traces, or logs**\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\imri\\github\\scoring-model\\venv\\lib\\site-packages\\optuna\\study.py\", line 468, in _run_trial\r\n result = func(trial)\r\n File \"~\\github\\scoring-model\\tests\\TestOptuna.py\", line 12, in objective\r\n x = trial.suggest_categorical(\"x\", [1, \"0\"])\r\n File \"~\\github\\scoring-model\\venv\\lib\\site-packages\\optuna\\trial.py\", line 337, in suggest_categorical\r\n return self._suggest(name, distributions.CategoricalDistribution(choices=choices))\r\n File \"~\\github\\scoring-model\\venv\\lib\\site-packages\\optuna\\trial.py\", line 457, in _suggest\r\n return self._set_new_param_or_get_existing(name, param_value, distribution)\r\n File \"~\\github\\scoring-model\\venv\\lib\\site-packages\\optuna\\trial.py\", line 462, in _set_new_param_or_get_existing\r\n param_value_in_internal_repr = distribution.to_internal_repr(param_value)\r\n File \"~\\github\\scoring-model\\venv\\lib\\site-packages\\optuna\\distributions.py\", line 236, in to_internal_repr\r\n return self.choices.index(param_value_in_external_repr)\r\nValueError: tuple.index(x): x not in tuple\r\n```\r\n\r\n\n", "before_files": [{"content": "import numpy\n\nfrom optuna import distributions\nfrom optuna.samplers.base import BaseSampler\nfrom optuna import types\n\nif types.TYPE_CHECKING:\n from typing import Any # NOQA\n from typing import Dict # NOQA\n from typing import Optional # NOQA\n\n from optuna.distributions import BaseDistribution # NOQA\n from optuna.structs import FrozenTrial # NOQA\n from optuna.study import InTrialStudy # NOQA\n\n\nclass RandomSampler(BaseSampler):\n \"\"\"Sampler using random sampling.\n\n Example:\n\n .. 
code::\n\n >>> study = optuna.create_study(sampler=RandomSampler())\n >>> study.optimize(objective, direction='minimize')\n\n Args:\n seed: Seed for random number generator.\n \"\"\"\n\n def __init__(self, seed=None):\n # type: (Optional[int]) -> None\n\n self.seed = seed\n self.rng = numpy.random.RandomState(seed)\n\n def infer_relative_search_space(self, study, trial):\n # type: (InTrialStudy, FrozenTrial) -> Dict[str, BaseDistribution]\n\n return {}\n\n def sample_relative(self, study, trial, search_space):\n # type: (InTrialStudy, FrozenTrial, Dict[str, BaseDistribution]) -> Dict[str, Any]\n\n return {}\n\n def sample_independent(self, study, trial, param_name, param_distribution):\n # type: (InTrialStudy, FrozenTrial, str, distributions.BaseDistribution) -> Any\n \"\"\"Please consult the documentation for :func:`BaseSampler.sample_independent`.\"\"\"\n\n if isinstance(param_distribution, distributions.UniformDistribution):\n return self.rng.uniform(param_distribution.low, param_distribution.high)\n elif isinstance(param_distribution, distributions.LogUniformDistribution):\n log_low = numpy.log(param_distribution.low)\n log_high = numpy.log(param_distribution.high)\n return float(numpy.exp(self.rng.uniform(log_low, log_high)))\n elif isinstance(param_distribution, distributions.DiscreteUniformDistribution):\n q = param_distribution.q\n r = param_distribution.high - param_distribution.low\n # [low, high] is shifted to [0, r] to align sampled values at regular intervals.\n low = 0 - 0.5 * q\n high = r + 0.5 * q\n s = self.rng.uniform(low, high)\n v = numpy.round(s / q) * q + param_distribution.low\n # v may slightly exceed range due to round-off errors.\n return float(min(max(v, param_distribution.low), param_distribution.high))\n elif isinstance(param_distribution, distributions.IntUniformDistribution):\n # numpy.random.randint includes low but excludes high.\n return self.rng.randint(param_distribution.low, param_distribution.high + 1)\n elif isinstance(param_distribution, distributions.CategoricalDistribution):\n choices = param_distribution.choices\n return self.rng.choice(choices)\n else:\n raise NotImplementedError\n", "path": "optuna/samplers/random.py"}], "after_files": [{"content": "import numpy\n\nfrom optuna import distributions\nfrom optuna.samplers.base import BaseSampler\nfrom optuna import types\n\nif types.TYPE_CHECKING:\n from typing import Any # NOQA\n from typing import Dict # NOQA\n from typing import Optional # NOQA\n\n from optuna.distributions import BaseDistribution # NOQA\n from optuna.structs import FrozenTrial # NOQA\n from optuna.study import InTrialStudy # NOQA\n\n\nclass RandomSampler(BaseSampler):\n \"\"\"Sampler using random sampling.\n\n Example:\n\n .. 
code::\n\n >>> study = optuna.create_study(sampler=RandomSampler())\n >>> study.optimize(objective, direction='minimize')\n\n Args:\n seed: Seed for random number generator.\n \"\"\"\n\n def __init__(self, seed=None):\n # type: (Optional[int]) -> None\n\n self.seed = seed\n self.rng = numpy.random.RandomState(seed)\n\n def infer_relative_search_space(self, study, trial):\n # type: (InTrialStudy, FrozenTrial) -> Dict[str, BaseDistribution]\n\n return {}\n\n def sample_relative(self, study, trial, search_space):\n # type: (InTrialStudy, FrozenTrial, Dict[str, BaseDistribution]) -> Dict[str, Any]\n\n return {}\n\n def sample_independent(self, study, trial, param_name, param_distribution):\n # type: (InTrialStudy, FrozenTrial, str, distributions.BaseDistribution) -> Any\n \"\"\"Please consult the documentation for :func:`BaseSampler.sample_independent`.\"\"\"\n\n if isinstance(param_distribution, distributions.UniformDistribution):\n return self.rng.uniform(param_distribution.low, param_distribution.high)\n elif isinstance(param_distribution, distributions.LogUniformDistribution):\n log_low = numpy.log(param_distribution.low)\n log_high = numpy.log(param_distribution.high)\n return float(numpy.exp(self.rng.uniform(log_low, log_high)))\n elif isinstance(param_distribution, distributions.DiscreteUniformDistribution):\n q = param_distribution.q\n r = param_distribution.high - param_distribution.low\n # [low, high] is shifted to [0, r] to align sampled values at regular intervals.\n low = 0 - 0.5 * q\n high = r + 0.5 * q\n s = self.rng.uniform(low, high)\n v = numpy.round(s / q) * q + param_distribution.low\n # v may slightly exceed range due to round-off errors.\n return float(min(max(v, param_distribution.low), param_distribution.high))\n elif isinstance(param_distribution, distributions.IntUniformDistribution):\n # numpy.random.randint includes low but excludes high.\n return self.rng.randint(param_distribution.low, param_distribution.high + 1)\n elif isinstance(param_distribution, distributions.CategoricalDistribution):\n choices = param_distribution.choices\n index = self.rng.randint(0, len(choices))\n return choices[index]\n else:\n raise NotImplementedError\n", "path": "optuna/samplers/random.py"}]} | 1,477 | 123 |
gh_patches_debug_24056 | rasdani/github-patches | git_diff | pypi__warehouse-2574 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve sorting on simple page
I'd like to submit a patch for this but I have a few questions :)
First I'll describe what I'd like to do...
## sort by version number
See https://pypi.org/simple/pre-commit/
You'll notice that `0.10.0` erroneously sorts *before* `0.2.0` (I'd like to fix this)
## investigation
I've found the code which does this sorting [here](https://github.com/pypa/warehouse/blob/3bdfe5a89cc9a922ee97304c98384c24822a09ee/warehouse/legacy/api/simple.py#L76-L89)
This seems to just sort by filename, but by inspecting and viewing [this page](https://pypi.org/simple/pre-commit-mirror-maker/) I notice it seems to ignore `_` vs. `-` (which is good; that's what I want to continue to happen, but I'm just not seeing it in the code!)
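
(My best guess is that the `_` vs. `-` equivalence comes from PEP 503 name normalization rather than from this sort; a sketch of that normalization:)

```
import re

def normalize(name):
    # PEP 503: runs of -, _ and . collapse to a single hyphen, lowercased
    return re.sub(r"[-_.]+", "-", name).lower()

print(normalize("pre_commit_mirror_maker"))  # pre-commit-mirror-maker
```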
## other questions
The `File` objects which come back from the database contain a `.version` attribute that I'd like to use to participate in sorting, my main question is: **Can I depend on this version to be a valid [PEP440](https://www.python.org/dev/peps/pep-0440/) version and use something like `pkg_resources.parse_version`?**
I'd basically like to replicate something close to the sorting which @chriskuehl's [dumb-pypi](https://github.com/chriskuehl/dumb-pypi) does [here](https://github.com/chriskuehl/dumb-pypi/blob/fd0f93fc2e82cbd9bae41b3c60c5f006b2319c60/dumb_pypi/main.py#L77-L91).
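
Roughly the ordering I'm after, sketched with `packaging.version.parse` (which, as far as I know, is what modern `pkg_resources.parse_version` defers to):

```
from packaging.version import parse

versions = ["0.10.0", "0.2.0", "1.0.0b1", "1.0.0"]

# plain string sort puts 0.10.0 before 0.2.0
print(sorted(versions))             # ['0.10.0', '0.2.0', '1.0.0', '1.0.0b1']

# PEP 440-aware sort orders numerically, pre-releases before finals
print(sorted(versions, key=parse))  # ['0.2.0', '0.10.0', '1.0.0b1', '1.0.0']
```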
Thanks in advance :)
---
**Good First Issue**: This issue is good for first time contributors. If there is not a corresponding pull request for this issue, it is up for grabs. For directions for getting set up, see our [Getting Started Guide](https://warehouse.pypa.io/development/getting-started/). If you are working on this issue and have questions, please feel free to ask them here, [`#pypa-dev` on Freenode](https://webchat.freenode.net/?channels=%23pypa-dev), or the [pypa-dev mailing list](https://groups.google.com/forum/#!forum/pypa-dev).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `warehouse/legacy/api/simple.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 from pyramid.httpexceptions import HTTPMovedPermanently
14 from pyramid.view import view_config
15 from sqlalchemy import func
16 from sqlalchemy.orm import joinedload
17
18 from warehouse.cache.http import cache_control
19 from warehouse.cache.origin import origin_cache
20 from warehouse.packaging.models import JournalEntry, File, Project, Release
21
22
23 @view_config(
24 route_name="legacy.api.simple.index",
25 renderer="legacy/api/simple/index.html",
26 decorator=[
27 cache_control(10 * 60), # 10 minutes
28 origin_cache(
29 1 * 24 * 60 * 60, # 1 day
30 stale_while_revalidate=5 * 60, # 5 minutes
31 stale_if_error=1 * 24 * 60 * 60, # 1 day
32 ),
33 ],
34 )
35 def simple_index(request):
36 # Get the latest serial number
37 serial = request.db.query(func.max(JournalEntry.id)).scalar() or 0
38 request.response.headers["X-PyPI-Last-Serial"] = str(serial)
39
40 # Fetch the name and normalized name for all of our projects
41 projects = (
42 request.db.query(Project.name, Project.normalized_name)
43 .order_by(Project.normalized_name)
44 .all()
45 )
46
47 return {"projects": projects}
48
49
50 @view_config(
51 route_name="legacy.api.simple.detail",
52 renderer="legacy/api/simple/detail.html",
53 decorator=[
54 cache_control(10 * 60), # 10 minutes
55 origin_cache(
56 1 * 24 * 60 * 60, # 1 day
57 stale_while_revalidate=5 * 60, # 5 minutes
58 stale_if_error=1 * 24 * 60 * 60, # 1 day
59 ),
60 ],
61 )
62 def simple_detail(project, request):
63 # TODO: Handle files which are not hosted on PyPI
64
65 # Make sure that we're using the normalized version of the URL.
66 if (project.normalized_name !=
67 request.matchdict.get("name", project.normalized_name)):
68 return HTTPMovedPermanently(
69 request.current_route_path(name=project.normalized_name),
70 )
71
72 # Get the latest serial number for this project.
73 request.response.headers["X-PyPI-Last-Serial"] = str(project.last_serial)
74
75 # Get all of the files for this project.
76 files = (
77 request.db.query(File)
78 .options(joinedload(File.release))
79 .filter(
80 File.name == project.name,
81 File.version.in_(
82 request.db.query(Release)
83 .filter(Release.project == project)
84 .with_entities(Release.version)
85 )
86 )
87 .order_by(File.filename)
88 .all()
89 )
90
91 return {"project": project, "files": files}
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/warehouse/legacy/api/simple.py b/warehouse/legacy/api/simple.py
--- a/warehouse/legacy/api/simple.py
+++ b/warehouse/legacy/api/simple.py
@@ -10,6 +10,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+
+from packaging.version import parse
from pyramid.httpexceptions import HTTPMovedPermanently
from pyramid.view import view_config
from sqlalchemy import func
@@ -73,7 +75,7 @@
request.response.headers["X-PyPI-Last-Serial"] = str(project.last_serial)
# Get all of the files for this project.
- files = (
+ files = sorted(
request.db.query(File)
.options(joinedload(File.release))
.filter(
@@ -84,8 +86,8 @@
.with_entities(Release.version)
)
)
- .order_by(File.filename)
- .all()
+ .all(),
+ key=lambda f: (parse(f.version), f.packagetype)
)
return {"project": project, "files": files}
| {"golden_diff": "diff --git a/warehouse/legacy/api/simple.py b/warehouse/legacy/api/simple.py\n--- a/warehouse/legacy/api/simple.py\n+++ b/warehouse/legacy/api/simple.py\n@@ -10,6 +10,8 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+\n+from packaging.version import parse\n from pyramid.httpexceptions import HTTPMovedPermanently\n from pyramid.view import view_config\n from sqlalchemy import func\n@@ -73,7 +75,7 @@\n request.response.headers[\"X-PyPI-Last-Serial\"] = str(project.last_serial)\n \n # Get all of the files for this project.\n- files = (\n+ files = sorted(\n request.db.query(File)\n .options(joinedload(File.release))\n .filter(\n@@ -84,8 +86,8 @@\n .with_entities(Release.version)\n )\n )\n- .order_by(File.filename)\n- .all()\n+ .all(),\n+ key=lambda f: (parse(f.version), f.packagetype)\n )\n \n return {\"project\": project, \"files\": files}\n", "issue": "Improve sorting on simple page\nI'd like to submit a patch for this but I have a few questions :)\r\n\r\nFirst I'll describe what I'd like to do...\r\n\r\n## sort by version number\r\n\r\nSee https://pypi.org/simple/pre-commit/\r\n\r\nYou'll notice that `0.10.0` erroneously sorts *before* `0.2.0` (I'd like to fix this)\r\n\r\n## investigation\r\n\r\nI've found the code which does this sorting [here](https://github.com/pypa/warehouse/blob/3bdfe5a89cc9a922ee97304c98384c24822a09ee/warehouse/legacy/api/simple.py#L76-L89)\r\n\r\nThis seems to just sort by filename, but by inspecting and viewing [this page](https://pypi.org/simple/pre-commit-mirror-maker/) I notice it seems to ignore `_` vs. `-` (which is good, that's what I want to continue to happen but I'm just not seeing it from the code!)\r\n\r\n## other questions\r\n\r\nThe `File` objects which come back from the database contain a `.version` attribute that I'd like to use to participate in sorting, my main question is: **Can I depend on this version to be a valid [PEP440](https://www.python.org/dev/peps/pep-0440/) version and use something like `pkg_resources.parse_version`?**\r\n\r\nI'd basically like to replicate something close to the sorting which @chriskuehl's [dumb-pypi](https://github.com/chriskuehl/dumb-pypi) does [here](https://github.com/chriskuehl/dumb-pypi/blob/fd0f93fc2e82cbd9bae41b3c60c5f006b2319c60/dumb_pypi/main.py#L77-L91).\r\n\r\nThanks in advance :)\r\n\r\n---\r\n\r\n**Good First Issue**: This issue is good for first time contributors. If there is not a corresponding pull request for this issue, it is up for grabs. For directions for getting set up, see our [Getting Started Guide](https://warehouse.pypa.io/development/getting-started/). 
If you are working on this issue and have questions, please feel free to ask them here, [`#pypa-dev` on Freenode](https://webchat.freenode.net/?channels=%23pypa-dev), or the [pypa-dev mailing list](https://groups.google.com/forum/#!forum/pypa-dev).\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom pyramid.httpexceptions import HTTPMovedPermanently\nfrom pyramid.view import view_config\nfrom sqlalchemy import func\nfrom sqlalchemy.orm import joinedload\n\nfrom warehouse.cache.http import cache_control\nfrom warehouse.cache.origin import origin_cache\nfrom warehouse.packaging.models import JournalEntry, File, Project, Release\n\n\n@view_config(\n route_name=\"legacy.api.simple.index\",\n renderer=\"legacy/api/simple/index.html\",\n decorator=[\n cache_control(10 * 60), # 10 minutes\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=5 * 60, # 5 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\ndef simple_index(request):\n # Get the latest serial number\n serial = request.db.query(func.max(JournalEntry.id)).scalar() or 0\n request.response.headers[\"X-PyPI-Last-Serial\"] = str(serial)\n\n # Fetch the name and normalized name for all of our projects\n projects = (\n request.db.query(Project.name, Project.normalized_name)\n .order_by(Project.normalized_name)\n .all()\n )\n\n return {\"projects\": projects}\n\n\n@view_config(\n route_name=\"legacy.api.simple.detail\",\n renderer=\"legacy/api/simple/detail.html\",\n decorator=[\n cache_control(10 * 60), # 10 minutes\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=5 * 60, # 5 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\ndef simple_detail(project, request):\n # TODO: Handle files which are not hosted on PyPI\n\n # Make sure that we're using the normalized version of the URL.\n if (project.normalized_name !=\n request.matchdict.get(\"name\", project.normalized_name)):\n return HTTPMovedPermanently(\n request.current_route_path(name=project.normalized_name),\n )\n\n # Get the latest serial number for this project.\n request.response.headers[\"X-PyPI-Last-Serial\"] = str(project.last_serial)\n\n # Get all of the files for this project.\n files = (\n request.db.query(File)\n .options(joinedload(File.release))\n .filter(\n File.name == project.name,\n File.version.in_(\n request.db.query(Release)\n .filter(Release.project == project)\n .with_entities(Release.version)\n )\n )\n .order_by(File.filename)\n .all()\n )\n\n return {\"project\": project, \"files\": files}\n", "path": "warehouse/legacy/api/simple.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom packaging.version import parse\nfrom pyramid.httpexceptions import HTTPMovedPermanently\nfrom pyramid.view import view_config\nfrom sqlalchemy import func\nfrom sqlalchemy.orm import joinedload\n\nfrom warehouse.cache.http import cache_control\nfrom warehouse.cache.origin import origin_cache\nfrom warehouse.packaging.models import JournalEntry, File, Project, Release\n\n\n@view_config(\n route_name=\"legacy.api.simple.index\",\n renderer=\"legacy/api/simple/index.html\",\n decorator=[\n cache_control(10 * 60), # 10 minutes\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=5 * 60, # 5 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\ndef simple_index(request):\n # Get the latest serial number\n serial = request.db.query(func.max(JournalEntry.id)).scalar() or 0\n request.response.headers[\"X-PyPI-Last-Serial\"] = str(serial)\n\n # Fetch the name and normalized name for all of our projects\n projects = (\n request.db.query(Project.name, Project.normalized_name)\n .order_by(Project.normalized_name)\n .all()\n )\n\n return {\"projects\": projects}\n\n\n@view_config(\n route_name=\"legacy.api.simple.detail\",\n renderer=\"legacy/api/simple/detail.html\",\n decorator=[\n cache_control(10 * 60), # 10 minutes\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=5 * 60, # 5 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\ndef simple_detail(project, request):\n # TODO: Handle files which are not hosted on PyPI\n\n # Make sure that we're using the normalized version of the URL.\n if (project.normalized_name !=\n request.matchdict.get(\"name\", project.normalized_name)):\n return HTTPMovedPermanently(\n request.current_route_path(name=project.normalized_name),\n )\n\n # Get the latest serial number for this project.\n request.response.headers[\"X-PyPI-Last-Serial\"] = str(project.last_serial)\n\n # Get all of the files for this project.\n files = sorted(\n request.db.query(File)\n .options(joinedload(File.release))\n .filter(\n File.name == project.name,\n File.version.in_(\n request.db.query(Release)\n .filter(Release.project == project)\n .with_entities(Release.version)\n )\n )\n .all(),\n key=lambda f: (parse(f.version), f.packagetype)\n )\n\n return {\"project\": project, \"files\": files}\n", "path": "warehouse/legacy/api/simple.py"}]} | 1,731 | 249 |
gh_patches_debug_9708 | rasdani/github-patches | git_diff | praw-dev__praw-1810 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Failed to upload a video.
**Describe the bug**
Failed to upload a video.
**To Reproduce**
Steps to reproduce the behavior:
submit any video
**Code/Logs**
```
>>> s = sbrdt.submit_video ('video', 'myvideo.mp4')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/gaspar/.local/lib/python3.9/site-packages/praw/models/reddit/subreddit.py", line 1383, in submit_video
video_poster_url=self._upload_media(thumbnail_path)[0],
File "/home/gaspar/.local/lib/python3.9/site-packages/praw/models/reddit/subreddit.py", line 695, in _upload_media
with open(media_path, "rb") as media:
FileNotFoundError: [Errno 2] No such file or directory: '/home/gaspar/.local/lib/python3.9/site-packages/praw/images/PRAW logo.png'
```
**System Info**
- OS: Arch Linux
- Python: 3.9.5
- PRAW Version: 7.4.0
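
From the traceback, the missing file is a `.png` under `praw/images`; assuming the published wheel is built from the `setup.py` shown below, the `package_data` glob would only pick up JPEGs:

```
# from setup.py: only .jpg files under images/ are packaged
package_data={"": ["LICENSE.txt"], PACKAGE_NAME: ["*.ini", "images/*.jpg"]}
# the repo ships "PRAW logo.png", so a pattern such as "images/*.png"
# would be needed for the logo to be installed with the package
```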
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 """praw setup.py"""
2
3 import re
4 from codecs import open
5 from os import path
6
7 from setuptools import find_packages, setup
8
9 PACKAGE_NAME = "praw"
10 HERE = path.abspath(path.dirname(__file__))
11 with open(path.join(HERE, "README.rst"), encoding="utf-8") as fp:
12 README = fp.read()
13 with open(path.join(HERE, PACKAGE_NAME, "const.py"), encoding="utf-8") as fp:
14 VERSION = re.search('__version__ = "([^"]+)"', fp.read()).group(1)
15
16 extras = {
17 "ci": ["coveralls"],
18 "dev": ["packaging"],
19 "lint": [
20 "pre-commit",
21 "sphinx",
22 "sphinx_rtd_theme",
23 ],
24 "readthedocs": ["sphinx", "sphinx_rtd_theme"],
25 "test": [
26 "betamax >=0.8, <0.9",
27 "betamax-matchers >=0.3.0, <0.5",
28 "pytest >=2.7.3",
29 ],
30 }
31 extras["dev"] += extras["lint"] + extras["test"]
32
33 setup(
34 name=PACKAGE_NAME,
35 author="Bryce Boe",
36 author_email="[email protected]",
37 python_requires="~=3.6",
38 classifiers=[
39 "Development Status :: 5 - Production/Stable",
40 "Environment :: Console",
41 "Intended Audience :: Developers",
42 "License :: OSI Approved :: BSD License",
43 "Natural Language :: English",
44 "Operating System :: OS Independent",
45 "Programming Language :: Python",
46 "Programming Language :: Python :: 3",
47 "Programming Language :: Python :: 3.6",
48 "Programming Language :: Python :: 3.7",
49 "Programming Language :: Python :: 3.8",
50 "Programming Language :: Python :: 3.9",
51 "Programming Language :: Python :: 3.10",
52 "Topic :: Utilities",
53 ],
54 description=(
55 "PRAW, an acronym for `Python Reddit API Wrapper`, is a python package that"
56 " allows for simple access to reddit's API."
57 ),
58 extras_require=extras,
59 install_requires=[
60 "prawcore >=2.1, <3",
61 "update_checker >=0.18",
62 "websocket-client >=0.54.0",
63 ],
64 keywords="reddit api wrapper",
65 license="Simplified BSD License",
66 long_description=README,
67 package_data={"": ["LICENSE.txt"], PACKAGE_NAME: ["*.ini", "images/*.jpg"]},
68 packages=find_packages(exclude=["tests", "tests.*", "tools", "tools.*"]),
69 project_urls={
70 "Change Log": "https://praw.readthedocs.io/en/latest/package_info/change_log.html",
71 "Documentation": "https://praw.readthedocs.io/",
72 "Issue Tracker": "https://github.com/praw-dev/praw/issues",
73 "Source Code": "https://github.com/praw-dev/praw",
74 },
75 version=VERSION,
76 )
77
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -64,7 +64,7 @@
keywords="reddit api wrapper",
license="Simplified BSD License",
long_description=README,
- package_data={"": ["LICENSE.txt"], PACKAGE_NAME: ["*.ini", "images/*.jpg"]},
+ package_data={"": ["LICENSE.txt"], PACKAGE_NAME: ["*.ini", "images/*.png"]},
packages=find_packages(exclude=["tests", "tests.*", "tools", "tools.*"]),
project_urls={
"Change Log": "https://praw.readthedocs.io/en/latest/package_info/change_log.html",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -64,7 +64,7 @@\n keywords=\"reddit api wrapper\",\n license=\"Simplified BSD License\",\n long_description=README,\n- package_data={\"\": [\"LICENSE.txt\"], PACKAGE_NAME: [\"*.ini\", \"images/*.jpg\"]},\n+ package_data={\"\": [\"LICENSE.txt\"], PACKAGE_NAME: [\"*.ini\", \"images/*.png\"]},\n packages=find_packages(exclude=[\"tests\", \"tests.*\", \"tools\", \"tools.*\"]),\n project_urls={\n \"Change Log\": \"https://praw.readthedocs.io/en/latest/package_info/change_log.html\",\n", "issue": "Failed to upload a video.\n**Describe the bug**\r\nFailed to upload a video.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\nsubmit any video\r\n\r\n**Code/Logs**\r\n```\r\n>>> s = sbrdt.submit_video ('video', 'myvideo.mp4')\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/gaspar/.local/lib/python3.9/site-packages/praw/models/reddit/subreddit.py\", line 1383, in submit_video\r\n video_poster_url=self._upload_media(thumbnail_path)[0],\r\n File \"/home/gaspar/.local/lib/python3.9/site-packages/praw/models/reddit/subreddit.py\", line 695, in _upload_media\r\n with open(media_path, \"rb\") as media:\r\nFileNotFoundError: [Errno 2] No such file or directory: '/home/gaspar/.local/lib/python3.9/site-packages/praw/images/PRAW logo.png'\r\n```\r\n\r\n**System Info**\r\n - OS: Arch Linux\r\n - Python: 3.9.5\r\n - PRAW Version: 7.4.0\r\n\n", "before_files": [{"content": "\"\"\"praw setup.py\"\"\"\n\nimport re\nfrom codecs import open\nfrom os import path\n\nfrom setuptools import find_packages, setup\n\nPACKAGE_NAME = \"praw\"\nHERE = path.abspath(path.dirname(__file__))\nwith open(path.join(HERE, \"README.rst\"), encoding=\"utf-8\") as fp:\n README = fp.read()\nwith open(path.join(HERE, PACKAGE_NAME, \"const.py\"), encoding=\"utf-8\") as fp:\n VERSION = re.search('__version__ = \"([^\"]+)\"', fp.read()).group(1)\n\nextras = {\n \"ci\": [\"coveralls\"],\n \"dev\": [\"packaging\"],\n \"lint\": [\n \"pre-commit\",\n \"sphinx\",\n \"sphinx_rtd_theme\",\n ],\n \"readthedocs\": [\"sphinx\", \"sphinx_rtd_theme\"],\n \"test\": [\n \"betamax >=0.8, <0.9\",\n \"betamax-matchers >=0.3.0, <0.5\",\n \"pytest >=2.7.3\",\n ],\n}\nextras[\"dev\"] += extras[\"lint\"] + extras[\"test\"]\n\nsetup(\n name=PACKAGE_NAME,\n author=\"Bryce Boe\",\n author_email=\"[email protected]\",\n python_requires=\"~=3.6\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Utilities\",\n ],\n description=(\n \"PRAW, an acronym for `Python Reddit API Wrapper`, is a python package that\"\n \" allows for simple access to reddit's API.\"\n ),\n extras_require=extras,\n install_requires=[\n \"prawcore >=2.1, <3\",\n \"update_checker >=0.18\",\n \"websocket-client >=0.54.0\",\n ],\n keywords=\"reddit api wrapper\",\n license=\"Simplified BSD License\",\n long_description=README,\n package_data={\"\": [\"LICENSE.txt\"], PACKAGE_NAME: [\"*.ini\", \"images/*.jpg\"]},\n 
packages=find_packages(exclude=[\"tests\", \"tests.*\", \"tools\", \"tools.*\"]),\n project_urls={\n \"Change Log\": \"https://praw.readthedocs.io/en/latest/package_info/change_log.html\",\n \"Documentation\": \"https://praw.readthedocs.io/\",\n \"Issue Tracker\": \"https://github.com/praw-dev/praw/issues\",\n \"Source Code\": \"https://github.com/praw-dev/praw\",\n },\n version=VERSION,\n)\n", "path": "setup.py"}], "after_files": [{"content": "\"\"\"praw setup.py\"\"\"\n\nimport re\nfrom codecs import open\nfrom os import path\n\nfrom setuptools import find_packages, setup\n\nPACKAGE_NAME = \"praw\"\nHERE = path.abspath(path.dirname(__file__))\nwith open(path.join(HERE, \"README.rst\"), encoding=\"utf-8\") as fp:\n README = fp.read()\nwith open(path.join(HERE, PACKAGE_NAME, \"const.py\"), encoding=\"utf-8\") as fp:\n VERSION = re.search('__version__ = \"([^\"]+)\"', fp.read()).group(1)\n\nextras = {\n \"ci\": [\"coveralls\"],\n \"dev\": [\"packaging\"],\n \"lint\": [\n \"pre-commit\",\n \"sphinx\",\n \"sphinx_rtd_theme\",\n ],\n \"readthedocs\": [\"sphinx\", \"sphinx_rtd_theme\"],\n \"test\": [\n \"betamax >=0.8, <0.9\",\n \"betamax-matchers >=0.3.0, <0.5\",\n \"pytest >=2.7.3\",\n ],\n}\nextras[\"dev\"] += extras[\"lint\"] + extras[\"test\"]\n\nsetup(\n name=PACKAGE_NAME,\n author=\"Bryce Boe\",\n author_email=\"[email protected]\",\n python_requires=\"~=3.6\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Utilities\",\n ],\n description=(\n \"PRAW, an acronym for `Python Reddit API Wrapper`, is a python package that\"\n \" allows for simple access to reddit's API.\"\n ),\n extras_require=extras,\n install_requires=[\n \"prawcore >=2.1, <3\",\n \"update_checker >=0.18\",\n \"websocket-client >=0.54.0\",\n ],\n keywords=\"reddit api wrapper\",\n license=\"Simplified BSD License\",\n long_description=README,\n package_data={\"\": [\"LICENSE.txt\"], PACKAGE_NAME: [\"*.ini\", \"images/*.png\"]},\n packages=find_packages(exclude=[\"tests\", \"tests.*\", \"tools\", \"tools.*\"]),\n project_urls={\n \"Change Log\": \"https://praw.readthedocs.io/en/latest/package_info/change_log.html\",\n \"Documentation\": \"https://praw.readthedocs.io/\",\n \"Issue Tracker\": \"https://github.com/praw-dev/praw/issues\",\n \"Source Code\": \"https://github.com/praw-dev/praw\",\n },\n version=VERSION,\n)\n", "path": "setup.py"}]} | 1,298 | 145 |
gh_patches_debug_35228 | rasdani/github-patches | git_diff | mirumee__ariadne-529 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
OpenTracing plugin performs a deepcopy of the resolver's args, which fails when a larger file upload is used.
OpenTracing performs a deep copy of the arguments passed to the resolver function when args filtering is used (e.g. to hide passwords), but this apparently fails when there is a larger uploaded file in the args.
A potential fix would be a default filter that replaces uploaded files with a cheap str representation (e.g. `<UploadedFile(name="test.jpg", type="image/jpeg", size=44100)>`) before the custom filtering logic is run.
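
A rough sketch of what such a default filter could look like (the function name and the duck-typed check are illustrative assumptions, not ariadne's actual API):

```
from typing import Any, Dict


def replace_uploads_for_tracing(args: Dict[str, Any]) -> Dict[str, Any]:
    def convert(value: Any) -> Any:
        if isinstance(value, dict):
            return {key: convert(item) for key, item in value.items()}
        if isinstance(value, list):
            return [convert(item) for item in value]
        if hasattr(value, "filename") and hasattr(value, "file"):
            # assumed duck-typing for "uploaded file"; a real implementation
            # would check the concrete upload types the HTTP layer exposes
            return f'<UploadedFile(name="{value.filename}")>'
        return value

    return convert(args)
```

Running this before any `deepcopy` or custom filtering would keep the traced args cheap to copy.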
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ariadne/contrib/tracing/opentracing.py`
Content:
```
1 from copy import deepcopy
2 from functools import partial
3 from inspect import isawaitable
4 from typing import Any, Callable, Dict, Optional
5
6 from graphql import GraphQLResolveInfo
7 from opentracing import Scope, Tracer, global_tracer
8 from opentracing.ext import tags
9
10 from ...types import ContextValue, Extension, Resolver
11 from .utils import format_path, should_trace
12
13 ArgFilter = Callable[[Dict[str, Any], GraphQLResolveInfo], Dict[str, Any]]
14
15
16 class OpenTracingExtension(Extension):
17 _arg_filter: Optional[ArgFilter]
18 _root_scope: Scope
19 _tracer: Tracer
20
21 def __init__(self, *, arg_filter: Optional[ArgFilter] = None):
22 self._arg_filter = arg_filter
23 self._tracer = global_tracer()
24 self._root_scope = None
25
26 def request_started(self, context: ContextValue):
27 self._root_scope = self._tracer.start_active_span("GraphQL Query")
28 self._root_scope.span.set_tag(tags.COMPONENT, "graphql")
29
30 def request_finished(self, context: ContextValue):
31 self._root_scope.close()
32
33 async def resolve(
34 self, next_: Resolver, parent: Any, info: GraphQLResolveInfo, **kwargs
35 ):
36 if not should_trace(info):
37 result = next_(parent, info, **kwargs)
38 if isawaitable(result):
39 result = await result
40 return result
41
42 with self._tracer.start_active_span(info.field_name) as scope:
43 span = scope.span
44 span.set_tag(tags.COMPONENT, "graphql")
45 span.set_tag("graphql.parentType", info.parent_type.name)
46
47 graphql_path = ".".join(
48 map(str, format_path(info.path)) # pylint: disable=bad-builtin
49 )
50 span.set_tag("graphql.path", graphql_path)
51
52 if kwargs:
53 filtered_kwargs = self.filter_resolver_args(kwargs, info)
54 for kwarg, value in filtered_kwargs.items():
55 span.set_tag(f"graphql.param.{kwarg}", value)
56
57 result = next_(parent, info, **kwargs)
58 if isawaitable(result):
59 result = await result
60 return result
61
62 def filter_resolver_args(
63 self, args: Dict[str, Any], info: GraphQLResolveInfo
64 ) -> Dict[str, Any]:
65 if not self._arg_filter:
66 return args
67
68 return self._arg_filter(deepcopy(args), info)
69
70
71 class OpenTracingExtensionSync(OpenTracingExtension):
72 def resolve(
73 self, next_: Resolver, parent: Any, info: GraphQLResolveInfo, **kwargs
74 ): # pylint: disable=invalid-overridden-method
75 if not should_trace(info):
76 result = next_(parent, info, **kwargs)
77 return result
78
79 with self._tracer.start_active_span(info.field_name) as scope:
80 span = scope.span
81 span.set_tag(tags.COMPONENT, "graphql")
82 span.set_tag("graphql.parentType", info.parent_type.name)
83
84 graphql_path = ".".join(
85 map(str, format_path(info.path)) # pylint: disable=bad-builtin
86 )
87 span.set_tag("graphql.path", graphql_path)
88
89 if kwargs:
90 filtered_kwargs = self.filter_resolver_args(kwargs, info)
91 for kwarg, value in filtered_kwargs.items():
92 span.set_tag(f"graphql.param.{kwarg}", value)
93
94 result = next_(parent, info, **kwargs)
95 return result
96
97
98 def opentracing_extension(*, arg_filter: Optional[ArgFilter] = None):
99 return partial(OpenTracingExtension, arg_filter=arg_filter)
100
101
102 def opentracing_extension_sync(*, arg_filter: Optional[ArgFilter] = None):
103 return partial(OpenTracingExtensionSync, arg_filter=arg_filter)
104
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ariadne/contrib/tracing/opentracing.py b/ariadne/contrib/tracing/opentracing.py
--- a/ariadne/contrib/tracing/opentracing.py
+++ b/ariadne/contrib/tracing/opentracing.py
@@ -1,11 +1,13 @@
-from copy import deepcopy
+import cgi
+import os
from functools import partial
from inspect import isawaitable
-from typing import Any, Callable, Dict, Optional
+from typing import Any, Callable, Dict, Optional, Union
from graphql import GraphQLResolveInfo
from opentracing import Scope, Tracer, global_tracer
from opentracing.ext import tags
+from starlette.datastructures import UploadFile
from ...types import ContextValue, Extension, Resolver
from .utils import format_path, should_trace
@@ -62,10 +64,12 @@
def filter_resolver_args(
self, args: Dict[str, Any], info: GraphQLResolveInfo
) -> Dict[str, Any]:
+ args_to_trace = copy_args_for_tracing(args)
+
if not self._arg_filter:
- return args
+ return args_to_trace
- return self._arg_filter(deepcopy(args), info)
+ return self._arg_filter(args_to_trace, info)
class OpenTracingExtensionSync(OpenTracingExtension):
@@ -101,3 +105,34 @@
def opentracing_extension_sync(*, arg_filter: Optional[ArgFilter] = None):
return partial(OpenTracingExtensionSync, arg_filter=arg_filter)
+
+
+def copy_args_for_tracing(value: Any) -> Any:
+ if isinstance(value, dict):
+ return {k: copy_args_for_tracing(v) for k, v in value.items()}
+ if isinstance(value, list):
+ return [copy_args_for_tracing(v) for v in value]
+ if isinstance(value, (UploadFile, cgi.FieldStorage)):
+ return repr_upload_file(value)
+ return value
+
+
+def repr_upload_file(upload_file: Union[UploadFile, cgi.FieldStorage]) -> str:
+ filename = upload_file.filename
+
+ if isinstance(upload_file, cgi.FieldStorage):
+ mime_type = upload_file.type
+ else:
+ mime_type = upload_file.content_type
+
+ if upload_file.file is None and isinstance(upload_file, cgi.FieldStorage):
+ size = len(upload_file.value) if upload_file.value is not None else 0
+ else:
+ file_ = upload_file.file
+ file_.seek(0, os.SEEK_END)
+ size = file_.tell()
+ file_.seek(0)
+
+ return (
+ f"{type(upload_file)}(mime_type={mime_type}, size={size}, filename={filename})"
+ )
| {"golden_diff": "diff --git a/ariadne/contrib/tracing/opentracing.py b/ariadne/contrib/tracing/opentracing.py\n--- a/ariadne/contrib/tracing/opentracing.py\n+++ b/ariadne/contrib/tracing/opentracing.py\n@@ -1,11 +1,13 @@\n-from copy import deepcopy\n+import cgi\n+import os\n from functools import partial\n from inspect import isawaitable\n-from typing import Any, Callable, Dict, Optional\n+from typing import Any, Callable, Dict, Optional, Union\n \n from graphql import GraphQLResolveInfo\n from opentracing import Scope, Tracer, global_tracer\n from opentracing.ext import tags\n+from starlette.datastructures import UploadFile\n \n from ...types import ContextValue, Extension, Resolver\n from .utils import format_path, should_trace\n@@ -62,10 +64,12 @@\n def filter_resolver_args(\n self, args: Dict[str, Any], info: GraphQLResolveInfo\n ) -> Dict[str, Any]:\n+ args_to_trace = copy_args_for_tracing(args)\n+\n if not self._arg_filter:\n- return args\n+ return args_to_trace\n \n- return self._arg_filter(deepcopy(args), info)\n+ return self._arg_filter(args_to_trace, info)\n \n \n class OpenTracingExtensionSync(OpenTracingExtension):\n@@ -101,3 +105,34 @@\n \n def opentracing_extension_sync(*, arg_filter: Optional[ArgFilter] = None):\n return partial(OpenTracingExtensionSync, arg_filter=arg_filter)\n+\n+\n+def copy_args_for_tracing(value: Any) -> Any:\n+ if isinstance(value, dict):\n+ return {k: copy_args_for_tracing(v) for k, v in value.items()}\n+ if isinstance(value, list):\n+ return [copy_args_for_tracing(v) for v in value]\n+ if isinstance(value, (UploadFile, cgi.FieldStorage)):\n+ return repr_upload_file(value)\n+ return value\n+\n+\n+def repr_upload_file(upload_file: Union[UploadFile, cgi.FieldStorage]) -> str:\n+ filename = upload_file.filename\n+\n+ if isinstance(upload_file, cgi.FieldStorage):\n+ mime_type = upload_file.type\n+ else:\n+ mime_type = upload_file.content_type\n+\n+ if upload_file.file is None and isinstance(upload_file, cgi.FieldStorage):\n+ size = len(upload_file.value) if upload_file.value is not None else 0\n+ else:\n+ file_ = upload_file.file\n+ file_.seek(0, os.SEEK_END)\n+ size = file_.tell()\n+ file_.seek(0)\n+\n+ return (\n+ f\"{type(upload_file)}(mime_type={mime_type}, size={size}, filename={filename})\"\n+ )\n", "issue": "OpenTracing plugin performs deepcopy of resolver's args, which fails when file upload for larger file is used.\nOpenTracing performs deep copy of arguments passed to the resolver function when args filtering is used (eg. to hide passwords), but this apparently fails there's larger uploaded file in the args.\r\n\r\nPotential fix would be default filter that replaces uploaded files with cheap str representation (eg. 
`<UploadedFile(name=\"test.jpg\", type=\"image/jpeg\", size=44100)>`) before custom filtering logic is ran next.\n", "before_files": [{"content": "from copy import deepcopy\nfrom functools import partial\nfrom inspect import isawaitable\nfrom typing import Any, Callable, Dict, Optional\n\nfrom graphql import GraphQLResolveInfo\nfrom opentracing import Scope, Tracer, global_tracer\nfrom opentracing.ext import tags\n\nfrom ...types import ContextValue, Extension, Resolver\nfrom .utils import format_path, should_trace\n\nArgFilter = Callable[[Dict[str, Any], GraphQLResolveInfo], Dict[str, Any]]\n\n\nclass OpenTracingExtension(Extension):\n _arg_filter: Optional[ArgFilter]\n _root_scope: Scope\n _tracer: Tracer\n\n def __init__(self, *, arg_filter: Optional[ArgFilter] = None):\n self._arg_filter = arg_filter\n self._tracer = global_tracer()\n self._root_scope = None\n\n def request_started(self, context: ContextValue):\n self._root_scope = self._tracer.start_active_span(\"GraphQL Query\")\n self._root_scope.span.set_tag(tags.COMPONENT, \"graphql\")\n\n def request_finished(self, context: ContextValue):\n self._root_scope.close()\n\n async def resolve(\n self, next_: Resolver, parent: Any, info: GraphQLResolveInfo, **kwargs\n ):\n if not should_trace(info):\n result = next_(parent, info, **kwargs)\n if isawaitable(result):\n result = await result\n return result\n\n with self._tracer.start_active_span(info.field_name) as scope:\n span = scope.span\n span.set_tag(tags.COMPONENT, \"graphql\")\n span.set_tag(\"graphql.parentType\", info.parent_type.name)\n\n graphql_path = \".\".join(\n map(str, format_path(info.path)) # pylint: disable=bad-builtin\n )\n span.set_tag(\"graphql.path\", graphql_path)\n\n if kwargs:\n filtered_kwargs = self.filter_resolver_args(kwargs, info)\n for kwarg, value in filtered_kwargs.items():\n span.set_tag(f\"graphql.param.{kwarg}\", value)\n\n result = next_(parent, info, **kwargs)\n if isawaitable(result):\n result = await result\n return result\n\n def filter_resolver_args(\n self, args: Dict[str, Any], info: GraphQLResolveInfo\n ) -> Dict[str, Any]:\n if not self._arg_filter:\n return args\n\n return self._arg_filter(deepcopy(args), info)\n\n\nclass OpenTracingExtensionSync(OpenTracingExtension):\n def resolve(\n self, next_: Resolver, parent: Any, info: GraphQLResolveInfo, **kwargs\n ): # pylint: disable=invalid-overridden-method\n if not should_trace(info):\n result = next_(parent, info, **kwargs)\n return result\n\n with self._tracer.start_active_span(info.field_name) as scope:\n span = scope.span\n span.set_tag(tags.COMPONENT, \"graphql\")\n span.set_tag(\"graphql.parentType\", info.parent_type.name)\n\n graphql_path = \".\".join(\n map(str, format_path(info.path)) # pylint: disable=bad-builtin\n )\n span.set_tag(\"graphql.path\", graphql_path)\n\n if kwargs:\n filtered_kwargs = self.filter_resolver_args(kwargs, info)\n for kwarg, value in filtered_kwargs.items():\n span.set_tag(f\"graphql.param.{kwarg}\", value)\n\n result = next_(parent, info, **kwargs)\n return result\n\n\ndef opentracing_extension(*, arg_filter: Optional[ArgFilter] = None):\n return partial(OpenTracingExtension, arg_filter=arg_filter)\n\n\ndef opentracing_extension_sync(*, arg_filter: Optional[ArgFilter] = None):\n return partial(OpenTracingExtensionSync, arg_filter=arg_filter)\n", "path": "ariadne/contrib/tracing/opentracing.py"}], "after_files": [{"content": "import cgi\nimport os\nfrom functools import partial\nfrom inspect import isawaitable\nfrom typing import Any, Callable, Dict, 
Optional, Union\n\nfrom graphql import GraphQLResolveInfo\nfrom opentracing import Scope, Tracer, global_tracer\nfrom opentracing.ext import tags\nfrom starlette.datastructures import UploadFile\n\nfrom ...types import ContextValue, Extension, Resolver\nfrom .utils import format_path, should_trace\n\nArgFilter = Callable[[Dict[str, Any], GraphQLResolveInfo], Dict[str, Any]]\n\n\nclass OpenTracingExtension(Extension):\n _arg_filter: Optional[ArgFilter]\n _root_scope: Scope\n _tracer: Tracer\n\n def __init__(self, *, arg_filter: Optional[ArgFilter] = None):\n self._arg_filter = arg_filter\n self._tracer = global_tracer()\n self._root_scope = None\n\n def request_started(self, context: ContextValue):\n self._root_scope = self._tracer.start_active_span(\"GraphQL Query\")\n self._root_scope.span.set_tag(tags.COMPONENT, \"graphql\")\n\n def request_finished(self, context: ContextValue):\n self._root_scope.close()\n\n async def resolve(\n self, next_: Resolver, parent: Any, info: GraphQLResolveInfo, **kwargs\n ):\n if not should_trace(info):\n result = next_(parent, info, **kwargs)\n if isawaitable(result):\n result = await result\n return result\n\n with self._tracer.start_active_span(info.field_name) as scope:\n span = scope.span\n span.set_tag(tags.COMPONENT, \"graphql\")\n span.set_tag(\"graphql.parentType\", info.parent_type.name)\n\n graphql_path = \".\".join(\n map(str, format_path(info.path)) # pylint: disable=bad-builtin\n )\n span.set_tag(\"graphql.path\", graphql_path)\n\n if kwargs:\n filtered_kwargs = self.filter_resolver_args(kwargs, info)\n for kwarg, value in filtered_kwargs.items():\n span.set_tag(f\"graphql.param.{kwarg}\", value)\n\n result = next_(parent, info, **kwargs)\n if isawaitable(result):\n result = await result\n return result\n\n def filter_resolver_args(\n self, args: Dict[str, Any], info: GraphQLResolveInfo\n ) -> Dict[str, Any]:\n args_to_trace = copy_args_for_tracing(args)\n\n if not self._arg_filter:\n return args_to_trace\n\n return self._arg_filter(args_to_trace, info)\n\n\nclass OpenTracingExtensionSync(OpenTracingExtension):\n def resolve(\n self, next_: Resolver, parent: Any, info: GraphQLResolveInfo, **kwargs\n ): # pylint: disable=invalid-overridden-method\n if not should_trace(info):\n result = next_(parent, info, **kwargs)\n return result\n\n with self._tracer.start_active_span(info.field_name) as scope:\n span = scope.span\n span.set_tag(tags.COMPONENT, \"graphql\")\n span.set_tag(\"graphql.parentType\", info.parent_type.name)\n\n graphql_path = \".\".join(\n map(str, format_path(info.path)) # pylint: disable=bad-builtin\n )\n span.set_tag(\"graphql.path\", graphql_path)\n\n if kwargs:\n filtered_kwargs = self.filter_resolver_args(kwargs, info)\n for kwarg, value in filtered_kwargs.items():\n span.set_tag(f\"graphql.param.{kwarg}\", value)\n\n result = next_(parent, info, **kwargs)\n return result\n\n\ndef opentracing_extension(*, arg_filter: Optional[ArgFilter] = None):\n return partial(OpenTracingExtension, arg_filter=arg_filter)\n\n\ndef opentracing_extension_sync(*, arg_filter: Optional[ArgFilter] = None):\n return partial(OpenTracingExtensionSync, arg_filter=arg_filter)\n\n\ndef copy_args_for_tracing(value: Any) -> Any:\n if isinstance(value, dict):\n return {k: copy_args_for_tracing(v) for k, v in value.items()}\n if isinstance(value, list):\n return [copy_args_for_tracing(v) for v in value]\n if isinstance(value, (UploadFile, cgi.FieldStorage)):\n return repr_upload_file(value)\n return value\n\n\ndef repr_upload_file(upload_file: 
Union[UploadFile, cgi.FieldStorage]) -> str:\n filename = upload_file.filename\n\n if isinstance(upload_file, cgi.FieldStorage):\n mime_type = upload_file.type\n else:\n mime_type = upload_file.content_type\n\n if upload_file.file is None and isinstance(upload_file, cgi.FieldStorage):\n size = len(upload_file.value) if upload_file.value is not None else 0\n else:\n file_ = upload_file.file\n file_.seek(0, os.SEEK_END)\n size = file_.tell()\n file_.seek(0)\n\n return (\n f\"{type(upload_file)}(mime_type={mime_type}, size={size}, filename={filename})\"\n )\n", "path": "ariadne/contrib/tracing/opentracing.py"}]} | 1,392 | 623 |
gh_patches_debug_13894 | rasdani/github-patches | git_diff | mars-project__mars-1699 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Running the example code hangs in distributed mode
<!--
Thank you for your contribution!
Please review https://github.com/mars-project/mars/blob/master/CONTRIBUTING.rst before opening an issue.
-->
**Describe the bug**
Create a Mars cluster and run the code in the readme:
```python
import mars.tensor as mt
N = 200_000_000
a = mt.random.uniform(-1, 1, size=(N, 2))
print(((mt.linalg.norm(a, axis=1) < 1)
.sum() * 4 / N).execute())
```
it hangs, and an error can be found in the server log:
```
2020-11-09 21:30:22,053 mars.scheduler.operands.common 97 ERROR Attempt 1: Unexpected error KeyError occurred in executing operand 230bef1901408a5f9134f34444918898 in 11.238.146.2:35131
Traceback (most recent call last):
File "/home/admin/work/public-mars-0.5.4.zip/mars/promise.py", line 378, in _wrapped
return func(*args, **kwargs)
File "/home/admin/work/public-mars-0.5.4.zip/mars/utils.py", line 365, in _wrapped
return func(*args, **kwargs)
File "/home/admin/work/public-mars-0.5.4.zip/mars/worker/execution.py", line 564, in execute_graph
quota_request = self._prepare_quota_request(session_id, graph_key)
File "/home/admin/work/public-mars-0.5.4.zip/mars/worker/execution.py", line 249, in _prepare_quota_request
memory_estimations = self._estimate_calc_memory(session_id, graph_key)
File "/home/admin/work/public-mars-0.5.4.zip/mars/worker/execution.py", line 213, in _estimate_calc_memory
res = executor.execute_graph(graph_record.graph, graph_record.chunk_targets, mock=True)
File "/home/admin/work/public-mars-0.5.4.zip/mars/executor.py", line 690, in execute_graph
res = graph_execution.execute(retval)
File "/home/admin/work/public-mars-0.5.4.zip/mars/executor.py", line 571, in execute
future.result()
File "/home/admin/work/public-mars-0.5.4.zip/mars/executor.py", line 186, in result
raise self._exc_info[1] from None
File "/home/admin/work/public-mars-0.5.4.zip/mars/executor.py", line 198, in submit
return self._MockResult(fn(*args, **kwargs))
File "/home/admin/work/public-mars-0.5.4.zip/mars/utils.py", line 439, in _inner
return func(*args, **kwargs)
File "/home/admin/work/public-mars-0.5.4.zip/mars/executor.py", line 443, in _execute_operand
Executor.handle(first_op, results, self._mock)
File "/home/admin/work/public-mars-0.5.4.zip/mars/executor.py", line 641, in handle
return runner(results, op)
File "/home/admin/work/public-mars-0.5.4.zip/mars/tensor/fuse/ne.py", line 75, in estimate_size
estimate_fuse_size(ctx, op)
File "/home/admin/work/public-mars-0.5.4.zip/mars/tensor/fuse/core.py", line 49, in estimate_fuse_size
results = executor.execute_graph(dag, output_keys, mock=True, no_intermediate=True)
File "/home/admin/work/public-mars-0.5.4.zip/mars/executor.py", line 690, in execute_graph
res = graph_execution.execute(retval)
File "/home/admin/work/public-mars-0.5.4.zip/mars/executor.py", line 571, in execute
future.result()
File "/opt/conda/lib/python3.7/concurrent/futures/_base.py", line 428, in result
return self.__get_result()
File "/opt/conda/lib/python3.7/concurrent/futures/_base.py", line 384, in __get_result
raise self._exception
File "/opt/conda/lib/python3.7/concurrent/futures/thread.py", line 57, in run
result = self.fn(*self.args, **self.kwargs)
File "/home/admin/work/public-mars-0.5.4.zip/mars/utils.py", line 439, in _inner
return func(*args, **kwargs)
File "/home/admin/work/public-mars-0.5.4.zip/mars/executor.py", line 486, in _execute_operand
del results[dep_key]
KeyError: '94e11781368129674925eb2d4ae093bf'
```
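The last frame is the telling one: `_execute_operand` unconditionally runs `del results[dep_key]` for every dependency, but a dependency coming from outside the fused sub-graph was never added to `results` in the first place. A toy sketch of that bookkeeping pattern (hypothetical and heavily simplified, not Mars's actual executor):

```python
# Toy sketch of a mock executor's cleanup step (hypothetical names).
results = {"a": 1}       # only chunk "a" was produced inside the sub-graph
dep_keys = ["a", "b"]    # "b" is an input that lives outside the sub-graph

for dep_key in dep_keys:
    # Intermediates are freed after their last use; a key that was never
    # registered raises KeyError, matching the traceback above.
    del results[dep_key]  # KeyError: 'b'
```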
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mars/tensor/fuse/core.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 # Copyright 1999-2020 Alibaba Group Holding Ltd.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 from ...operands import FuseChunkMixin
18 from ..operands import TensorFuse, TensorOperandMixin
19
20
21 class TensorFuseChunkMixin(FuseChunkMixin, TensorOperandMixin):
22 __slots__ = ()
23
24
25 class TensorFuseChunk(TensorFuse, TensorFuseChunkMixin):
26 def __init__(self, dtype=None, **kw):
27 super().__init__(_dtype=dtype, **kw)
28
29
30 def estimate_fuse_size(ctx, op):
31 from ...graph import DAG
32 from ...executor import Executor
33
34 chunk = op.outputs[0]
35 dag = DAG()
36 size_ctx = dict()
37 keys = set(c.key for c in chunk.composed)
38 for c in chunk.composed:
39 dag.add_node(c)
40 for inp in c.inputs:
41 if inp.key not in keys:
42 size_ctx[inp.key] = ctx[inp.key]
43 if inp not in dag:
44 dag.add_node(inp)
45 dag.add_edge(inp, c)
46
47 executor = Executor(storage=size_ctx)
48 output_keys = [o.key for o in op.outputs]
49 results = executor.execute_graph(dag, output_keys, mock=True, no_intermediate=True)
50 ctx.update(zip(output_keys, results))
51
52 # update with the maximal memory cost during the whole execution
53 total_mem = sum(ctx[key][1] for key in output_keys)
54 if total_mem:
55 for key in output_keys:
56 r = ctx[key]
57 ctx[key] = (r[0], max(r[1], r[1] * executor.mock_max_memory // total_mem))
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mars/tensor/fuse/core.py b/mars/tensor/fuse/core.py
--- a/mars/tensor/fuse/core.py
+++ b/mars/tensor/fuse/core.py
@@ -30,6 +30,7 @@
def estimate_fuse_size(ctx, op):
from ...graph import DAG
from ...executor import Executor
+ from ...utils import build_fetch_chunk
chunk = op.outputs[0]
dag = DAG()
@@ -40,6 +41,7 @@
for inp in c.inputs:
if inp.key not in keys:
size_ctx[inp.key] = ctx[inp.key]
+ inp = build_fetch_chunk(inp).data
if inp not in dag:
dag.add_node(inp)
dag.add_edge(inp, c)
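In other words, the patch wraps every input that originates outside the fused chunk as a fetch chunk, so the mock run treats it as externally supplied data rather than an intermediate to be freed. A minimal sketch of the idea (hypothetical names, illustrative sizes):

```python
# Seed external inputs up front so the mock executor's cleanup
# (`del results[dep_key]`) always finds the keys it expects.
external = {"b": (8, 8)}   # (store size, calc size) taken from the outer ctx
results = dict(external)   # fetch chunks behave like pre-filled results
results["a"] = (4, 4)      # produced inside the fused sub-graph

for dep_key in ["a", "b"]:
    del results[dep_key]   # no KeyError: every dependency was registered
```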
| {"golden_diff": "diff --git a/mars/tensor/fuse/core.py b/mars/tensor/fuse/core.py\n--- a/mars/tensor/fuse/core.py\n+++ b/mars/tensor/fuse/core.py\n@@ -30,6 +30,7 @@\n def estimate_fuse_size(ctx, op):\n from ...graph import DAG\n from ...executor import Executor\n+ from ...utils import build_fetch_chunk\n \n chunk = op.outputs[0]\n dag = DAG()\n@@ -40,6 +41,7 @@\n for inp in c.inputs:\n if inp.key not in keys:\n size_ctx[inp.key] = ctx[inp.key]\n+ inp = build_fetch_chunk(inp).data\n if inp not in dag:\n dag.add_node(inp)\n dag.add_edge(inp, c)\n", "issue": "[BUG] Run the example code hangs in distributed mode\n<!--\r\nThank you for your contribution!\r\n\r\nPlease review https://github.com/mars-project/mars/blob/master/CONTRIBUTING.rst before opening an issue.\r\n-->\r\n\r\n**Describe the bug**\r\nCreate a Mars cluster and run the code in readme:\r\n``` Python\r\nimport mars.tensor as mt\r\nN = 200_000_000\r\na = mt.random.uniform(-1, 1, size=(N, 2))\r\nprint(((mt.linalg.norm(a, axis=1) < 1)\r\n .sum() * 4 / N).execute())\r\n```\r\n\r\nit hangs and error be found in server client:\r\n```\r\n2020-11-09 21:30:22,053 mars.scheduler.operands.common 97 ERROR Attempt 1: Unexpected error KeyError occurred in executing operand 230bef1901408a5f9134f34444918898 in 11.238.146.2:35131\r\nTraceback (most recent call last):\r\n File \"/home/admin/work/public-mars-0.5.4.zip/mars/promise.py\", line 378, in _wrapped\r\n return func(*args, **kwargs)\r\n File \"/home/admin/work/public-mars-0.5.4.zip/mars/utils.py\", line 365, in _wrapped\r\n return func(*args, **kwargs)\r\n File \"/home/admin/work/public-mars-0.5.4.zip/mars/worker/execution.py\", line 564, in execute_graph\r\n quota_request = self._prepare_quota_request(session_id, graph_key)\r\n File \"/home/admin/work/public-mars-0.5.4.zip/mars/worker/execution.py\", line 249, in _prepare_quota_request\r\n memory_estimations = self._estimate_calc_memory(session_id, graph_key)\r\n File \"/home/admin/work/public-mars-0.5.4.zip/mars/worker/execution.py\", line 213, in _estimate_calc_memory\r\n res = executor.execute_graph(graph_record.graph, graph_record.chunk_targets, mock=True)\r\n File \"/home/admin/work/public-mars-0.5.4.zip/mars/executor.py\", line 690, in execute_graph\r\n res = graph_execution.execute(retval)\r\n File \"/home/admin/work/public-mars-0.5.4.zip/mars/executor.py\", line 571, in execute\r\n future.result()\r\n File \"/home/admin/work/public-mars-0.5.4.zip/mars/executor.py\", line 186, in result\r\n raise self._exc_info[1] from None\r\n File \"/home/admin/work/public-mars-0.5.4.zip/mars/executor.py\", line 198, in submit\r\n return self._MockResult(fn(*args, **kwargs))\r\n File \"/home/admin/work/public-mars-0.5.4.zip/mars/utils.py\", line 439, in _inner\r\n return func(*args, **kwargs)\r\n File \"/home/admin/work/public-mars-0.5.4.zip/mars/executor.py\", line 443, in _execute_operand\r\n Executor.handle(first_op, results, self._mock)\r\n File \"/home/admin/work/public-mars-0.5.4.zip/mars/executor.py\", line 641, in handle\r\n return runner(results, op)\r\n File \"/home/admin/work/public-mars-0.5.4.zip/mars/tensor/fuse/ne.py\", line 75, in estimate_size\r\n estimate_fuse_size(ctx, op)\r\n File \"/home/admin/work/public-mars-0.5.4.zip/mars/tensor/fuse/core.py\", line 49, in estimate_fuse_size\r\n results = executor.execute_graph(dag, output_keys, mock=True, no_intermediate=True)\r\n File \"/home/admin/work/public-mars-0.5.4.zip/mars/executor.py\", line 690, in execute_graph\r\n res = graph_execution.execute(retval)\r\n File 
\"/home/admin/work/public-mars-0.5.4.zip/mars/executor.py\", line 571, in execute\r\n future.result()\r\n File \"/opt/conda/lib/python3.7/concurrent/futures/_base.py\", line 428, in result\r\n return self.__get_result()\r\n File \"/opt/conda/lib/python3.7/concurrent/futures/_base.py\", line 384, in __get_result\r\n raise self._exception\r\n File \"/opt/conda/lib/python3.7/concurrent/futures/thread.py\", line 57, in run\r\n result = self.fn(*self.args, **self.kwargs)\r\n File \"/home/admin/work/public-mars-0.5.4.zip/mars/utils.py\", line 439, in _inner\r\n return func(*args, **kwargs)\r\n File \"/home/admin/work/public-mars-0.5.4.zip/mars/executor.py\", line 486, in _execute_operand\r\n del results[dep_key]\r\nKeyError: '94e11781368129674925eb2d4ae093bf'\r\n```\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom ...operands import FuseChunkMixin\nfrom ..operands import TensorFuse, TensorOperandMixin\n\n\nclass TensorFuseChunkMixin(FuseChunkMixin, TensorOperandMixin):\n __slots__ = ()\n\n\nclass TensorFuseChunk(TensorFuse, TensorFuseChunkMixin):\n def __init__(self, dtype=None, **kw):\n super().__init__(_dtype=dtype, **kw)\n\n\ndef estimate_fuse_size(ctx, op):\n from ...graph import DAG\n from ...executor import Executor\n\n chunk = op.outputs[0]\n dag = DAG()\n size_ctx = dict()\n keys = set(c.key for c in chunk.composed)\n for c in chunk.composed:\n dag.add_node(c)\n for inp in c.inputs:\n if inp.key not in keys:\n size_ctx[inp.key] = ctx[inp.key]\n if inp not in dag:\n dag.add_node(inp)\n dag.add_edge(inp, c)\n\n executor = Executor(storage=size_ctx)\n output_keys = [o.key for o in op.outputs]\n results = executor.execute_graph(dag, output_keys, mock=True, no_intermediate=True)\n ctx.update(zip(output_keys, results))\n\n # update with the maximal memory cost during the whole execution\n total_mem = sum(ctx[key][1] for key in output_keys)\n if total_mem:\n for key in output_keys:\n r = ctx[key]\n ctx[key] = (r[0], max(r[1], r[1] * executor.mock_max_memory // total_mem))\n", "path": "mars/tensor/fuse/core.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom ...operands import FuseChunkMixin\nfrom ..operands import TensorFuse, TensorOperandMixin\n\n\nclass TensorFuseChunkMixin(FuseChunkMixin, TensorOperandMixin):\n 
__slots__ = ()\n\n\nclass TensorFuseChunk(TensorFuse, TensorFuseChunkMixin):\n def __init__(self, dtype=None, **kw):\n super().__init__(_dtype=dtype, **kw)\n\n\ndef estimate_fuse_size(ctx, op):\n from ...graph import DAG\n from ...executor import Executor\n from ...utils import build_fetch_chunk\n\n chunk = op.outputs[0]\n dag = DAG()\n size_ctx = dict()\n keys = set(c.key for c in chunk.composed)\n for c in chunk.composed:\n dag.add_node(c)\n for inp in c.inputs:\n if inp.key not in keys:\n size_ctx[inp.key] = ctx[inp.key]\n inp = build_fetch_chunk(inp).data\n if inp not in dag:\n dag.add_node(inp)\n dag.add_edge(inp, c)\n\n executor = Executor(storage=size_ctx)\n output_keys = [o.key for o in op.outputs]\n results = executor.execute_graph(dag, output_keys, mock=True, no_intermediate=True)\n ctx.update(zip(output_keys, results))\n\n # update with the maximal memory cost during the whole execution\n total_mem = sum(ctx[key][1] for key in output_keys)\n if total_mem:\n for key in output_keys:\n r = ctx[key]\n ctx[key] = (r[0], max(r[1], r[1] * executor.mock_max_memory // total_mem))\n", "path": "mars/tensor/fuse/core.py"}]} | 2,024 | 174 |
gh_patches_debug_9307 | rasdani/github-patches | git_diff | streamlink__streamlink-4210 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
plugins.tviplayer: unable to handle CNN Portugal
### Checklist
- [X] This is a plugin issue and not a different kind of issue
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
Latest stable release
### Description
- issue:
- the new `tviplayer` plugin is unable to handle https://tviplayer.iol.pt/direto/CNN
- of note, the previous TVI 24 became CNN Portugal after #4199.
- to reproduce:
```sh
streamlink https://tviplayer.iol.pt/direto/CNN
```
```sh
[cli][info] Found matching plugin tviplayer for URL https://tviplayer.iol.pt/direto/CNN
error: Unable to validate response text: Unable to parse HTML: Unicode strings with encoding declaration are not supported. Please use bytes input or XML fragments without declaration. ('<?xml version=\'1.0\' encoding=\'U ...)
```
### Debug log
```text
streamlink --loglevel debug https://tviplayer.iol.pt/direto/CNN
[cli][debug] OS: Linux-5.10.0-9-amd64-x86_64-with-glibc2.31
[cli][debug] Python: 3.9.2
[cli][debug] Streamlink: 3.0.2
[cli][debug] Requests(2.26.0), Socks(1.7.1), Websocket(1.2.1)
[cli][debug] Arguments:
[cli][debug] url=https://tviplayer.iol.pt/direto/CNN
[cli][debug] --loglevel=debug
[cli][info] Found matching plugin tviplayer for URL https://tviplayer.iol.pt/direto/CNN
error: Unable to validate response text: Unable to parse HTML: Unicode strings with encoding declaration are not supported. Please use bytes input or XML fragments without declaration. ('<?xml version=\'1.0\' encoding=\'U ...)
```
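The failure reproduces with lxml alone, independent of Streamlink: `lxml.etree.HTML` refuses a `str` that still carries an XML encoding declaration.

```python
from lxml.etree import HTML

doc = "<?xml version='1.0' encoding='UTF-8'?><html><body/></html>"
HTML(doc)  # ValueError: Unicode strings with encoding declaration are not supported
```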
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/utils/parse.py`
Content:
```
1 import json
2 import re
3 from urllib.parse import parse_qsl
4
5 from lxml.etree import HTML, XML
6
7 from streamlink.plugin import PluginError
8
9
10 def _parse(parser, data, name, exception, schema, *args, **kwargs):
11 try:
12 parsed = parser(data, *args, **kwargs)
13 except Exception as err:
14 snippet = repr(data)
15 if len(snippet) > 35:
16 snippet = f"{snippet[:35]} ..."
17
18 raise exception(f"Unable to parse {name}: {err} ({snippet})")
19
20 if schema:
21 parsed = schema.validate(parsed, name=name, exception=exception)
22
23 return parsed
24
25
26 def parse_json(
27 data,
28 name="JSON",
29 exception=PluginError,
30 schema=None,
31 *args, **kwargs
32 ):
33 """Wrapper around json.loads.
34
35 Provides these extra features:
36 - Wraps errors in custom exception with a snippet of the data in the message
37 """
38 return _parse(json.loads, data, name, exception, schema, *args, **kwargs)
39
40
41 def parse_html(
42 data,
43 name="HTML",
44 exception=PluginError,
45 schema=None,
46 *args, **kwargs
47 ):
48 """Wrapper around lxml.etree.HTML with some extras.
49
50 Provides these extra features:
51 - Wraps errors in custom exception with a snippet of the data in the message
52 """
53 return _parse(HTML, data, name, exception, schema, *args, **kwargs)
54
55
56 def parse_xml(
57 data,
58 ignore_ns=False,
59 invalid_char_entities=False,
60 name="XML",
61 exception=PluginError,
62 schema=None,
63 *args, **kwargs
64 ):
65 """Wrapper around lxml.etree.XML with some extras.
66
67 Provides these extra features:
68 - Handles incorrectly encoded XML
69 - Allows stripping namespace information
70 - Wraps errors in custom exception with a snippet of the data in the message
71 """
72 if isinstance(data, str):
73 data = bytes(data, "utf8")
74 if ignore_ns:
75 data = re.sub(br"\s+xmlns=\"(.+?)\"", b"", data)
76 if invalid_char_entities:
77 data = re.sub(br"&(?!(?:#(?:[0-9]+|[Xx][0-9A-Fa-f]+)|[A-Za-z0-9]+);)", b"&", data)
78
79 return _parse(XML, data, name, exception, schema, *args, **kwargs)
80
81
82 def parse_qsd(
83 data,
84 name="query string",
85 exception=PluginError,
86 schema=None,
87 *args, **kwargs
88 ):
89 """Parses a query string into a dict.
90
91 Provides these extra features:
92 - Unlike parse_qs and parse_qsl, duplicate keys are not preserved in favor of a simpler return value
93 - Wraps errors in custom exception with a snippet of the data in the message
94 """
95 return _parse(lambda d: dict(parse_qsl(d, *args, **kwargs)), data, name, exception, schema)
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/streamlink/utils/parse.py b/src/streamlink/utils/parse.py
--- a/src/streamlink/utils/parse.py
+++ b/src/streamlink/utils/parse.py
@@ -48,8 +48,12 @@
"""Wrapper around lxml.etree.HTML with some extras.
Provides these extra features:
+ - Removes XML declarations of invalid XHTML5 documents
- Wraps errors in custom exception with a snippet of the data in the message
"""
+ if isinstance(data, str) and data.lstrip().startswith("<?xml"):
+ data = re.sub(r"^\s*<\?xml.+?\?>", "", data)
+
return _parse(HTML, data, name, exception, schema, *args, **kwargs)
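A quick check of the patched behaviour, applying the same regex before handing the text to lxml:

```python
import re
from lxml.etree import HTML

doc = "<?xml version='1.0' encoding='UTF-8'?><html><body/></html>"
if doc.lstrip().startswith("<?xml"):
    doc = re.sub(r"^\s*<\?xml.+?\?>", "", doc)

tree = HTML(doc)  # parses fine once the declaration is stripped
print(tree.tag)   # html
```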
| {"golden_diff": "diff --git a/src/streamlink/utils/parse.py b/src/streamlink/utils/parse.py\n--- a/src/streamlink/utils/parse.py\n+++ b/src/streamlink/utils/parse.py\n@@ -48,8 +48,12 @@\n \"\"\"Wrapper around lxml.etree.HTML with some extras.\n \n Provides these extra features:\n+ - Removes XML declarations of invalid XHTML5 documents\n - Wraps errors in custom exception with a snippet of the data in the message\n \"\"\"\n+ if isinstance(data, str) and data.lstrip().startswith(\"<?xml\"):\n+ data = re.sub(r\"^\\s*<\\?xml.+?\\?>\", \"\", data)\n+\n return _parse(HTML, data, name, exception, schema, *args, **kwargs)\n", "issue": "plugins.tviplayer: unable to handle CNN Portugal\n### Checklist\n\n- [X] This is a plugin issue and not a different kind of issue\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nLatest stable release\n\n### Description\n\n- issue:\r\n - the new `tviplayer` plugin is unable to handle https://tviplayer.iol.pt/direto/CNN \r\n - of note, the previous TVI 24 became CNN Portugal after #4199.\r\n\r\n- to reproduce:\r\n ```sh\r\n streamlink https://tviplayer.iol.pt/direto/CNN\r\n ```\r\n ```sh\r\n [cli][info] Found matching plugin tviplayer for URL https://tviplayer.iol.pt/direto/CNN\r\n error: Unable to validate response text: Unable to parse HTML: Unicode strings with encoding declaration are not supported. Please use bytes input or XML fragments without declaration. ('<?xml version=\\'1.0\\' encoding=\\'U ...)\r\n ```\r\n\r\n\r\n\n\n### Debug log\n\n```text\nstreamlink --loglevel debug https://tviplayer.iol.pt/direto/CNN\r\n[cli][debug] OS: Linux-5.10.0-9-amd64-x86_64-with-glibc2.31\r\n[cli][debug] Python: 3.9.2\r\n[cli][debug] Streamlink: 3.0.2\r\n[cli][debug] Requests(2.26.0), Socks(1.7.1), Websocket(1.2.1)\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://tviplayer.iol.pt/direto/CNN\r\n[cli][debug] --loglevel=debug\r\n[cli][info] Found matching plugin tviplayer for URL https://tviplayer.iol.pt/direto/CNN\r\nerror: Unable to validate response text: Unable to parse HTML: Unicode strings with encoding declaration are not supported. Please use bytes input or XML fragments without declaration. 
('<?xml version=\\'1.0\\' encoding=\\'U ...)\n```\n\n", "before_files": [{"content": "import json\nimport re\nfrom urllib.parse import parse_qsl\n\nfrom lxml.etree import HTML, XML\n\nfrom streamlink.plugin import PluginError\n\n\ndef _parse(parser, data, name, exception, schema, *args, **kwargs):\n try:\n parsed = parser(data, *args, **kwargs)\n except Exception as err:\n snippet = repr(data)\n if len(snippet) > 35:\n snippet = f\"{snippet[:35]} ...\"\n\n raise exception(f\"Unable to parse {name}: {err} ({snippet})\")\n\n if schema:\n parsed = schema.validate(parsed, name=name, exception=exception)\n\n return parsed\n\n\ndef parse_json(\n data,\n name=\"JSON\",\n exception=PluginError,\n schema=None,\n *args, **kwargs\n):\n \"\"\"Wrapper around json.loads.\n\n Provides these extra features:\n - Wraps errors in custom exception with a snippet of the data in the message\n \"\"\"\n return _parse(json.loads, data, name, exception, schema, *args, **kwargs)\n\n\ndef parse_html(\n data,\n name=\"HTML\",\n exception=PluginError,\n schema=None,\n *args, **kwargs\n):\n \"\"\"Wrapper around lxml.etree.HTML with some extras.\n\n Provides these extra features:\n - Wraps errors in custom exception with a snippet of the data in the message\n \"\"\"\n return _parse(HTML, data, name, exception, schema, *args, **kwargs)\n\n\ndef parse_xml(\n data,\n ignore_ns=False,\n invalid_char_entities=False,\n name=\"XML\",\n exception=PluginError,\n schema=None,\n *args, **kwargs\n):\n \"\"\"Wrapper around lxml.etree.XML with some extras.\n\n Provides these extra features:\n - Handles incorrectly encoded XML\n - Allows stripping namespace information\n - Wraps errors in custom exception with a snippet of the data in the message\n \"\"\"\n if isinstance(data, str):\n data = bytes(data, \"utf8\")\n if ignore_ns:\n data = re.sub(br\"\\s+xmlns=\\\"(.+?)\\\"\", b\"\", data)\n if invalid_char_entities:\n data = re.sub(br\"&(?!(?:#(?:[0-9]+|[Xx][0-9A-Fa-f]+)|[A-Za-z0-9]+);)\", b\"&\", data)\n\n return _parse(XML, data, name, exception, schema, *args, **kwargs)\n\n\ndef parse_qsd(\n data,\n name=\"query string\",\n exception=PluginError,\n schema=None,\n *args, **kwargs\n):\n \"\"\"Parses a query string into a dict.\n\n Provides these extra features:\n - Unlike parse_qs and parse_qsl, duplicate keys are not preserved in favor of a simpler return value\n - Wraps errors in custom exception with a snippet of the data in the message\n \"\"\"\n return _parse(lambda d: dict(parse_qsl(d, *args, **kwargs)), data, name, exception, schema)\n", "path": "src/streamlink/utils/parse.py"}], "after_files": [{"content": "import json\nimport re\nfrom urllib.parse import parse_qsl\n\nfrom lxml.etree import HTML, XML\n\nfrom streamlink.plugin import PluginError\n\n\ndef _parse(parser, data, name, exception, schema, *args, **kwargs):\n try:\n parsed = parser(data, *args, **kwargs)\n except Exception as err:\n snippet = repr(data)\n if len(snippet) > 35:\n snippet = f\"{snippet[:35]} ...\"\n\n raise exception(f\"Unable to parse {name}: {err} ({snippet})\")\n\n if schema:\n parsed = schema.validate(parsed, name=name, exception=exception)\n\n return parsed\n\n\ndef parse_json(\n data,\n name=\"JSON\",\n exception=PluginError,\n schema=None,\n *args, **kwargs\n):\n \"\"\"Wrapper around json.loads.\n\n Provides these extra features:\n - Wraps errors in custom exception with a snippet of the data in the message\n \"\"\"\n return _parse(json.loads, data, name, exception, schema, *args, **kwargs)\n\n\ndef parse_html(\n data,\n name=\"HTML\",\n 
exception=PluginError,\n schema=None,\n *args, **kwargs\n):\n \"\"\"Wrapper around lxml.etree.HTML with some extras.\n\n Provides these extra features:\n - Removes XML declarations of invalid XHTML5 documents\n - Wraps errors in custom exception with a snippet of the data in the message\n \"\"\"\n if isinstance(data, str) and data.lstrip().startswith(\"<?xml\"):\n data = re.sub(r\"^\\s*<\\?xml.+?\\?>\", \"\", data)\n\n return _parse(HTML, data, name, exception, schema, *args, **kwargs)\n\n\ndef parse_xml(\n data,\n ignore_ns=False,\n invalid_char_entities=False,\n name=\"XML\",\n exception=PluginError,\n schema=None,\n *args, **kwargs\n):\n \"\"\"Wrapper around lxml.etree.XML with some extras.\n\n Provides these extra features:\n - Handles incorrectly encoded XML\n - Allows stripping namespace information\n - Wraps errors in custom exception with a snippet of the data in the message\n \"\"\"\n if isinstance(data, str):\n data = bytes(data, \"utf8\")\n if ignore_ns:\n data = re.sub(br\"\\s+xmlns=\\\"(.+?)\\\"\", b\"\", data)\n if invalid_char_entities:\n data = re.sub(br\"&(?!(?:#(?:[0-9]+|[Xx][0-9A-Fa-f]+)|[A-Za-z0-9]+);)\", b\"&\", data)\n\n return _parse(XML, data, name, exception, schema, *args, **kwargs)\n\n\ndef parse_qsd(\n data,\n name=\"query string\",\n exception=PluginError,\n schema=None,\n *args, **kwargs\n):\n \"\"\"Parses a query string into a dict.\n\n Provides these extra features:\n - Unlike parse_qs and parse_qsl, duplicate keys are not preserved in favor of a simpler return value\n - Wraps errors in custom exception with a snippet of the data in the message\n \"\"\"\n return _parse(lambda d: dict(parse_qsl(d, *args, **kwargs)), data, name, exception, schema)\n", "path": "src/streamlink/utils/parse.py"}]} | 1,661 | 165 |
gh_patches_debug_24490 | rasdani/github-patches | git_diff | ansible__ansible-lint-3437 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
no-handler: should not react to when-conditions containing "and" or "or"
<!--- Verify first that your issue is not already reported on GitHub -->
<!--- Also test if the latest release and master branch are affected too -->
##### Summary
Right now the rule `Tasks that run when changed should likely be handlers` (which, BTW, I am a big fan of) would produce findings for all of these lines:
`when: mytask.changed`
`when: mytask is changed`
...
`when: mytask is changed and wartherIsNice|bool`
While I totally agree that the first two examples are bad practice and should produce a linter warning, I would not agree that the last example should.
##### Proposed solution
As mentioned in #419, I could imagine splitting up E503 into two rules, one of which reacts to single conditions and one for more complex conditions involving `and` or `or` - that way both could be skipped/disabled separately.
As @ssbarnea pointed out, it might also be a solution to disable the check completely for complex conditions.
##### Issue Type
- Bug Report
- ansible installation method: OS package
- ansible-lint installation method: pip
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/ansiblelint/rules/no_handler.py`
Content:
```
1 # Copyright (c) 2016 Will Thames <[email protected]>
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a copy
4 # of this software and associated documentation files (the "Software"), to deal
5 # in the Software without restriction, including without limitation the rights
6 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 # copies of the Software, and to permit persons to whom the Software is
8 # furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19 # THE SOFTWARE.
20
21 """UseHandlerRatherThanWhenChangedRule used with ansible-lint."""
22 from __future__ import annotations
23
24 import sys
25 from typing import TYPE_CHECKING
26
27 from ansiblelint.rules import AnsibleLintRule
28
29 if TYPE_CHECKING:
30 from ansiblelint.file_utils import Lintable
31 from ansiblelint.utils import Task
32
33
34 def _changed_in_when(item: str) -> bool:
35 if not isinstance(item, str):
36 return False
37 item_list = item.split()
38
39 if {"and", "not"} & set(item_list):
40 return False
41 return any(
42 changed in item
43 for changed in [
44 ".changed",
45 "|changed",
46 '["changed"]',
47 "['changed']",
48 "is changed",
49 ]
50 )
51
52
53 class UseHandlerRatherThanWhenChangedRule(AnsibleLintRule):
54 """Tasks that run when changed should likely be handlers."""
55
56 id = "no-handler"
57 description = (
58 "If a task has a ``when: result.changed`` setting, it is effectively "
59 "acting as a handler. You could use ``notify`` and move that task to "
60 "``handlers``."
61 )
62 link = "https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_handlers.html#handlers"
63 severity = "MEDIUM"
64 tags = ["idiom"]
65 version_added = "historic"
66
67 def matchtask(
68 self,
69 task: Task,
70 file: Lintable | None = None,
71 ) -> bool | str:
72 if task["__ansible_action_type__"] != "task":
73 return False
74
75 when = task.get("when")
76
77 if isinstance(when, list):
78 for item in when:
79 if _changed_in_when(item):
80 return True
81 if isinstance(when, str):
82 return _changed_in_when(when)
83 return False
84
85
86 if "pytest" in sys.modules:
87 import pytest
88
89 from ansiblelint.rules import RulesCollection # pylint: disable=ungrouped-imports
90 from ansiblelint.runner import Runner # pylint: disable=ungrouped-imports
91
92 @pytest.mark.parametrize(
93 ("test_file", "failures"),
94 (
95 pytest.param("examples/playbooks/no_handler_fail.yml", 7, id="fail"),
96 pytest.param("examples/playbooks/no_handler_pass.yml", 0, id="pass"),
97 ),
98 )
99 def test_no_handler(
100 default_rules_collection: RulesCollection,
101 test_file: str,
102 failures: int,
103 ) -> None:
104 """Test rule matches."""
105 results = Runner(test_file, rules=default_rules_collection).run()
106 assert len(results) == failures
107 for result in results:
108 assert result.tag == "no-handler"
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/ansiblelint/rules/no_handler.py b/src/ansiblelint/rules/no_handler.py
--- a/src/ansiblelint/rules/no_handler.py
+++ b/src/ansiblelint/rules/no_handler.py
@@ -36,7 +36,7 @@
return False
item_list = item.split()
- if {"and", "not"} & set(item_list):
+ if {"and", "or", "not"} & set(item_list):
return False
return any(
changed in item
@@ -75,9 +75,9 @@
when = task.get("when")
if isinstance(when, list):
- for item in when:
- if _changed_in_when(item):
- return True
+ if len(when) > 1:
+ return False
+ return _changed_in_when(when[0])
if isinstance(when, str):
return _changed_in_when(when)
return False
@@ -92,7 +92,7 @@
@pytest.mark.parametrize(
("test_file", "failures"),
(
- pytest.param("examples/playbooks/no_handler_fail.yml", 7, id="fail"),
+ pytest.param("examples/playbooks/no_handler_fail.yml", 5, id="fail"),
pytest.param("examples/playbooks/no_handler_pass.yml", 0, id="pass"),
),
)
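The patched predicate can be exercised directly on the examples from the report (function body copied from the diff above):

```python
def _changed_in_when(item) -> bool:
    if not isinstance(item, str):
        return False
    if {"and", "or", "not"} & set(item.split()):
        return False
    return any(
        changed in item
        for changed in [".changed", "|changed", '["changed"]', "['changed']", "is changed"]
    )

print(_changed_in_when("mytask.changed"))                            # True  -> flagged
print(_changed_in_when("mytask is changed"))                         # True  -> flagged
print(_changed_in_when("mytask is changed and wartherIsNice|bool"))  # False -> skipped
```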
| {"golden_diff": "diff --git a/src/ansiblelint/rules/no_handler.py b/src/ansiblelint/rules/no_handler.py\n--- a/src/ansiblelint/rules/no_handler.py\n+++ b/src/ansiblelint/rules/no_handler.py\n@@ -36,7 +36,7 @@\n return False\n item_list = item.split()\n \n- if {\"and\", \"not\"} & set(item_list):\n+ if {\"and\", \"or\", \"not\"} & set(item_list):\n return False\n return any(\n changed in item\n@@ -75,9 +75,9 @@\n when = task.get(\"when\")\n \n if isinstance(when, list):\n- for item in when:\n- if _changed_in_when(item):\n- return True\n+ if len(when) > 1:\n+ return False\n+ return _changed_in_when(when[0])\n if isinstance(when, str):\n return _changed_in_when(when)\n return False\n@@ -92,7 +92,7 @@\n @pytest.mark.parametrize(\n (\"test_file\", \"failures\"),\n (\n- pytest.param(\"examples/playbooks/no_handler_fail.yml\", 7, id=\"fail\"),\n+ pytest.param(\"examples/playbooks/no_handler_fail.yml\", 5, id=\"fail\"),\n pytest.param(\"examples/playbooks/no_handler_pass.yml\", 0, id=\"pass\"),\n ),\n )\n", "issue": "no-handler: should not react on when-conditions containing \"and\" or \"or\"\n<!--- Verify first that your issue is not already reported on GitHub -->\r\n<!--- Also test if the latest release and master branch are affected too -->\r\n\r\n##### Summary\r\nRight now the rule `Tasks that run when changed should likely be handlers` (which BTW, i am a big fan of) would produce findings for all of this lines:\r\n\r\n`when: mytask.changed`\r\n`when: mytask is changed`\r\n...\r\n`when: mytask is changed and wartherIsNice|bool`\r\n\r\nWhile i totally agree that the first two examples are bad practices and should produce a linter warning, i would not agree, that the last example should.\r\n\r\n##### Proposed solution\r\n\r\nAs mentioned in #419 i could imagine of splitting up E503 into two rules, one of which reacts to single conditions and one for more complex conditions involving `and` or `or` - that way both could be skipped/disabled seperately.\r\n\r\nAs @ssbarnea pointed out, it might also be a solution to disable the check completeley for complex conditons.\r\n\r\n##### Issue Type\r\n\r\n- Bug Report\r\n\r\n\r\n- ansible installation method: OS package\r\n- ansible-lint installation method: pip\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) 2016 Will Thames <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n\"\"\"UseHandlerRatherThanWhenChangedRule used with ansible-lint.\"\"\"\nfrom __future__ import annotations\n\nimport sys\nfrom typing import TYPE_CHECKING\n\nfrom ansiblelint.rules import AnsibleLintRule\n\nif TYPE_CHECKING:\n from ansiblelint.file_utils import Lintable\n from ansiblelint.utils import Task\n\n\ndef _changed_in_when(item: str) -> bool:\n if not isinstance(item, str):\n return False\n item_list = item.split()\n\n if {\"and\", \"not\"} & set(item_list):\n return False\n return any(\n changed in item\n for changed in [\n \".changed\",\n \"|changed\",\n '[\"changed\"]',\n \"['changed']\",\n \"is changed\",\n ]\n )\n\n\nclass UseHandlerRatherThanWhenChangedRule(AnsibleLintRule):\n \"\"\"Tasks that run when changed should likely be handlers.\"\"\"\n\n id = \"no-handler\"\n description = (\n \"If a task has a ``when: result.changed`` setting, it is effectively \"\n \"acting as a handler. You could use ``notify`` and move that task to \"\n \"``handlers``.\"\n )\n link = \"https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_handlers.html#handlers\"\n severity = \"MEDIUM\"\n tags = [\"idiom\"]\n version_added = \"historic\"\n\n def matchtask(\n self,\n task: Task,\n file: Lintable | None = None,\n ) -> bool | str:\n if task[\"__ansible_action_type__\"] != \"task\":\n return False\n\n when = task.get(\"when\")\n\n if isinstance(when, list):\n for item in when:\n if _changed_in_when(item):\n return True\n if isinstance(when, str):\n return _changed_in_when(when)\n return False\n\n\nif \"pytest\" in sys.modules:\n import pytest\n\n from ansiblelint.rules import RulesCollection # pylint: disable=ungrouped-imports\n from ansiblelint.runner import Runner # pylint: disable=ungrouped-imports\n\n @pytest.mark.parametrize(\n (\"test_file\", \"failures\"),\n (\n pytest.param(\"examples/playbooks/no_handler_fail.yml\", 7, id=\"fail\"),\n pytest.param(\"examples/playbooks/no_handler_pass.yml\", 0, id=\"pass\"),\n ),\n )\n def test_no_handler(\n default_rules_collection: RulesCollection,\n test_file: str,\n failures: int,\n ) -> None:\n \"\"\"Test rule matches.\"\"\"\n results = Runner(test_file, rules=default_rules_collection).run()\n assert len(results) == failures\n for result in results:\n assert result.tag == \"no-handler\"\n", "path": "src/ansiblelint/rules/no_handler.py"}], "after_files": [{"content": "# Copyright (c) 2016 Will Thames <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n\"\"\"UseHandlerRatherThanWhenChangedRule used with ansible-lint.\"\"\"\nfrom __future__ import annotations\n\nimport sys\nfrom typing import TYPE_CHECKING\n\nfrom ansiblelint.rules import AnsibleLintRule\n\nif TYPE_CHECKING:\n from ansiblelint.file_utils import Lintable\n from ansiblelint.utils import Task\n\n\ndef _changed_in_when(item: str) -> bool:\n if not isinstance(item, str):\n return False\n item_list = item.split()\n\n if {\"and\", \"or\", \"not\"} & set(item_list):\n return False\n return any(\n changed in item\n for changed in [\n \".changed\",\n \"|changed\",\n '[\"changed\"]',\n \"['changed']\",\n \"is changed\",\n ]\n )\n\n\nclass UseHandlerRatherThanWhenChangedRule(AnsibleLintRule):\n \"\"\"Tasks that run when changed should likely be handlers.\"\"\"\n\n id = \"no-handler\"\n description = (\n \"If a task has a ``when: result.changed`` setting, it is effectively \"\n \"acting as a handler. You could use ``notify`` and move that task to \"\n \"``handlers``.\"\n )\n link = \"https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_handlers.html#handlers\"\n severity = \"MEDIUM\"\n tags = [\"idiom\"]\n version_added = \"historic\"\n\n def matchtask(\n self,\n task: Task,\n file: Lintable | None = None,\n ) -> bool | str:\n if task[\"__ansible_action_type__\"] != \"task\":\n return False\n\n when = task.get(\"when\")\n\n if isinstance(when, list):\n if len(when) > 1:\n return False\n return _changed_in_when(when[0])\n if isinstance(when, str):\n return _changed_in_when(when)\n return False\n\n\nif \"pytest\" in sys.modules:\n import pytest\n\n from ansiblelint.rules import RulesCollection # pylint: disable=ungrouped-imports\n from ansiblelint.runner import Runner # pylint: disable=ungrouped-imports\n\n @pytest.mark.parametrize(\n (\"test_file\", \"failures\"),\n (\n pytest.param(\"examples/playbooks/no_handler_fail.yml\", 5, id=\"fail\"),\n pytest.param(\"examples/playbooks/no_handler_pass.yml\", 0, id=\"pass\"),\n ),\n )\n def test_no_handler(\n default_rules_collection: RulesCollection,\n test_file: str,\n failures: int,\n ) -> None:\n \"\"\"Test rule matches.\"\"\"\n results = Runner(test_file, rules=default_rules_collection).run()\n assert len(results) == failures\n for result in results:\n assert result.tag == \"no-handler\"\n", "path": "src/ansiblelint/rules/no_handler.py"}]} | 1,568 | 303 |
gh_patches_debug_6754 | rasdani/github-patches | git_diff | strawberry-graphql__strawberry-2455 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ConstrainedFloatValue from pydantic needs support
<!-- Provide a general summary of the bug in the title above. -->
<!--- This template is entirely optional and can be removed, but is here to help both you and us. -->
<!--- Anything on lines wrapped in comments like these will not show up in the final text. -->
## Describe the Bug
<!-- A clear and concise description of what the bug is. -->
I am trying to convert the model below into a Strawberry type
```
class coordinates(BaseModel):
latitude: float= Field(...,gt=-90,lt=90)
longitude: float= Field(...,gt=-180,lt=180)
accuracy: int | None = Field(None, gt=50, lt=100)
```
However, I run into this error:
TypeError: Coordinates fields cannot be resolved. Unexpected type '<class 'schema.ConstrainedFloatValue'>'
If I change `latitude: float= Field(...,gt=-90,lt=90)` into `latitude: int= Field(...,gt=-90,lt=90)`
Then the conversion below works:
```
@strawberry.experimental.pydantic.type(model=coordinates)
class Coordinates:
"""
Class that takes in coordinates from GeoLocation Provider in front-end
"""
latitude: strawberry.auto
longitude: strawberry.auto
accuracy: strawberry.auto
timestamp: Date
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `strawberry/experimental/pydantic/fields.py`
Content:
```
1 import builtins
2 from decimal import Decimal
3 from typing import Any, List, Optional, Type
4 from uuid import UUID
5
6 import pydantic
7 from pydantic import BaseModel
8 from pydantic.typing import get_args, get_origin, is_new_type, new_type_supertype
9 from pydantic.utils import lenient_issubclass
10
11 from strawberry.experimental.pydantic.exceptions import (
12 UnregisteredTypeException,
13 UnsupportedTypeError,
14 )
15 from strawberry.types.types import TypeDefinition
16
17 try:
18 from typing import GenericAlias as TypingGenericAlias # type: ignore
19 except ImportError:
20 # python < 3.9 does not have GenericAlias (list[int], tuple[str, ...] and so on)
21 TypingGenericAlias = ()
22
23
24 ATTR_TO_TYPE_MAP = {
25 "NoneStr": Optional[str],
26 "NoneBytes": Optional[bytes],
27 "StrBytes": None,
28 "NoneStrBytes": None,
29 "StrictStr": str,
30 "ConstrainedBytes": bytes,
31 "conbytes": bytes,
32 "ConstrainedStr": str,
33 "constr": str,
34 "EmailStr": str,
35 "PyObject": None,
36 "ConstrainedInt": int,
37 "conint": int,
38 "PositiveInt": int,
39 "NegativeInt": int,
40 "ConstrainedFloat": float,
41 "confloat": float,
42 "PositiveFloat": float,
43 "NegativeFloat": float,
44 "ConstrainedDecimal": Decimal,
45 "condecimal": Decimal,
46 "UUID1": UUID,
47 "UUID3": UUID,
48 "UUID4": UUID,
49 "UUID5": UUID,
50 "FilePath": None,
51 "DirectoryPath": None,
52 "Json": None,
53 "JsonWrapper": None,
54 "SecretStr": str,
55 "SecretBytes": bytes,
56 "StrictBool": bool,
57 "StrictInt": int,
58 "StrictFloat": float,
59 "PaymentCardNumber": None,
60 "ByteSize": None,
61 "AnyUrl": str,
62 "AnyHttpUrl": str,
63 "HttpUrl": str,
64 "PostgresDsn": str,
65 "RedisDsn": str,
66 }
67
68
69 FIELDS_MAP = {
70 getattr(pydantic, field_name): type
71 for field_name, type in ATTR_TO_TYPE_MAP.items()
72 if hasattr(pydantic, field_name)
73 }
74
75
76 def get_basic_type(type_) -> Type[Any]:
77 if lenient_issubclass(type_, pydantic.ConstrainedInt):
78 return int
79 if lenient_issubclass(type_, pydantic.ConstrainedStr):
80 return str
81 if lenient_issubclass(type_, pydantic.ConstrainedList):
82 return List[get_basic_type(type_.item_type)] # type: ignore
83
84 if type_ in FIELDS_MAP:
85 type_ = FIELDS_MAP.get(type_)
86
87 if type_ is None:
88 raise UnsupportedTypeError()
89
90 if is_new_type(type_):
91 return new_type_supertype(type_)
92
93 return type_
94
95
96 def replace_pydantic_types(type_: Any, is_input: bool):
97 if lenient_issubclass(type_, BaseModel):
98 attr = "_strawberry_input_type" if is_input else "_strawberry_type"
99 if hasattr(type_, attr):
100 return getattr(type_, attr)
101 else:
102 raise UnregisteredTypeException(type_)
103 return type_
104
105
106 def replace_types_recursively(type_: Any, is_input: bool) -> Any:
107 """Runs the conversions recursively into the arguments of generic types if any"""
108 basic_type = get_basic_type(type_)
109 replaced_type = replace_pydantic_types(basic_type, is_input)
110
111 origin = get_origin(type_)
112 if not origin or not hasattr(type_, "__args__"):
113 return replaced_type
114
115 converted = tuple(
116 replace_types_recursively(t, is_input=is_input) for t in get_args(replaced_type)
117 )
118
119 if isinstance(replaced_type, TypingGenericAlias):
120 return TypingGenericAlias(origin, converted)
121
122 replaced_type = replaced_type.copy_with(converted)
123
124 if isinstance(replaced_type, TypeDefinition):
125 # TODO: Not sure if this is necessary. No coverage in tests
126 # TODO: Unnecessary with StrawberryObject
127 replaced_type = builtins.type(
128 replaced_type.name,
129 (),
130 {"_type_definition": replaced_type},
131 )
132
133 return replaced_type
134
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/strawberry/experimental/pydantic/fields.py b/strawberry/experimental/pydantic/fields.py
--- a/strawberry/experimental/pydantic/fields.py
+++ b/strawberry/experimental/pydantic/fields.py
@@ -76,6 +76,8 @@
def get_basic_type(type_) -> Type[Any]:
if lenient_issubclass(type_, pydantic.ConstrainedInt):
return int
+ if lenient_issubclass(type_, pydantic.ConstrainedFloat):
+ return float
if lenient_issubclass(type_, pydantic.ConstrainedStr):
return str
if lenient_issubclass(type_, pydantic.ConstrainedList):
| {"golden_diff": "diff --git a/strawberry/experimental/pydantic/fields.py b/strawberry/experimental/pydantic/fields.py\n--- a/strawberry/experimental/pydantic/fields.py\n+++ b/strawberry/experimental/pydantic/fields.py\n@@ -76,6 +76,8 @@\n def get_basic_type(type_) -> Type[Any]:\n if lenient_issubclass(type_, pydantic.ConstrainedInt):\n return int\n+ if lenient_issubclass(type_, pydantic.ConstrainedFloat):\n+ return float\n if lenient_issubclass(type_, pydantic.ConstrainedStr):\n return str\n if lenient_issubclass(type_, pydantic.ConstrainedList):\n", "issue": "ContrainedFloatValue from pydantic needs support\n<!-- Provide a general summary of the bug in the title above. -->\r\n\r\n<!--- This template is entirely optional and can be removed, but is here to help both you and us. -->\r\n<!--- Anything on lines wrapped in comments like these will not show up in the final text. -->\r\n\r\n## Describe the Bug\r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\nI am trying to import the below into a strawberry type\r\n\r\n```\r\nclass coordinates(BaseModel):\r\n latitude: float= Field(...,gt=-90,lt=90)\r\n longitude: float= Field(...,gt=-180,lt=180)\r\n accuracy: int | None = Field(None, gt=50, lt=100)\r\n```\r\n\r\nHowever, I run into this error:\r\n\r\nTypeError: Coordinates fields cannot be resolved. Unexpected type '<class 'schema.ConstrainedFloatValue'>'\r\n\r\nIf, I change `latitude: float= Field(...,gt=-90,lt=90)` into `latitude: int= Field(...,gt=-90,lt=90)`\r\n\r\nThen importing using the below works:\r\n\r\n```\r\[email protected](model=coordinates)\r\nclass Coordinates:\r\n \"\"\" \r\n Class that takes in coordinates from GeoLocation Provider in front-end\r\n \"\"\" \r\n latitude: strawberry.auto\r\n longitude: strawberry.auto\r\n accuracy: strawberry.auto\r\n timestamp: Date\r\n```\r\n\r\n\n", "before_files": [{"content": "import builtins\nfrom decimal import Decimal\nfrom typing import Any, List, Optional, Type\nfrom uuid import UUID\n\nimport pydantic\nfrom pydantic import BaseModel\nfrom pydantic.typing import get_args, get_origin, is_new_type, new_type_supertype\nfrom pydantic.utils import lenient_issubclass\n\nfrom strawberry.experimental.pydantic.exceptions import (\n UnregisteredTypeException,\n UnsupportedTypeError,\n)\nfrom strawberry.types.types import TypeDefinition\n\ntry:\n from typing import GenericAlias as TypingGenericAlias # type: ignore\nexcept ImportError:\n # python < 3.9 does not have GenericAlias (list[int], tuple[str, ...] 
and so on)\n TypingGenericAlias = ()\n\n\nATTR_TO_TYPE_MAP = {\n \"NoneStr\": Optional[str],\n \"NoneBytes\": Optional[bytes],\n \"StrBytes\": None,\n \"NoneStrBytes\": None,\n \"StrictStr\": str,\n \"ConstrainedBytes\": bytes,\n \"conbytes\": bytes,\n \"ConstrainedStr\": str,\n \"constr\": str,\n \"EmailStr\": str,\n \"PyObject\": None,\n \"ConstrainedInt\": int,\n \"conint\": int,\n \"PositiveInt\": int,\n \"NegativeInt\": int,\n \"ConstrainedFloat\": float,\n \"confloat\": float,\n \"PositiveFloat\": float,\n \"NegativeFloat\": float,\n \"ConstrainedDecimal\": Decimal,\n \"condecimal\": Decimal,\n \"UUID1\": UUID,\n \"UUID3\": UUID,\n \"UUID4\": UUID,\n \"UUID5\": UUID,\n \"FilePath\": None,\n \"DirectoryPath\": None,\n \"Json\": None,\n \"JsonWrapper\": None,\n \"SecretStr\": str,\n \"SecretBytes\": bytes,\n \"StrictBool\": bool,\n \"StrictInt\": int,\n \"StrictFloat\": float,\n \"PaymentCardNumber\": None,\n \"ByteSize\": None,\n \"AnyUrl\": str,\n \"AnyHttpUrl\": str,\n \"HttpUrl\": str,\n \"PostgresDsn\": str,\n \"RedisDsn\": str,\n}\n\n\nFIELDS_MAP = {\n getattr(pydantic, field_name): type\n for field_name, type in ATTR_TO_TYPE_MAP.items()\n if hasattr(pydantic, field_name)\n}\n\n\ndef get_basic_type(type_) -> Type[Any]:\n if lenient_issubclass(type_, pydantic.ConstrainedInt):\n return int\n if lenient_issubclass(type_, pydantic.ConstrainedStr):\n return str\n if lenient_issubclass(type_, pydantic.ConstrainedList):\n return List[get_basic_type(type_.item_type)] # type: ignore\n\n if type_ in FIELDS_MAP:\n type_ = FIELDS_MAP.get(type_)\n\n if type_ is None:\n raise UnsupportedTypeError()\n\n if is_new_type(type_):\n return new_type_supertype(type_)\n\n return type_\n\n\ndef replace_pydantic_types(type_: Any, is_input: bool):\n if lenient_issubclass(type_, BaseModel):\n attr = \"_strawberry_input_type\" if is_input else \"_strawberry_type\"\n if hasattr(type_, attr):\n return getattr(type_, attr)\n else:\n raise UnregisteredTypeException(type_)\n return type_\n\n\ndef replace_types_recursively(type_: Any, is_input: bool) -> Any:\n \"\"\"Runs the conversions recursively into the arguments of generic types if any\"\"\"\n basic_type = get_basic_type(type_)\n replaced_type = replace_pydantic_types(basic_type, is_input)\n\n origin = get_origin(type_)\n if not origin or not hasattr(type_, \"__args__\"):\n return replaced_type\n\n converted = tuple(\n replace_types_recursively(t, is_input=is_input) for t in get_args(replaced_type)\n )\n\n if isinstance(replaced_type, TypingGenericAlias):\n return TypingGenericAlias(origin, converted)\n\n replaced_type = replaced_type.copy_with(converted)\n\n if isinstance(replaced_type, TypeDefinition):\n # TODO: Not sure if this is necessary. 
No coverage in tests\n # TODO: Unnecessary with StrawberryObject\n replaced_type = builtins.type(\n replaced_type.name,\n (),\n {\"_type_definition\": replaced_type},\n )\n\n return replaced_type\n", "path": "strawberry/experimental/pydantic/fields.py"}], "after_files": [{"content": "import builtins\nfrom decimal import Decimal\nfrom typing import Any, List, Optional, Type\nfrom uuid import UUID\n\nimport pydantic\nfrom pydantic import BaseModel\nfrom pydantic.typing import get_args, get_origin, is_new_type, new_type_supertype\nfrom pydantic.utils import lenient_issubclass\n\nfrom strawberry.experimental.pydantic.exceptions import (\n UnregisteredTypeException,\n UnsupportedTypeError,\n)\nfrom strawberry.types.types import TypeDefinition\n\ntry:\n from typing import GenericAlias as TypingGenericAlias # type: ignore\nexcept ImportError:\n # python < 3.9 does not have GenericAlias (list[int], tuple[str, ...] and so on)\n TypingGenericAlias = ()\n\n\nATTR_TO_TYPE_MAP = {\n \"NoneStr\": Optional[str],\n \"NoneBytes\": Optional[bytes],\n \"StrBytes\": None,\n \"NoneStrBytes\": None,\n \"StrictStr\": str,\n \"ConstrainedBytes\": bytes,\n \"conbytes\": bytes,\n \"ConstrainedStr\": str,\n \"constr\": str,\n \"EmailStr\": str,\n \"PyObject\": None,\n \"ConstrainedInt\": int,\n \"conint\": int,\n \"PositiveInt\": int,\n \"NegativeInt\": int,\n \"ConstrainedFloat\": float,\n \"confloat\": float,\n \"PositiveFloat\": float,\n \"NegativeFloat\": float,\n \"ConstrainedDecimal\": Decimal,\n \"condecimal\": Decimal,\n \"UUID1\": UUID,\n \"UUID3\": UUID,\n \"UUID4\": UUID,\n \"UUID5\": UUID,\n \"FilePath\": None,\n \"DirectoryPath\": None,\n \"Json\": None,\n \"JsonWrapper\": None,\n \"SecretStr\": str,\n \"SecretBytes\": bytes,\n \"StrictBool\": bool,\n \"StrictInt\": int,\n \"StrictFloat\": float,\n \"PaymentCardNumber\": None,\n \"ByteSize\": None,\n \"AnyUrl\": str,\n \"AnyHttpUrl\": str,\n \"HttpUrl\": str,\n \"PostgresDsn\": str,\n \"RedisDsn\": str,\n}\n\n\nFIELDS_MAP = {\n getattr(pydantic, field_name): type\n for field_name, type in ATTR_TO_TYPE_MAP.items()\n if hasattr(pydantic, field_name)\n}\n\n\ndef get_basic_type(type_) -> Type[Any]:\n if lenient_issubclass(type_, pydantic.ConstrainedInt):\n return int\n if lenient_issubclass(type_, pydantic.ConstrainedFloat):\n return float\n if lenient_issubclass(type_, pydantic.ConstrainedStr):\n return str\n if lenient_issubclass(type_, pydantic.ConstrainedList):\n return List[get_basic_type(type_.item_type)] # type: ignore\n\n if type_ in FIELDS_MAP:\n type_ = FIELDS_MAP.get(type_)\n\n if type_ is None:\n raise UnsupportedTypeError()\n\n if is_new_type(type_):\n return new_type_supertype(type_)\n\n return type_\n\n\ndef replace_pydantic_types(type_: Any, is_input: bool):\n if lenient_issubclass(type_, BaseModel):\n attr = \"_strawberry_input_type\" if is_input else \"_strawberry_type\"\n if hasattr(type_, attr):\n return getattr(type_, attr)\n else:\n raise UnregisteredTypeException(type_)\n return type_\n\n\ndef replace_types_recursively(type_: Any, is_input: bool) -> Any:\n \"\"\"Runs the conversions recursively into the arguments of generic types if any\"\"\"\n basic_type = get_basic_type(type_)\n replaced_type = replace_pydantic_types(basic_type, is_input)\n\n origin = get_origin(type_)\n if not origin or not hasattr(type_, \"__args__\"):\n return replaced_type\n\n converted = tuple(\n replace_types_recursively(t, is_input=is_input) for t in get_args(replaced_type)\n )\n\n if isinstance(replaced_type, TypingGenericAlias):\n return 
TypingGenericAlias(origin, converted)\n\n replaced_type = replaced_type.copy_with(converted)\n\n if isinstance(replaced_type, TypeDefinition):\n # TODO: Not sure if this is necessary. No coverage in tests\n # TODO: Unnecessary with StrawberryObject\n replaced_type = builtins.type(\n replaced_type.name,\n (),\n {\"_type_definition\": replaced_type},\n )\n\n return replaced_type\n", "path": "strawberry/experimental/pydantic/fields.py"}]} | 1,815 | 164 |
gh_patches_debug_7454 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3333 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider target_au is broken
During the global build at 2021-05-26-14-42-23, spider **target_au** failed with **0 features** and **16 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/target_au.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/target_au.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/target_au.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/target_au.py`
Content:
```
1 import scrapy
2
3 from locations.hours import OpeningHours
4 from locations.items import GeojsonPointItem
5
6
7 class TargetAUSpider(scrapy.Spider):
8 name = "target_au"
9 item_attributes = { 'brand': "Target", 'brand_wikidata': "Q7685854" }
10 allowed_domains = ["target.com.au"]
11 states = ["nsw","vic","qld","nt", "act", "sa", "tas", "wa"]
12 headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:81.0) Gecko/20100101 Firefox/81.0",
13 "Referer": "https://www.target.com.au/store-finder"}
14
15 custom_settings = {'DOWNLOAD_DELAY' : 0.5,}
16
17 def start_requests(self):
18 url = "https://www.target.com.au/store-finder/state/{}"
19 for state in self.states:
20 yield scrapy.Request(url.format(state),headers=self.headers, callback=self.parse)
21
22
23 def parse(self, response):
24 store_links = response.xpath('//a[@class="table-tap-canonical"]/@href').getall()
25 for link in store_links:
26 yield scrapy.Request(response.urljoin(link), callback=self.parse_store, headers=self.headers)
27
28 def _parse_hour_str(self, hour_string):
29 time_, am_pm = tuple(hour_string.split(" "))
30 hour, min = tuple(time_.split(":"))
31 hour = int(hour)
32 if am_pm == "PM":
33 hour += 12
34 return f"{hour}:{min}"
35
36 def parse_hours(self, hours_node):
37 opening_hours = OpeningHours()
38 days = hours_node.xpath(".//dt/text()").getall()
39 hours = hours_node.xpath(".//dd/text()").getall()
40 for idx, day in enumerate(days):
41 store_hours = hours[idx]
42 if "–" not in store_hours or ":" not in store_hours:
43 continue
44 parts = store_hours.strip().split(" – ")
45 open_time = self._parse_hour_str(parts[0])
46 close_time = self._parse_hour_str(parts[1])
47 opening_hours.add_range(day[0:2], open_time, close_time)
48
49 return opening_hours.as_opening_hours()
50
51
52
53 def parse_store(self, response):
54 store_name = response.xpath("//h4/text()").get().replace("Target – ","")
55 address_header = response.xpath("//span[@itemprop='streetAddress']/strong/text()").get()
56 address = " ".join(response.xpath("//span[@itemprop='streetAddress']/text()").getall()).strip()
57 if address_header:
58 address = address_header + " " + address
59 locality = response.xpath("//span[@itemprop='addressLocality']/text()").get()
60 region = response.xpath("//span[@itemprop='addressRegion']/text()").get()
61 post_code = response.xpath("//span[@itemprop='postalCode']/text()").get()
62 phone_number = response.xpath("//span[@itemprop='telephone']/text()").get()
63 hours_section = response.xpath("(//dl)[1]")[0]
64 opening_hours = self.parse_hours(hours_section)
65 lat = response.xpath("//div[@data-embedded-json='store-content-data']//@data-lat").get()
66 lon = response.xpath("//div[@data-embedded-json='store-content-data']//@data-lng").get()
67
68 yield GeojsonPointItem(lat=lat,
69 lon=lon,
70 name=store_name,
71 addr_full=address,
72 city=locality,
73 state=region,
74 postcode=post_code,
75 country="AU",
76 phone=phone_number,
77 website=response.url,
78 opening_hours=opening_hours,
79 ref=response.url.split("/")[-1])
80
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/target_au.py b/locations/spiders/target_au.py
--- a/locations/spiders/target_au.py
+++ b/locations/spiders/target_au.py
@@ -26,6 +26,8 @@
yield scrapy.Request(response.urljoin(link), callback=self.parse_store, headers=self.headers)
def _parse_hour_str(self, hour_string):
+ if hour_string == "Midnight":
+ return self._parse_hour_str("12:00 AM")
time_, am_pm = tuple(hour_string.split(" "))
hour, min = tuple(time_.split(":"))
hour = int(hour)
| {"golden_diff": "diff --git a/locations/spiders/target_au.py b/locations/spiders/target_au.py\n--- a/locations/spiders/target_au.py\n+++ b/locations/spiders/target_au.py\n@@ -26,6 +26,8 @@\n yield scrapy.Request(response.urljoin(link), callback=self.parse_store, headers=self.headers)\n \n def _parse_hour_str(self, hour_string):\n+ if hour_string == \"Midnight\":\n+ return self._parse_hour_str(\"12:00 AM\")\n time_, am_pm = tuple(hour_string.split(\" \"))\n hour, min = tuple(time_.split(\":\"))\n hour = int(hour)\n", "issue": "Spider target_au is broken\nDuring the global build at 2021-05-26-14-42-23, spider **target_au** failed with **0 features** and **16 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/target_au.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/target_au.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/target_au.geojson))\n", "before_files": [{"content": "import scrapy\n\nfrom locations.hours import OpeningHours\nfrom locations.items import GeojsonPointItem\n\n\nclass TargetAUSpider(scrapy.Spider):\n name = \"target_au\"\n item_attributes = { 'brand': \"Target\", 'brand_wikidata': \"Q7685854\" }\n allowed_domains = [\"target.com.au\"]\n states = [\"nsw\",\"vic\",\"qld\",\"nt\", \"act\", \"sa\", \"tas\", \"wa\"]\n headers = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:81.0) Gecko/20100101 Firefox/81.0\",\n \"Referer\": \"https://www.target.com.au/store-finder\"}\n\n custom_settings = {'DOWNLOAD_DELAY' : 0.5,}\n\n def start_requests(self):\n url = \"https://www.target.com.au/store-finder/state/{}\"\n for state in self.states:\n yield scrapy.Request(url.format(state),headers=self.headers, callback=self.parse)\n\n\n def parse(self, response):\n store_links = response.xpath('//a[@class=\"table-tap-canonical\"]/@href').getall()\n for link in store_links:\n yield scrapy.Request(response.urljoin(link), callback=self.parse_store, headers=self.headers)\n\n def _parse_hour_str(self, hour_string):\n time_, am_pm = tuple(hour_string.split(\" \"))\n hour, min = tuple(time_.split(\":\"))\n hour = int(hour)\n if am_pm == \"PM\":\n hour += 12\n return f\"{hour}:{min}\"\n\n def parse_hours(self, hours_node):\n opening_hours = OpeningHours()\n days = hours_node.xpath(\".//dt/text()\").getall()\n hours = hours_node.xpath(\".//dd/text()\").getall()\n for idx, day in enumerate(days):\n store_hours = hours[idx]\n if \"\u2013\" not in store_hours or \":\" not in store_hours:\n continue\n parts = store_hours.strip().split(\" \u2013 \")\n open_time = self._parse_hour_str(parts[0])\n close_time = self._parse_hour_str(parts[1])\n opening_hours.add_range(day[0:2], open_time, close_time)\n \n return opening_hours.as_opening_hours()\n\n\n\n def parse_store(self, response):\n store_name = response.xpath(\"//h4/text()\").get().replace(\"Target \u2013 \",\"\")\n address_header = response.xpath(\"//span[@itemprop='streetAddress']/strong/text()\").get()\n address = \" \".join(response.xpath(\"//span[@itemprop='streetAddress']/text()\").getall()).strip()\n if address_header:\n address = address_header + \" \" + address\n locality = response.xpath(\"//span[@itemprop='addressLocality']/text()\").get()\n region = response.xpath(\"//span[@itemprop='addressRegion']/text()\").get()\n post_code = response.xpath(\"//span[@itemprop='postalCode']/text()\").get()\n phone_number = response.xpath(\"//span[@itemprop='telephone']/text()\").get()\n 
hours_section = response.xpath(\"(//dl)[1]\")[0]\n opening_hours = self.parse_hours(hours_section)\n lat = response.xpath(\"//div[@data-embedded-json='store-content-data']//@data-lat\").get()\n lon = response.xpath(\"//div[@data-embedded-json='store-content-data']//@data-lng\").get()\n\n yield GeojsonPointItem(lat=lat,\n lon=lon,\n name=store_name,\n addr_full=address,\n city=locality,\n state=region,\n postcode=post_code,\n country=\"AU\",\n phone=phone_number,\n website=response.url,\n opening_hours=opening_hours,\n ref=response.url.split(\"/\")[-1]) \n", "path": "locations/spiders/target_au.py"}], "after_files": [{"content": "import scrapy\n\nfrom locations.hours import OpeningHours\nfrom locations.items import GeojsonPointItem\n\n\nclass TargetAUSpider(scrapy.Spider):\n name = \"target_au\"\n item_attributes = { 'brand': \"Target\", 'brand_wikidata': \"Q7685854\" }\n allowed_domains = [\"target.com.au\"]\n states = [\"nsw\",\"vic\",\"qld\",\"nt\", \"act\", \"sa\", \"tas\", \"wa\"]\n headers = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:81.0) Gecko/20100101 Firefox/81.0\",\n \"Referer\": \"https://www.target.com.au/store-finder\"}\n\n custom_settings = {'DOWNLOAD_DELAY' : 0.5,}\n\n def start_requests(self):\n url = \"https://www.target.com.au/store-finder/state/{}\"\n for state in self.states:\n yield scrapy.Request(url.format(state),headers=self.headers, callback=self.parse)\n\n\n def parse(self, response):\n store_links = response.xpath('//a[@class=\"table-tap-canonical\"]/@href').getall()\n for link in store_links:\n yield scrapy.Request(response.urljoin(link), callback=self.parse_store, headers=self.headers)\n\n def _parse_hour_str(self, hour_string):\n if hour_string == \"Midnight\":\n return self._parse_hour_str(\"12:00 AM\")\n time_, am_pm = tuple(hour_string.split(\" \"))\n hour, min = tuple(time_.split(\":\"))\n hour = int(hour)\n if am_pm == \"PM\":\n hour += 12\n return f\"{hour}:{min}\"\n\n def parse_hours(self, hours_node):\n opening_hours = OpeningHours()\n days = hours_node.xpath(\".//dt/text()\").getall()\n hours = hours_node.xpath(\".//dd/text()\").getall()\n for idx, day in enumerate(days):\n store_hours = hours[idx]\n if \"\u2013\" not in store_hours or \":\" not in store_hours:\n continue\n parts = store_hours.strip().split(\" \u2013 \")\n open_time = self._parse_hour_str(parts[0])\n close_time = self._parse_hour_str(parts[1])\n opening_hours.add_range(day[0:2], open_time, close_time)\n \n return opening_hours.as_opening_hours()\n\n\n\n def parse_store(self, response):\n store_name = response.xpath(\"//h4/text()\").get().replace(\"Target \u2013 \",\"\")\n address_header = response.xpath(\"//span[@itemprop='streetAddress']/strong/text()\").get()\n address = \" \".join(response.xpath(\"//span[@itemprop='streetAddress']/text()\").getall()).strip()\n if address_header:\n address = address_header + \" \" + address\n locality = response.xpath(\"//span[@itemprop='addressLocality']/text()\").get()\n region = response.xpath(\"//span[@itemprop='addressRegion']/text()\").get()\n post_code = response.xpath(\"//span[@itemprop='postalCode']/text()\").get()\n phone_number = response.xpath(\"//span[@itemprop='telephone']/text()\").get()\n hours_section = response.xpath(\"(//dl)[1]\")[0]\n opening_hours = self.parse_hours(hours_section)\n lat = response.xpath(\"//div[@data-embedded-json='store-content-data']//@data-lat\").get()\n lon = response.xpath(\"//div[@data-embedded-json='store-content-data']//@data-lng\").get()\n\n yield GeojsonPointItem(lat=lat,\n 
lon=lon,\n name=store_name,\n addr_full=address,\n city=locality,\n state=region,\n postcode=post_code,\n country=\"AU\",\n phone=phone_number,\n website=response.url,\n opening_hours=opening_hours,\n ref=response.url.split(\"/\")[-1]) \n", "path": "locations/spiders/target_au.py"}]} | 1,422 | 141 |
gh_patches_debug_22562 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-2372 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
text commenting results show module detail
when only text commenting module used for project, module detail also shown in results tab
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/documents/views.py`
Content:
```
1 from django.http import Http404
2 from django.http.response import HttpResponseRedirect
3 from django.urls import reverse
4 from django.utils.translation import ugettext_lazy as _
5 from django.views import generic
6
7 from adhocracy4.dashboard import mixins as dashboard_mixins
8 from adhocracy4.projects.mixins import ProjectMixin
9 from adhocracy4.rules import mixins as rules_mixins
10 from meinberlin.apps.contrib import mixins as contrib_mixins
11 from meinberlin.apps.exports.views import DashboardExportView
12
13 from . import models
14
15
16 class DocumentDashboardView(ProjectMixin,
17 dashboard_mixins.DashboardBaseMixin,
18 dashboard_mixins.DashboardComponentMixin,
19 generic.TemplateView):
20 template_name = 'meinberlin_documents/document_dashboard.html'
21 permission_required = 'a4projects.change_project'
22
23 def get_permission_object(self):
24 return self.project
25
26
27 class ChapterDetailView(ProjectMixin,
28 rules_mixins.PermissionRequiredMixin,
29 generic.DetailView,
30 contrib_mixins.DisplayProjectOrModuleMixin):
31 model = models.Chapter
32 permission_required = 'meinberlin_documents.view_chapter'
33 get_context_from_object = True
34
35 def dispatch(self, request, *args, **kwargs):
36 # Redirect first chapter view to the project detail page
37 res = super().dispatch(request, *args, **kwargs)
38 chapter = self.get_object()
39 if self.request.path == chapter.get_absolute_url() \
40 and chapter == self.chapter_list.first():
41 return HttpResponseRedirect(self.project.get_absolute_url())
42 else:
43 return res
44
45 def get_context_data(self, **kwargs):
46 context = super(ChapterDetailView, self).get_context_data(**kwargs)
47 context['chapter_list'] = self.chapter_list
48 return context
49
50 @property
51 def chapter_list(self):
52 return models.Chapter.objects.filter(module=self.module)
53
54
55 class DocumentDetailView(ChapterDetailView):
56 get_context_from_object = False
57
58 def get_object(self):
59 first_chapter = models.Chapter.objects \
60 .filter(module=self.module) \
61 .first()
62
63 if not first_chapter:
64 raise Http404(_('Document has no chapters defined.'))
65 return first_chapter
66
67
68 class ParagraphDetailView(ProjectMixin,
69 rules_mixins.PermissionRequiredMixin,
70 generic.DetailView):
71 model = models.Paragraph
72 permission_required = 'meinberlin_documents.view_paragraph'
73
74
75 class DocumentDashboardExportView(DashboardExportView):
76 template_name = 'meinberlin_exports/export_dashboard.html'
77
78 def get_context_data(self, **kwargs):
79 context = super().get_context_data(**kwargs)
80 context['comment_export'] = reverse(
81 'a4dashboard:document-comment-export',
82 kwargs={'module_slug': self.module.slug})
83 return context
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/meinberlin/apps/documents/views.py b/meinberlin/apps/documents/views.py
--- a/meinberlin/apps/documents/views.py
+++ b/meinberlin/apps/documents/views.py
@@ -1,5 +1,4 @@
from django.http import Http404
-from django.http.response import HttpResponseRedirect
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from django.views import generic
@@ -32,16 +31,6 @@
permission_required = 'meinberlin_documents.view_chapter'
get_context_from_object = True
- def dispatch(self, request, *args, **kwargs):
- # Redirect first chapter view to the project detail page
- res = super().dispatch(request, *args, **kwargs)
- chapter = self.get_object()
- if self.request.path == chapter.get_absolute_url() \
- and chapter == self.chapter_list.first():
- return HttpResponseRedirect(self.project.get_absolute_url())
- else:
- return res
-
def get_context_data(self, **kwargs):
context = super(ChapterDetailView, self).get_context_data(**kwargs)
context['chapter_list'] = self.chapter_list
| {"golden_diff": "diff --git a/meinberlin/apps/documents/views.py b/meinberlin/apps/documents/views.py\n--- a/meinberlin/apps/documents/views.py\n+++ b/meinberlin/apps/documents/views.py\n@@ -1,5 +1,4 @@\n from django.http import Http404\n-from django.http.response import HttpResponseRedirect\n from django.urls import reverse\n from django.utils.translation import ugettext_lazy as _\n from django.views import generic\n@@ -32,16 +31,6 @@\n permission_required = 'meinberlin_documents.view_chapter'\n get_context_from_object = True\n \n- def dispatch(self, request, *args, **kwargs):\n- # Redirect first chapter view to the project detail page\n- res = super().dispatch(request, *args, **kwargs)\n- chapter = self.get_object()\n- if self.request.path == chapter.get_absolute_url() \\\n- and chapter == self.chapter_list.first():\n- return HttpResponseRedirect(self.project.get_absolute_url())\n- else:\n- return res\n-\n def get_context_data(self, **kwargs):\n context = super(ChapterDetailView, self).get_context_data(**kwargs)\n context['chapter_list'] = self.chapter_list\n", "issue": "text commenting results show module detail \nwhen only text commenting module used for project, module detail also shown in results tab\n", "before_files": [{"content": "from django.http import Http404\nfrom django.http.response import HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views import generic\n\nfrom adhocracy4.dashboard import mixins as dashboard_mixins\nfrom adhocracy4.projects.mixins import ProjectMixin\nfrom adhocracy4.rules import mixins as rules_mixins\nfrom meinberlin.apps.contrib import mixins as contrib_mixins\nfrom meinberlin.apps.exports.views import DashboardExportView\n\nfrom . import models\n\n\nclass DocumentDashboardView(ProjectMixin,\n dashboard_mixins.DashboardBaseMixin,\n dashboard_mixins.DashboardComponentMixin,\n generic.TemplateView):\n template_name = 'meinberlin_documents/document_dashboard.html'\n permission_required = 'a4projects.change_project'\n\n def get_permission_object(self):\n return self.project\n\n\nclass ChapterDetailView(ProjectMixin,\n rules_mixins.PermissionRequiredMixin,\n generic.DetailView,\n contrib_mixins.DisplayProjectOrModuleMixin):\n model = models.Chapter\n permission_required = 'meinberlin_documents.view_chapter'\n get_context_from_object = True\n\n def dispatch(self, request, *args, **kwargs):\n # Redirect first chapter view to the project detail page\n res = super().dispatch(request, *args, **kwargs)\n chapter = self.get_object()\n if self.request.path == chapter.get_absolute_url() \\\n and chapter == self.chapter_list.first():\n return HttpResponseRedirect(self.project.get_absolute_url())\n else:\n return res\n\n def get_context_data(self, **kwargs):\n context = super(ChapterDetailView, self).get_context_data(**kwargs)\n context['chapter_list'] = self.chapter_list\n return context\n\n @property\n def chapter_list(self):\n return models.Chapter.objects.filter(module=self.module)\n\n\nclass DocumentDetailView(ChapterDetailView):\n get_context_from_object = False\n\n def get_object(self):\n first_chapter = models.Chapter.objects \\\n .filter(module=self.module) \\\n .first()\n\n if not first_chapter:\n raise Http404(_('Document has no chapters defined.'))\n return first_chapter\n\n\nclass ParagraphDetailView(ProjectMixin,\n rules_mixins.PermissionRequiredMixin,\n generic.DetailView):\n model = models.Paragraph\n permission_required = 'meinberlin_documents.view_paragraph'\n\n\nclass 
DocumentDashboardExportView(DashboardExportView):\n template_name = 'meinberlin_exports/export_dashboard.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['comment_export'] = reverse(\n 'a4dashboard:document-comment-export',\n kwargs={'module_slug': self.module.slug})\n return context\n", "path": "meinberlin/apps/documents/views.py"}], "after_files": [{"content": "from django.http import Http404\nfrom django.urls import reverse\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views import generic\n\nfrom adhocracy4.dashboard import mixins as dashboard_mixins\nfrom adhocracy4.projects.mixins import ProjectMixin\nfrom adhocracy4.rules import mixins as rules_mixins\nfrom meinberlin.apps.contrib import mixins as contrib_mixins\nfrom meinberlin.apps.exports.views import DashboardExportView\n\nfrom . import models\n\n\nclass DocumentDashboardView(ProjectMixin,\n dashboard_mixins.DashboardBaseMixin,\n dashboard_mixins.DashboardComponentMixin,\n generic.TemplateView):\n template_name = 'meinberlin_documents/document_dashboard.html'\n permission_required = 'a4projects.change_project'\n\n def get_permission_object(self):\n return self.project\n\n\nclass ChapterDetailView(ProjectMixin,\n rules_mixins.PermissionRequiredMixin,\n generic.DetailView,\n contrib_mixins.DisplayProjectOrModuleMixin):\n model = models.Chapter\n permission_required = 'meinberlin_documents.view_chapter'\n get_context_from_object = True\n\n def get_context_data(self, **kwargs):\n context = super(ChapterDetailView, self).get_context_data(**kwargs)\n context['chapter_list'] = self.chapter_list\n return context\n\n @property\n def chapter_list(self):\n return models.Chapter.objects.filter(module=self.module)\n\n\nclass DocumentDetailView(ChapterDetailView):\n get_context_from_object = False\n\n def get_object(self):\n first_chapter = models.Chapter.objects \\\n .filter(module=self.module) \\\n .first()\n\n if not first_chapter:\n raise Http404(_('Document has no chapters defined.'))\n return first_chapter\n\n\nclass ParagraphDetailView(ProjectMixin,\n rules_mixins.PermissionRequiredMixin,\n generic.DetailView):\n model = models.Paragraph\n permission_required = 'meinberlin_documents.view_paragraph'\n\n\nclass DocumentDashboardExportView(DashboardExportView):\n template_name = 'meinberlin_exports/export_dashboard.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['comment_export'] = reverse(\n 'a4dashboard:document-comment-export',\n kwargs={'module_slug': self.module.slug})\n return context\n", "path": "meinberlin/apps/documents/views.py"}]} | 1,025 | 257 |
gh_patches_debug_1924 | rasdani/github-patches | git_diff | cobbler__cobbler-1265 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
build_reporting fails if empty string in ignorelist
The default configuration in the ubuntu 12.04 cobbler 2.6.5 package has the following in `/etc/settings`:
```
build_reporting_ignorelist = [""]
```
The code that reads this value is in `install_post_report.py`, and the condition that determines whether to send a build report email is:
```
for prefix in settings.build_reporting_ignorelist:
if name.lower().startswith(prefix) == True:
sendmail = False
```
With the default configuration, this check always succeeds, and **mail is not sent**.
Fix the issue by modifying the condition to:
```
if prefix != '' and name.lower().startswith(prefix):
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cobbler/modules/install_post_report.py`
Content:
```
1 # (c) 2008-2009
2 # Jeff Schroeder <[email protected]>
3 # Michael DeHaan <michael.dehaan AT gmail>
4 #
5 # License: GPLv2+
6
7 # Post install trigger for cobbler to
8 # send out a pretty email report that
9 # contains target information.
10
11 import distutils.sysconfig
12 import sys
13 import os
14 import traceback
15
16 plib = distutils.sysconfig.get_python_lib()
17 mod_path="%s/cobbler" % plib
18 sys.path.insert(0, mod_path)
19
20 from utils import _
21 import smtplib
22 import sys
23 import cobbler.templar as templar
24 from cobbler.cexceptions import CX
25 import utils
26
27 def register():
28 # this pure python trigger acts as if it were a legacy shell-trigger, but is much faster.
29 # the return of this method indicates the trigger type
30 return "/var/lib/cobbler/triggers/install/post/*"
31
32 def run(api, args, logger):
33 # FIXME: make everything use the logger
34
35 settings = api.settings()
36
37 # go no further if this feature is turned off
38 if not str(settings.build_reporting_enabled).lower() in [ "1", "yes", "y", "true"]:
39 return 0
40
41 objtype = args[0] # "target" or "profile"
42 name = args[1] # name of target or profile
43 boot_ip = args[2] # ip or "?"
44
45 if objtype == "system":
46 target = api.find_system(name)
47 else:
48 target = api.find_profile(name)
49
50 # collapse the object down to a rendered datastructure
51 target = utils.blender(api, False, target)
52
53 if target == {}:
54 raise CX("failure looking up target")
55
56 to_addr = settings.build_reporting_email
57 if to_addr == "":
58 return 0
59
60 # add the ability to specify an MTA for servers that don't run their own
61 smtp_server = settings.build_reporting_smtp_server
62 if smtp_server == "":
63 smtp_server = "localhost"
64
65 # use a custom from address or fall back to a reasonable default
66 from_addr = settings.build_reporting_sender
67 if from_addr == "":
68 from_addr = "cobbler@%s" % settings.server
69
70 subject = settings.build_reporting_subject
71 if subject == "":
72 subject = '[Cobbler] install complete '
73
74 to_addr = ",".join(to_addr)
75 metadata = {
76 "from_addr" : from_addr,
77 "to_addr" : to_addr,
78 "subject" : subject,
79 "boot_ip" : boot_ip
80 }
81 metadata.update(target)
82
83 input_template = open("/etc/cobbler/reporting/build_report_email.template")
84 input_data = input_template.read()
85 input_template.close()
86
87 message = templar.Templar(api._config).render(input_data, metadata, None)
88
89 # for debug, call
90 # print message
91
92 sendmail = True
93 for prefix in settings.build_reporting_ignorelist:
94 if name.lower().startswith(prefix) == True:
95 sendmail = False
96
97 if sendmail == True:
98 # Send the mail
99 # FIXME: on error, return non-zero
100 server_handle = smtplib.SMTP(smtp_server)
101 server_handle.sendmail(from_addr, to_addr.split(','), message)
102 server_handle.quit()
103
104 return 0
105
106
107
108
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cobbler/modules/install_post_report.py b/cobbler/modules/install_post_report.py
--- a/cobbler/modules/install_post_report.py
+++ b/cobbler/modules/install_post_report.py
@@ -91,7 +91,7 @@
sendmail = True
for prefix in settings.build_reporting_ignorelist:
- if name.lower().startswith(prefix) == True:
+ if prefix != '' and name.lower().startswith(prefix):
sendmail = False
if sendmail == True:
| {"golden_diff": "diff --git a/cobbler/modules/install_post_report.py b/cobbler/modules/install_post_report.py\n--- a/cobbler/modules/install_post_report.py\n+++ b/cobbler/modules/install_post_report.py\n@@ -91,7 +91,7 @@\n \n sendmail = True\n for prefix in settings.build_reporting_ignorelist:\n- if name.lower().startswith(prefix) == True:\n+ if prefix != '' and name.lower().startswith(prefix):\n sendmail = False\n \n if sendmail == True:\n", "issue": "build_reporting fails if empty string in ignorelist\nThe default configuration in the ubuntu 12.04 cobbler 2.6.5 package has the following in `/etc/settings`:\n\n```\nbuild_reporting_ignorelist = [\"\"]\n```\n\nThe code that reads this value is in `install_post_report.py`, and the condition that determines whether to send a build report email is:\n\n```\nfor prefix in settings.build_reporting_ignorelist:\n if name.lower().startswith(prefix) == True:\n sendmail = False\n```\n\nWith the default configuration, this check always succeeds, and **mail is not sent**.\n\nFix the issue by modifying the condition to:\n\n```\n if prefix != '' and name.lower().startswith(prefix):\n```\n\n", "before_files": [{"content": "# (c) 2008-2009\n# Jeff Schroeder <[email protected]>\n# Michael DeHaan <michael.dehaan AT gmail>\n#\n# License: GPLv2+\n\n# Post install trigger for cobbler to\n# send out a pretty email report that\n# contains target information.\n\nimport distutils.sysconfig\nimport sys\nimport os\nimport traceback\n\nplib = distutils.sysconfig.get_python_lib()\nmod_path=\"%s/cobbler\" % plib\nsys.path.insert(0, mod_path)\n\nfrom utils import _\nimport smtplib\nimport sys\nimport cobbler.templar as templar\nfrom cobbler.cexceptions import CX\nimport utils\n\ndef register():\n # this pure python trigger acts as if it were a legacy shell-trigger, but is much faster.\n # the return of this method indicates the trigger type\n return \"/var/lib/cobbler/triggers/install/post/*\"\n\ndef run(api, args, logger):\n # FIXME: make everything use the logger\n\n settings = api.settings()\n\n # go no further if this feature is turned off\n if not str(settings.build_reporting_enabled).lower() in [ \"1\", \"yes\", \"y\", \"true\"]:\n return 0\n\n objtype = args[0] # \"target\" or \"profile\"\n name = args[1] # name of target or profile\n boot_ip = args[2] # ip or \"?\"\n\n if objtype == \"system\":\n target = api.find_system(name)\n else:\n target = api.find_profile(name)\n\n # collapse the object down to a rendered datastructure\n target = utils.blender(api, False, target)\n\n if target == {}:\n raise CX(\"failure looking up target\")\n\n to_addr = settings.build_reporting_email\n if to_addr == \"\":\n return 0\n\n # add the ability to specify an MTA for servers that don't run their own\n smtp_server = settings.build_reporting_smtp_server\n if smtp_server == \"\":\n smtp_server = \"localhost\"\n\n # use a custom from address or fall back to a reasonable default\n from_addr = settings.build_reporting_sender\n if from_addr == \"\":\n from_addr = \"cobbler@%s\" % settings.server\n\n subject = settings.build_reporting_subject\n if subject == \"\":\n subject = '[Cobbler] install complete '\n\n to_addr = \",\".join(to_addr)\n metadata = {\n \"from_addr\" : from_addr,\n \"to_addr\" : to_addr,\n \"subject\" : subject,\n \"boot_ip\" : boot_ip\n }\n metadata.update(target)\n\n input_template = open(\"/etc/cobbler/reporting/build_report_email.template\")\n input_data = input_template.read()\n input_template.close()\n\n message = templar.Templar(api._config).render(input_data, 
metadata, None)\n \n # for debug, call\n # print message\n\n sendmail = True\n for prefix in settings.build_reporting_ignorelist:\n if name.lower().startswith(prefix) == True:\n sendmail = False\n\n if sendmail == True:\n # Send the mail\n # FIXME: on error, return non-zero\n server_handle = smtplib.SMTP(smtp_server)\n server_handle.sendmail(from_addr, to_addr.split(','), message)\n server_handle.quit()\n\n return 0\n\n\n\n\n", "path": "cobbler/modules/install_post_report.py"}], "after_files": [{"content": "# (c) 2008-2009\n# Jeff Schroeder <[email protected]>\n# Michael DeHaan <michael.dehaan AT gmail>\n#\n# License: GPLv2+\n\n# Post install trigger for cobbler to\n# send out a pretty email report that\n# contains target information.\n\nimport distutils.sysconfig\nimport sys\nimport os\nimport traceback\n\nplib = distutils.sysconfig.get_python_lib()\nmod_path=\"%s/cobbler\" % plib\nsys.path.insert(0, mod_path)\n\nfrom utils import _\nimport smtplib\nimport sys\nimport cobbler.templar as templar\nfrom cobbler.cexceptions import CX\nimport utils\n\ndef register():\n # this pure python trigger acts as if it were a legacy shell-trigger, but is much faster.\n # the return of this method indicates the trigger type\n return \"/var/lib/cobbler/triggers/install/post/*\"\n\ndef run(api, args, logger):\n # FIXME: make everything use the logger\n\n settings = api.settings()\n\n # go no further if this feature is turned off\n if not str(settings.build_reporting_enabled).lower() in [ \"1\", \"yes\", \"y\", \"true\"]:\n return 0\n\n objtype = args[0] # \"target\" or \"profile\"\n name = args[1] # name of target or profile\n boot_ip = args[2] # ip or \"?\"\n\n if objtype == \"system\":\n target = api.find_system(name)\n else:\n target = api.find_profile(name)\n\n # collapse the object down to a rendered datastructure\n target = utils.blender(api, False, target)\n\n if target == {}:\n raise CX(\"failure looking up target\")\n\n to_addr = settings.build_reporting_email\n if to_addr == \"\":\n return 0\n\n # add the ability to specify an MTA for servers that don't run their own\n smtp_server = settings.build_reporting_smtp_server\n if smtp_server == \"\":\n smtp_server = \"localhost\"\n\n # use a custom from address or fall back to a reasonable default\n from_addr = settings.build_reporting_sender\n if from_addr == \"\":\n from_addr = \"cobbler@%s\" % settings.server\n\n subject = settings.build_reporting_subject\n if subject == \"\":\n subject = '[Cobbler] install complete '\n\n to_addr = \",\".join(to_addr)\n metadata = {\n \"from_addr\" : from_addr,\n \"to_addr\" : to_addr,\n \"subject\" : subject,\n \"boot_ip\" : boot_ip\n }\n metadata.update(target)\n\n input_template = open(\"/etc/cobbler/reporting/build_report_email.template\")\n input_data = input_template.read()\n input_template.close()\n\n message = templar.Templar(api._config).render(input_data, metadata, None)\n \n # for debug, call\n # print message\n\n sendmail = True\n for prefix in settings.build_reporting_ignorelist:\n if prefix != '' and name.lower().startswith(prefix):\n sendmail = False\n\n if sendmail == True:\n # Send the mail\n # FIXME: on error, return non-zero\n server_handle = smtplib.SMTP(smtp_server)\n server_handle.sendmail(from_addr, to_addr.split(','), message)\n server_handle.quit()\n\n return 0\n\n\n\n\n", "path": "cobbler/modules/install_post_report.py"}]} | 1,390 | 111 |
gh_patches_debug_6862 | rasdani/github-patches | git_diff | doccano__doccano-1654 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
I can't add members in the Django admin page.
I can't add members in the Django admin page.
steps
- Add a member in the admin page (click a SAVE button).
- <img width="1273" alt="スクリーンショット 2022-01-27 9 52 17" src="https://user-images.githubusercontent.com/20487308/151271702-bf60ae7e-f131-45fe-8314-e7726e90f90c.png">
- However, I get a 500 error.
- <img width="1085" alt="スクリーンショット 2022-01-27 9 53 08" src="https://user-images.githubusercontent.com/20487308/151271872-c3fa75e8-c491-4aff-b88e-c9d970406ede.png">
- The endpoints of the POST request are different between admin page and member page.
- `POST /admin/members/member/add/`
- `POST /v1/projects/1/members`
Environment
---------
doccano v1.5.5
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `backend/members/models.py`
Content:
```
1 from django.conf import settings
2 from django.contrib.auth.models import User
3 from django.core.exceptions import ValidationError
4 from django.db import models
5
6 from django.db.models import Manager
7
8 from api.models import Project
9 from roles.models import Role
10
11
12 class MemberManager(Manager):
13
14 def can_update(self, project: int, member_id: int, new_role: str) -> bool:
15 """The project needs at least 1 admin.
16
17 Args:
18 project: The project id.
19 member_id: The member id.
20 new_role: The new role name.
21
22 Returns:
23 Whether the mapping can be updated or not.
24 """
25 queryset = self.filter(
26 project=project, role__name=settings.ROLE_PROJECT_ADMIN
27 )
28 if queryset.count() > 1:
29 return True
30 else:
31 admin = queryset.first()
32 # we can change the role except for the only admin.
33 return admin.id != member_id or new_role == settings.ROLE_PROJECT_ADMIN
34
35 def has_role(self, project_id: int, user: User, role_name: str):
36 return self.filter(project=project_id, user=user, role__name=role_name).exists()
37
38
39 class Member(models.Model):
40 user = models.ForeignKey(
41 to=User,
42 on_delete=models.CASCADE,
43 related_name='role_mappings'
44 )
45 project = models.ForeignKey(
46 to=Project,
47 on_delete=models.CASCADE,
48 related_name='role_mappings'
49 )
50 role = models.ForeignKey(
51 to=Role,
52 on_delete=models.CASCADE
53 )
54 created_at = models.DateTimeField(auto_now_add=True)
55 updated_at = models.DateTimeField(auto_now=True)
56 objects = MemberManager()
57
58 def clean(self):
59 members = self.objects.exclude(id=self.id)
60 if members.filter(user=self.user, project=self.project).exists():
61 message = 'This user is already assigned to a role in this project.'
62 raise ValidationError(message)
63
64 @property
65 def username(self):
66 return self.user.username
67
68 class Meta:
69 unique_together = ('user', 'project')
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/backend/members/models.py b/backend/members/models.py
--- a/backend/members/models.py
+++ b/backend/members/models.py
@@ -56,7 +56,7 @@
objects = MemberManager()
def clean(self):
- members = self.objects.exclude(id=self.id)
+ members = self.__class__.objects.exclude(id=self.id)
if members.filter(user=self.user, project=self.project).exists():
message = 'This user is already assigned to a role in this project.'
raise ValidationError(message)
| {"golden_diff": "diff --git a/backend/members/models.py b/backend/members/models.py\n--- a/backend/members/models.py\n+++ b/backend/members/models.py\n@@ -56,7 +56,7 @@\n objects = MemberManager()\n \n def clean(self):\n- members = self.objects.exclude(id=self.id)\n+ members = self.__class__.objects.exclude(id=self.id)\n if members.filter(user=self.user, project=self.project).exists():\n message = 'This user is already assigned to a role in this project.'\n raise ValidationError(message)\n", "issue": "I can't add members in the Django admin page.\nI can't add members in the Django admin page.\r\n\r\nsteps\r\n- Add a member in the admin page (click a SAVE button).\r\n - <img width=\"1273\" alt=\"\u30b9\u30af\u30ea\u30fc\u30f3\u30b7\u30e7\u30c3\u30c8 2022-01-27 9 52 17\" src=\"https://user-images.githubusercontent.com/20487308/151271702-bf60ae7e-f131-45fe-8314-e7726e90f90c.png\">\r\n- However, I get a 500 error.\r\n - <img width=\"1085\" alt=\"\u30b9\u30af\u30ea\u30fc\u30f3\u30b7\u30e7\u30c3\u30c8 2022-01-27 9 53 08\" src=\"https://user-images.githubusercontent.com/20487308/151271872-c3fa75e8-c491-4aff-b88e-c9d970406ede.png\">\r\n- The endpoints of the POST request are different between admin page and member page.\r\n - `POST /admin/members/member/add/`\r\n - `POST /v1/projects/1/members`\r\n\r\nEnvironment\r\n---------\r\ndoccano v1.5.5\r\n\n", "before_files": [{"content": "from django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\n\nfrom django.db.models import Manager\n\nfrom api.models import Project\nfrom roles.models import Role\n\n\nclass MemberManager(Manager):\n\n def can_update(self, project: int, member_id: int, new_role: str) -> bool:\n \"\"\"The project needs at least 1 admin.\n\n Args:\n project: The project id.\n member_id: The member id.\n new_role: The new role name.\n\n Returns:\n Whether the mapping can be updated or not.\n \"\"\"\n queryset = self.filter(\n project=project, role__name=settings.ROLE_PROJECT_ADMIN\n )\n if queryset.count() > 1:\n return True\n else:\n admin = queryset.first()\n # we can change the role except for the only admin.\n return admin.id != member_id or new_role == settings.ROLE_PROJECT_ADMIN\n\n def has_role(self, project_id: int, user: User, role_name: str):\n return self.filter(project=project_id, user=user, role__name=role_name).exists()\n\n\nclass Member(models.Model):\n user = models.ForeignKey(\n to=User,\n on_delete=models.CASCADE,\n related_name='role_mappings'\n )\n project = models.ForeignKey(\n to=Project,\n on_delete=models.CASCADE,\n related_name='role_mappings'\n )\n role = models.ForeignKey(\n to=Role,\n on_delete=models.CASCADE\n )\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n objects = MemberManager()\n\n def clean(self):\n members = self.objects.exclude(id=self.id)\n if members.filter(user=self.user, project=self.project).exists():\n message = 'This user is already assigned to a role in this project.'\n raise ValidationError(message)\n\n @property\n def username(self):\n return self.user.username\n\n class Meta:\n unique_together = ('user', 'project')\n", "path": "backend/members/models.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\n\nfrom django.db.models import Manager\n\nfrom api.models import Project\nfrom 
roles.models import Role\n\n\nclass MemberManager(Manager):\n\n def can_update(self, project: int, member_id: int, new_role: str) -> bool:\n \"\"\"The project needs at least 1 admin.\n\n Args:\n project: The project id.\n member_id: The member id.\n new_role: The new role name.\n\n Returns:\n Whether the mapping can be updated or not.\n \"\"\"\n queryset = self.filter(\n project=project, role__name=settings.ROLE_PROJECT_ADMIN\n )\n if queryset.count() > 1:\n return True\n else:\n admin = queryset.first()\n # we can change the role except for the only admin.\n return admin.id != member_id or new_role == settings.ROLE_PROJECT_ADMIN\n\n def has_role(self, project_id: int, user: User, role_name: str):\n return self.filter(project=project_id, user=user, role__name=role_name).exists()\n\n\nclass Member(models.Model):\n user = models.ForeignKey(\n to=User,\n on_delete=models.CASCADE,\n related_name='role_mappings'\n )\n project = models.ForeignKey(\n to=Project,\n on_delete=models.CASCADE,\n related_name='role_mappings'\n )\n role = models.ForeignKey(\n to=Role,\n on_delete=models.CASCADE\n )\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n objects = MemberManager()\n\n def clean(self):\n members = self.__class__.objects.exclude(id=self.id)\n if members.filter(user=self.user, project=self.project).exists():\n message = 'This user is already assigned to a role in this project.'\n raise ValidationError(message)\n\n @property\n def username(self):\n return self.user.username\n\n class Meta:\n unique_together = ('user', 'project')\n", "path": "backend/members/models.py"}]} | 1,128 | 116 |
gh_patches_debug_1438 | rasdani/github-patches | git_diff | matrix-org__synapse-7630 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update SSO UIAuth login identifier to m.login.sso
I'm not sure when exactly we do this, but [MSC2454](https://github.com/matrix-org/matrix-doc/pull/2454) was merged, which identified `m.login.sso` as the identifier for SSO + UIAuth. Synapse is currently using `org.matrix.login.sso`. At some point we should switch to the standardized version.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `synapse/api/constants.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright 2014-2016 OpenMarket Ltd
3 # Copyright 2017 Vector Creations Ltd
4 # Copyright 2018-2019 New Vector Ltd
5 # Copyright 2019 The Matrix.org Foundation C.I.C.
6 #
7 # Licensed under the Apache License, Version 2.0 (the "License");
8 # you may not use this file except in compliance with the License.
9 # You may obtain a copy of the License at
10 #
11 # http://www.apache.org/licenses/LICENSE-2.0
12 #
13 # Unless required by applicable law or agreed to in writing, software
14 # distributed under the License is distributed on an "AS IS" BASIS,
15 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 # See the License for the specific language governing permissions and
17 # limitations under the License.
18
19 """Contains constants from the specification."""
20
21 # the "depth" field on events is limited to 2**63 - 1
22 MAX_DEPTH = 2 ** 63 - 1
23
24 # the maximum length for a room alias is 255 characters
25 MAX_ALIAS_LENGTH = 255
26
27 # the maximum length for a user id is 255 characters
28 MAX_USERID_LENGTH = 255
29
30
31 class Membership(object):
32
33 """Represents the membership states of a user in a room."""
34
35 INVITE = "invite"
36 JOIN = "join"
37 KNOCK = "knock"
38 LEAVE = "leave"
39 BAN = "ban"
40 LIST = (INVITE, JOIN, KNOCK, LEAVE, BAN)
41
42
43 class PresenceState(object):
44 """Represents the presence state of a user."""
45
46 OFFLINE = "offline"
47 UNAVAILABLE = "unavailable"
48 ONLINE = "online"
49
50
51 class JoinRules(object):
52 PUBLIC = "public"
53 KNOCK = "knock"
54 INVITE = "invite"
55 PRIVATE = "private"
56
57
58 class LoginType(object):
59 PASSWORD = "m.login.password"
60 EMAIL_IDENTITY = "m.login.email.identity"
61 MSISDN = "m.login.msisdn"
62 RECAPTCHA = "m.login.recaptcha"
63 TERMS = "m.login.terms"
64 SSO = "org.matrix.login.sso"
65 DUMMY = "m.login.dummy"
66
67 # Only for C/S API v1
68 APPLICATION_SERVICE = "m.login.application_service"
69 SHARED_SECRET = "org.matrix.login.shared_secret"
70
71
72 class EventTypes(object):
73 Member = "m.room.member"
74 Create = "m.room.create"
75 Tombstone = "m.room.tombstone"
76 JoinRules = "m.room.join_rules"
77 PowerLevels = "m.room.power_levels"
78 Aliases = "m.room.aliases"
79 Redaction = "m.room.redaction"
80 ThirdPartyInvite = "m.room.third_party_invite"
81 RelatedGroups = "m.room.related_groups"
82
83 RoomHistoryVisibility = "m.room.history_visibility"
84 CanonicalAlias = "m.room.canonical_alias"
85 Encrypted = "m.room.encrypted"
86 RoomAvatar = "m.room.avatar"
87 RoomEncryption = "m.room.encryption"
88 GuestAccess = "m.room.guest_access"
89
90 # These are used for validation
91 Message = "m.room.message"
92 Topic = "m.room.topic"
93 Name = "m.room.name"
94
95 ServerACL = "m.room.server_acl"
96 Pinned = "m.room.pinned_events"
97
98 Retention = "m.room.retention"
99
100 Presence = "m.presence"
101
102
103 class RejectedReason(object):
104 AUTH_ERROR = "auth_error"
105
106
107 class RoomCreationPreset(object):
108 PRIVATE_CHAT = "private_chat"
109 PUBLIC_CHAT = "public_chat"
110 TRUSTED_PRIVATE_CHAT = "trusted_private_chat"
111
112
113 class ThirdPartyEntityKind(object):
114 USER = "user"
115 LOCATION = "location"
116
117
118 ServerNoticeMsgType = "m.server_notice"
119 ServerNoticeLimitReached = "m.server_notice.usage_limit_reached"
120
121
122 class UserTypes(object):
123 """Allows for user type specific behaviour. With the benefit of hindsight
124 'admin' and 'guest' users should also be UserTypes. Normal users are type None
125 """
126
127 SUPPORT = "support"
128 BOT = "bot"
129 ALL_USER_TYPES = (SUPPORT, BOT)
130
131
132 class RelationTypes(object):
133 """The types of relations known to this server.
134 """
135
136 ANNOTATION = "m.annotation"
137 REPLACE = "m.replace"
138 REFERENCE = "m.reference"
139
140
141 class LimitBlockingTypes(object):
142 """Reasons that a server may be blocked"""
143
144 MONTHLY_ACTIVE_USER = "monthly_active_user"
145 HS_DISABLED = "hs_disabled"
146
147
148 class EventContentFields(object):
149 """Fields found in events' content, regardless of type."""
150
151 # Labels for the event, cf https://github.com/matrix-org/matrix-doc/pull/2326
152 LABELS = "org.matrix.labels"
153
154 # Timestamp to delete the event after
155 # cf https://github.com/matrix-org/matrix-doc/pull/2228
156 SELF_DESTRUCT_AFTER = "org.matrix.self_destruct_after"
157
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/synapse/api/constants.py b/synapse/api/constants.py
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -61,7 +61,7 @@
MSISDN = "m.login.msisdn"
RECAPTCHA = "m.login.recaptcha"
TERMS = "m.login.terms"
- SSO = "org.matrix.login.sso"
+ SSO = "m.login.sso"
DUMMY = "m.login.dummy"
# Only for C/S API v1
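For context on the migration itself: clients talking to servers on either side of this rename may see either identifier advertised in user-interactive auth flows, so tolerant handling is useful during the transition. Below is a minimal sketch, assuming the standard UIA 401 response shape; `pick_sso_stage` is a hypothetical helper, not part of Synapse:

```python
LEGACY_SSO = "org.matrix.login.sso"
STANDARD_SSO = "m.login.sso"


def pick_sso_stage(flows):
    """Return the first SSO stage advertised in UIA "flows", or None.

    `flows` is the list of {"stages": [...]} dicts from a 401 response.
    """
    for flow in flows:
        for stage in flow.get("stages", []):
            if stage in (STANDARD_SSO, LEGACY_SSO):
                return stage
    return None


print(pick_sso_stage([{"stages": ["m.login.sso"]}]))  # -> m.login.sso
```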
| {"golden_diff": "diff --git a/synapse/api/constants.py b/synapse/api/constants.py\n--- a/synapse/api/constants.py\n+++ b/synapse/api/constants.py\n@@ -61,7 +61,7 @@\n MSISDN = \"m.login.msisdn\"\n RECAPTCHA = \"m.login.recaptcha\"\n TERMS = \"m.login.terms\"\n- SSO = \"org.matrix.login.sso\"\n+ SSO = \"m.login.sso\"\n DUMMY = \"m.login.dummy\"\n \n # Only for C/S API v1\n", "issue": "Update SSO UIAuth login identifier to m.login.sso\nI'm not sure when exactly we do this, but [MSC2454](https://github.com/matrix-org/matrix-doc/pull/2454) was merged which identified `m.login.sso` as the identifier for SSO + UIAuth. Synapse is currently using `org.matrix.login.sso`. At some point we should switch to the standardized version.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2014-2016 OpenMarket Ltd\n# Copyright 2017 Vector Creations Ltd\n# Copyright 2018-2019 New Vector Ltd\n# Copyright 2019 The Matrix.org Foundation C.I.C.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Contains constants from the specification.\"\"\"\n\n# the \"depth\" field on events is limited to 2**63 - 1\nMAX_DEPTH = 2 ** 63 - 1\n\n# the maximum length for a room alias is 255 characters\nMAX_ALIAS_LENGTH = 255\n\n# the maximum length for a user id is 255 characters\nMAX_USERID_LENGTH = 255\n\n\nclass Membership(object):\n\n \"\"\"Represents the membership states of a user in a room.\"\"\"\n\n INVITE = \"invite\"\n JOIN = \"join\"\n KNOCK = \"knock\"\n LEAVE = \"leave\"\n BAN = \"ban\"\n LIST = (INVITE, JOIN, KNOCK, LEAVE, BAN)\n\n\nclass PresenceState(object):\n \"\"\"Represents the presence state of a user.\"\"\"\n\n OFFLINE = \"offline\"\n UNAVAILABLE = \"unavailable\"\n ONLINE = \"online\"\n\n\nclass JoinRules(object):\n PUBLIC = \"public\"\n KNOCK = \"knock\"\n INVITE = \"invite\"\n PRIVATE = \"private\"\n\n\nclass LoginType(object):\n PASSWORD = \"m.login.password\"\n EMAIL_IDENTITY = \"m.login.email.identity\"\n MSISDN = \"m.login.msisdn\"\n RECAPTCHA = \"m.login.recaptcha\"\n TERMS = \"m.login.terms\"\n SSO = \"org.matrix.login.sso\"\n DUMMY = \"m.login.dummy\"\n\n # Only for C/S API v1\n APPLICATION_SERVICE = \"m.login.application_service\"\n SHARED_SECRET = \"org.matrix.login.shared_secret\"\n\n\nclass EventTypes(object):\n Member = \"m.room.member\"\n Create = \"m.room.create\"\n Tombstone = \"m.room.tombstone\"\n JoinRules = \"m.room.join_rules\"\n PowerLevels = \"m.room.power_levels\"\n Aliases = \"m.room.aliases\"\n Redaction = \"m.room.redaction\"\n ThirdPartyInvite = \"m.room.third_party_invite\"\n RelatedGroups = \"m.room.related_groups\"\n\n RoomHistoryVisibility = \"m.room.history_visibility\"\n CanonicalAlias = \"m.room.canonical_alias\"\n Encrypted = \"m.room.encrypted\"\n RoomAvatar = \"m.room.avatar\"\n RoomEncryption = \"m.room.encryption\"\n GuestAccess = \"m.room.guest_access\"\n\n # These are used for validation\n Message = \"m.room.message\"\n Topic = \"m.room.topic\"\n Name = \"m.room.name\"\n\n ServerACL = \"m.room.server_acl\"\n Pinned = 
\"m.room.pinned_events\"\n\n Retention = \"m.room.retention\"\n\n Presence = \"m.presence\"\n\n\nclass RejectedReason(object):\n AUTH_ERROR = \"auth_error\"\n\n\nclass RoomCreationPreset(object):\n PRIVATE_CHAT = \"private_chat\"\n PUBLIC_CHAT = \"public_chat\"\n TRUSTED_PRIVATE_CHAT = \"trusted_private_chat\"\n\n\nclass ThirdPartyEntityKind(object):\n USER = \"user\"\n LOCATION = \"location\"\n\n\nServerNoticeMsgType = \"m.server_notice\"\nServerNoticeLimitReached = \"m.server_notice.usage_limit_reached\"\n\n\nclass UserTypes(object):\n \"\"\"Allows for user type specific behaviour. With the benefit of hindsight\n 'admin' and 'guest' users should also be UserTypes. Normal users are type None\n \"\"\"\n\n SUPPORT = \"support\"\n BOT = \"bot\"\n ALL_USER_TYPES = (SUPPORT, BOT)\n\n\nclass RelationTypes(object):\n \"\"\"The types of relations known to this server.\n \"\"\"\n\n ANNOTATION = \"m.annotation\"\n REPLACE = \"m.replace\"\n REFERENCE = \"m.reference\"\n\n\nclass LimitBlockingTypes(object):\n \"\"\"Reasons that a server may be blocked\"\"\"\n\n MONTHLY_ACTIVE_USER = \"monthly_active_user\"\n HS_DISABLED = \"hs_disabled\"\n\n\nclass EventContentFields(object):\n \"\"\"Fields found in events' content, regardless of type.\"\"\"\n\n # Labels for the event, cf https://github.com/matrix-org/matrix-doc/pull/2326\n LABELS = \"org.matrix.labels\"\n\n # Timestamp to delete the event after\n # cf https://github.com/matrix-org/matrix-doc/pull/2228\n SELF_DESTRUCT_AFTER = \"org.matrix.self_destruct_after\"\n", "path": "synapse/api/constants.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2014-2016 OpenMarket Ltd\n# Copyright 2017 Vector Creations Ltd\n# Copyright 2018-2019 New Vector Ltd\n# Copyright 2019 The Matrix.org Foundation C.I.C.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Contains constants from the specification.\"\"\"\n\n# the \"depth\" field on events is limited to 2**63 - 1\nMAX_DEPTH = 2 ** 63 - 1\n\n# the maximum length for a room alias is 255 characters\nMAX_ALIAS_LENGTH = 255\n\n# the maximum length for a user id is 255 characters\nMAX_USERID_LENGTH = 255\n\n\nclass Membership(object):\n\n \"\"\"Represents the membership states of a user in a room.\"\"\"\n\n INVITE = \"invite\"\n JOIN = \"join\"\n KNOCK = \"knock\"\n LEAVE = \"leave\"\n BAN = \"ban\"\n LIST = (INVITE, JOIN, KNOCK, LEAVE, BAN)\n\n\nclass PresenceState(object):\n \"\"\"Represents the presence state of a user.\"\"\"\n\n OFFLINE = \"offline\"\n UNAVAILABLE = \"unavailable\"\n ONLINE = \"online\"\n\n\nclass JoinRules(object):\n PUBLIC = \"public\"\n KNOCK = \"knock\"\n INVITE = \"invite\"\n PRIVATE = \"private\"\n\n\nclass LoginType(object):\n PASSWORD = \"m.login.password\"\n EMAIL_IDENTITY = \"m.login.email.identity\"\n MSISDN = \"m.login.msisdn\"\n RECAPTCHA = \"m.login.recaptcha\"\n TERMS = \"m.login.terms\"\n SSO = \"m.login.sso\"\n DUMMY = \"m.login.dummy\"\n\n # Only for C/S API v1\n APPLICATION_SERVICE = \"m.login.application_service\"\n SHARED_SECRET = 
\"org.matrix.login.shared_secret\"\n\n\nclass EventTypes(object):\n Member = \"m.room.member\"\n Create = \"m.room.create\"\n Tombstone = \"m.room.tombstone\"\n JoinRules = \"m.room.join_rules\"\n PowerLevels = \"m.room.power_levels\"\n Aliases = \"m.room.aliases\"\n Redaction = \"m.room.redaction\"\n ThirdPartyInvite = \"m.room.third_party_invite\"\n RelatedGroups = \"m.room.related_groups\"\n\n RoomHistoryVisibility = \"m.room.history_visibility\"\n CanonicalAlias = \"m.room.canonical_alias\"\n Encrypted = \"m.room.encrypted\"\n RoomAvatar = \"m.room.avatar\"\n RoomEncryption = \"m.room.encryption\"\n GuestAccess = \"m.room.guest_access\"\n\n # These are used for validation\n Message = \"m.room.message\"\n Topic = \"m.room.topic\"\n Name = \"m.room.name\"\n\n ServerACL = \"m.room.server_acl\"\n Pinned = \"m.room.pinned_events\"\n\n Retention = \"m.room.retention\"\n\n Presence = \"m.presence\"\n\n\nclass RejectedReason(object):\n AUTH_ERROR = \"auth_error\"\n\n\nclass RoomCreationPreset(object):\n PRIVATE_CHAT = \"private_chat\"\n PUBLIC_CHAT = \"public_chat\"\n TRUSTED_PRIVATE_CHAT = \"trusted_private_chat\"\n\n\nclass ThirdPartyEntityKind(object):\n USER = \"user\"\n LOCATION = \"location\"\n\n\nServerNoticeMsgType = \"m.server_notice\"\nServerNoticeLimitReached = \"m.server_notice.usage_limit_reached\"\n\n\nclass UserTypes(object):\n \"\"\"Allows for user type specific behaviour. With the benefit of hindsight\n 'admin' and 'guest' users should also be UserTypes. Normal users are type None\n \"\"\"\n\n SUPPORT = \"support\"\n BOT = \"bot\"\n ALL_USER_TYPES = (SUPPORT, BOT)\n\n\nclass RelationTypes(object):\n \"\"\"The types of relations known to this server.\n \"\"\"\n\n ANNOTATION = \"m.annotation\"\n REPLACE = \"m.replace\"\n REFERENCE = \"m.reference\"\n\n\nclass LimitBlockingTypes(object):\n \"\"\"Reasons that a server may be blocked\"\"\"\n\n MONTHLY_ACTIVE_USER = \"monthly_active_user\"\n HS_DISABLED = \"hs_disabled\"\n\n\nclass EventContentFields(object):\n \"\"\"Fields found in events' content, regardless of type.\"\"\"\n\n # Labels for the event, cf https://github.com/matrix-org/matrix-doc/pull/2326\n LABELS = \"org.matrix.labels\"\n\n # Timestamp to delete the event after\n # cf https://github.com/matrix-org/matrix-doc/pull/2228\n SELF_DESTRUCT_AFTER = \"org.matrix.self_destruct_after\"\n", "path": "synapse/api/constants.py"}]} | 1,848 | 124 |
gh_patches_debug_14335 | rasdani/github-patches | git_diff | web2py__web2py-2099 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Extend RConn to be able to connect to different Redis servers from within the same web2py application
Right now it's not possible to connect to different Redis servers from within the same web2py application. Taking a look at the [code of the RConn class](https://github.com/web2py/web2py/blob/f06c60b963a373f661e3bb09d5af49d2098902ec/gluon/contrib/redis_utils.py#L39), you can see that the first established connection made to a Redis server is linked to the current web2py application, and subsequent calls to RConn from within that web2py application will return the first created connection, no matter what the connection parameters are.
This is a problem if you need to connect to different Redis servers from within the same web2py application. Notice this is also a problem if any of the connection arguments change (host, port, password, etc.).
I'm not sure what the reason is for always returning the first established connection, but I think a couple of fixes could be made to avoid these issues. I'll prepare a pull request with a proposal.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gluon/contrib/redis_utils.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 """
4 Developed by [email protected]
5 License MIT/BSD/GPL
6
7 Serves as base to implement Redis connection object and various utils
8 for redis_cache, redis_session and redis_scheduler in the future
9 Should-could be overriden in case redis doesn't keep up (e.g. cluster support)
10 to ensure compatibility with another - similar - library
11 """
12
13 import logging
14 from threading import Lock
15 import time
16 from gluon import current
17
18 logger = logging.getLogger("web2py.redis_utils")
19
20 try:
21 import redis
22 from redis.exceptions import WatchError as RWatchError
23 from redis.exceptions import ConnectionError as RConnectionError
24 except ImportError:
25 logger.error("Needs redis library to work")
26 raise RuntimeError('Needs redis library to work')
27
28
29 locker = Lock()
30
31
32 def RConn(*args, **vars):
33 """
34 Istantiates a StrictRedis connection with parameters, at the first time
35 only
36 """
37 locker.acquire()
38 try:
39 instance_name = 'redis_conn_' + current.request.application
40 if not hasattr(RConn, instance_name):
41 setattr(RConn, instance_name, redis.StrictRedis(*args, **vars))
42 return getattr(RConn, instance_name)
43 finally:
44 locker.release()
45
46 def acquire_lock(conn, lockname, identifier, ltime=10):
47 while True:
48 if conn.set(lockname, identifier, ex=ltime, nx=True):
49 return identifier
50 time.sleep(.01)
51
52
53 _LUA_RELEASE_LOCK = """
54 if redis.call("get", KEYS[1]) == ARGV[1]
55 then
56 return redis.call("del", KEYS[1])
57 else
58 return 0
59 end
60 """
61
62
63 def release_lock(instance, lockname, identifier):
64 return instance._release_script(
65 keys=[lockname], args=[identifier])
66
67
68 def register_release_lock(conn):
69 rtn = conn.register_script(_LUA_RELEASE_LOCK)
70 return rtn
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gluon/contrib/redis_utils.py b/gluon/contrib/redis_utils.py
--- a/gluon/contrib/redis_utils.py
+++ b/gluon/contrib/redis_utils.py
@@ -29,14 +29,16 @@
locker = Lock()
-def RConn(*args, **vars):
+def RConn(application=None, *args, **vars):
"""
Istantiates a StrictRedis connection with parameters, at the first time
only
"""
locker.acquire()
try:
- instance_name = 'redis_conn_' + current.request.application
+ if application is None:
+ application = current.request.application
+ instance_name = 'redis_conn_' + application
if not hasattr(RConn, instance_name):
setattr(RConn, instance_name, redis.StrictRedis(*args, **vars))
return getattr(RConn, instance_name)
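A stripped-down model of the caching behavior the issue describes may make the bug easier to see — hostnames here are illustrative, and this is a sketch of the pattern rather than the real RConn. Because the client is cached under a key derived only from the application name, the second call's connection arguments are silently ignored; the patch above exposes `application` as an explicit argument, so callers that need distinct servers can pass distinct keys:

```python
import redis

_cache = {}


def rconn(application, *args, **kwargs):
    # One cached client per key, like RConn keeps one per application.
    key = "redis_conn_" + application
    if key not in _cache:
        _cache[key] = redis.StrictRedis(*args, **kwargs)
    return _cache[key]


a = rconn("myapp", host="redis-a.example.com")
b = rconn("myapp", host="redis-b.example.com")  # cache hit: host is ignored
assert a is b

c = rconn("myapp_sessions", host="redis-b.example.com")  # distinct key works
assert c is not a
```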
| {"golden_diff": "diff --git a/gluon/contrib/redis_utils.py b/gluon/contrib/redis_utils.py\n--- a/gluon/contrib/redis_utils.py\n+++ b/gluon/contrib/redis_utils.py\n@@ -29,14 +29,16 @@\n locker = Lock()\n \n \n-def RConn(*args, **vars):\n+def RConn(application=None, *args, **vars):\n \"\"\"\n Istantiates a StrictRedis connection with parameters, at the first time\n only\n \"\"\"\n locker.acquire()\n try:\n- instance_name = 'redis_conn_' + current.request.application\n+ if application is None:\n+ application = current.request.application\n+ instance_name = 'redis_conn_' + application\n if not hasattr(RConn, instance_name):\n setattr(RConn, instance_name, redis.StrictRedis(*args, **vars))\n return getattr(RConn, instance_name)\n", "issue": "Extend RConn to be able to connect to different Redis servers from within the same web2py application\nRight now it's not possible to connect to different Redis servers from within the same web2py application. Taking a look at the [code of RConn class](https://github.com/web2py/web2py/blob/f06c60b963a373f661e3bb09d5af49d2098902ec/gluon/contrib/redis_utils.py#L39), you can see that the first stablished connection made to a Redis server is linked to the current web2py application. And subsequent calls to RConn from within that web2py application will return the first created connection, no matter what the connection parameters are.\r\n\r\nThis is a problem if you need to connect to different Redis servers from within the same web2py application. Notice this is also a problem if some of the connection arguments change (host, port, password, etc). \r\n\r\nI'm not shure what's the reason for returning always the first stablished connection, but I think a couple of fixes could be done in order to avoid this issues. I'll prepare a pull request with a proposal. \r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nDeveloped by [email protected]\nLicense MIT/BSD/GPL\n\nServes as base to implement Redis connection object and various utils\nfor redis_cache, redis_session and redis_scheduler in the future\nShould-could be overriden in case redis doesn't keep up (e.g. 
cluster support)\nto ensure compatibility with another - similar - library\n\"\"\"\n\nimport logging\nfrom threading import Lock\nimport time\nfrom gluon import current\n\nlogger = logging.getLogger(\"web2py.redis_utils\")\n\ntry:\n import redis\n from redis.exceptions import WatchError as RWatchError\n from redis.exceptions import ConnectionError as RConnectionError\nexcept ImportError:\n logger.error(\"Needs redis library to work\")\n raise RuntimeError('Needs redis library to work')\n\n\nlocker = Lock()\n\n\ndef RConn(*args, **vars):\n \"\"\"\n Istantiates a StrictRedis connection with parameters, at the first time\n only\n \"\"\"\n locker.acquire()\n try:\n instance_name = 'redis_conn_' + current.request.application\n if not hasattr(RConn, instance_name):\n setattr(RConn, instance_name, redis.StrictRedis(*args, **vars))\n return getattr(RConn, instance_name)\n finally:\n locker.release()\n\ndef acquire_lock(conn, lockname, identifier, ltime=10):\n while True:\n if conn.set(lockname, identifier, ex=ltime, nx=True):\n return identifier\n time.sleep(.01)\n\n\n_LUA_RELEASE_LOCK = \"\"\"\nif redis.call(\"get\", KEYS[1]) == ARGV[1]\nthen\n return redis.call(\"del\", KEYS[1])\nelse\n return 0\nend\n\"\"\"\n\n\ndef release_lock(instance, lockname, identifier):\n return instance._release_script(\n keys=[lockname], args=[identifier])\n\n\ndef register_release_lock(conn):\n rtn = conn.register_script(_LUA_RELEASE_LOCK)\n return rtn\n", "path": "gluon/contrib/redis_utils.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nDeveloped by [email protected]\nLicense MIT/BSD/GPL\n\nServes as base to implement Redis connection object and various utils\nfor redis_cache, redis_session and redis_scheduler in the future\nShould-could be overriden in case redis doesn't keep up (e.g. cluster support)\nto ensure compatibility with another - similar - library\n\"\"\"\n\nimport logging\nfrom threading import Lock\nimport time\nfrom gluon import current\n\nlogger = logging.getLogger(\"web2py.redis_utils\")\n\ntry:\n import redis\n from redis.exceptions import WatchError as RWatchError\n from redis.exceptions import ConnectionError as RConnectionError\nexcept ImportError:\n logger.error(\"Needs redis library to work\")\n raise RuntimeError('Needs redis library to work')\n\n\nlocker = Lock()\n\n\ndef RConn(application=None, *args, **vars):\n \"\"\"\n Istantiates a StrictRedis connection with parameters, at the first time\n only\n \"\"\"\n locker.acquire()\n try:\n if application is None:\n application = current.request.application\n instance_name = 'redis_conn_' + application\n if not hasattr(RConn, instance_name):\n setattr(RConn, instance_name, redis.StrictRedis(*args, **vars))\n return getattr(RConn, instance_name)\n finally:\n locker.release()\n\ndef acquire_lock(conn, lockname, identifier, ltime=10):\n while True:\n if conn.set(lockname, identifier, ex=ltime, nx=True):\n return identifier\n time.sleep(.01)\n\n\n_LUA_RELEASE_LOCK = \"\"\"\nif redis.call(\"get\", KEYS[1]) == ARGV[1]\nthen\n return redis.call(\"del\", KEYS[1])\nelse\n return 0\nend\n\"\"\"\n\n\ndef release_lock(instance, lockname, identifier):\n return instance._release_script(\n keys=[lockname], args=[identifier])\n\n\ndef register_release_lock(conn):\n rtn = conn.register_script(_LUA_RELEASE_LOCK)\n return rtn\n", "path": "gluon/contrib/redis_utils.py"}]} | 1,086 | 196 |
gh_patches_debug_11402 | rasdani/github-patches | git_diff | xorbitsai__inference-777 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Problem with the XINFERENCE_HOME environment variable
Hi, I set the XINFERENCE_HOME environment variable, but the models in the specified directory all turn out to be symlinks. What is the reason for this? Thanks!

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `xinference/constants.py`
Content:
```
1 # Copyright 2022-2023 XProbe Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 from pathlib import Path
17
18 XINFERENCE_ENV_ENDPOINT = "XINFERENCE_ENDPOINT"
19 XINFERENCE_ENV_MODEL_SRC = "XINFERENCE_MODEL_SRC"
20 XINFERENCE_ENV_HOME_PATH = "XINFERENCE_HOME"
21 XINFERENCE_ENV_HEALTH_CHECK_ATTEMPTS = "XINFERENCE_HEALTH_CHECK_ATTEMPTS"
22 XINFERENCE_ENV_HEALTH_CHECK_INTERVAL = "XINFERENCE_HEALTH_CHECK_INTERVAL"
23 XINFERENCE_ENV_DISABLE_VLLM = "XINFERENCE_DISABLE_VLLM"
24
25
26 def get_xinference_home():
27 return os.environ.get(XINFERENCE_ENV_HOME_PATH, str(Path.home() / ".xinference"))
28
29
30 XINFERENCE_HOME = get_xinference_home()
31 XINFERENCE_CACHE_DIR = os.path.join(XINFERENCE_HOME, "cache")
32 XINFERENCE_MODEL_DIR = os.path.join(XINFERENCE_HOME, "model")
33 XINFERENCE_LOG_DIR = os.path.join(XINFERENCE_HOME, "logs")
34 XINFERENCE_IMAGE_DIR = os.path.join(XINFERENCE_HOME, "image")
35
36 XINFERENCE_DEFAULT_LOCAL_HOST = "127.0.0.1"
37 XINFERENCE_DEFAULT_DISTRIBUTED_HOST = "0.0.0.0"
38 XINFERENCE_DEFAULT_ENDPOINT_PORT = 9997
39 XINFERENCE_DEFAULT_LOG_FILE_NAME = "xinference.log"
40 XINFERENCE_LOG_MAX_BYTES = 100 * 1024 * 1024
41 XINFERENCE_LOG_BACKUP_COUNT = 30
42 XINFERENCE_HEALTH_CHECK_ATTEMPTS = int(
43 os.environ.get(XINFERENCE_ENV_HEALTH_CHECK_ATTEMPTS, 3)
44 )
45 XINFERENCE_HEALTH_CHECK_INTERVAL = int(
46 os.environ.get(XINFERENCE_ENV_HEALTH_CHECK_INTERVAL, 3)
47 )
48 XINFERENCE_DISABLE_VLLM = bool(int(os.environ.get(XINFERENCE_ENV_DISABLE_VLLM, 0)))
49
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/xinference/constants.py b/xinference/constants.py
--- a/xinference/constants.py
+++ b/xinference/constants.py
@@ -23,8 +23,15 @@
XINFERENCE_ENV_DISABLE_VLLM = "XINFERENCE_DISABLE_VLLM"
-def get_xinference_home():
- return os.environ.get(XINFERENCE_ENV_HOME_PATH, str(Path.home() / ".xinference"))
+def get_xinference_home() -> str:
+ home_path = os.environ.get(XINFERENCE_ENV_HOME_PATH)
+ if home_path is None:
+ home_path = str(Path.home() / ".xinference")
+ else:
+ # if user has already set `XINFERENCE_HOME` env, change huggingface and modelscope default download path
+ os.environ["HUGGINGFACE_HUB_CACHE"] = os.path.join(home_path, "huggingface")
+ os.environ["MODELSCOPE_CACHE"] = os.path.join(home_path, "modelscope")
+ return home_path
XINFERENCE_HOME = get_xinference_home()
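The symlinks the reporter saw are consistent with how `huggingface_hub` lays out its snapshot cache (files live under its own cache directory and are reached via symlinks), which is why the fix redirects both download caches under `XINFERENCE_HOME` when that variable is set. A small sketch of the resulting environment — the `/data/xinference` path is hypothetical, and these variables must be set before the hub libraries are imported:

```python
import os

os.environ["XINFERENCE_HOME"] = "/data/xinference"  # hypothetical location

home = os.environ["XINFERENCE_HOME"]
# Mirrors what the patched get_xinference_home() does:
os.environ["HUGGINGFACE_HUB_CACHE"] = os.path.join(home, "huggingface")
os.environ["MODELSCOPE_CACHE"] = os.path.join(home, "modelscope")

print(os.environ["HUGGINGFACE_HUB_CACHE"])  # /data/xinference/huggingface
print(os.environ["MODELSCOPE_CACHE"])       # /data/xinference/modelscope
```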
| {"golden_diff": "diff --git a/xinference/constants.py b/xinference/constants.py\n--- a/xinference/constants.py\n+++ b/xinference/constants.py\n@@ -23,8 +23,15 @@\n XINFERENCE_ENV_DISABLE_VLLM = \"XINFERENCE_DISABLE_VLLM\"\n \n \n-def get_xinference_home():\n- return os.environ.get(XINFERENCE_ENV_HOME_PATH, str(Path.home() / \".xinference\"))\n+def get_xinference_home() -> str:\n+ home_path = os.environ.get(XINFERENCE_ENV_HOME_PATH)\n+ if home_path is None:\n+ home_path = str(Path.home() / \".xinference\")\n+ else:\n+ # if user has already set `XINFERENCE_HOME` env, change huggingface and modelscope default download path\n+ os.environ[\"HUGGINGFACE_HUB_CACHE\"] = os.path.join(home_path, \"huggingface\")\n+ os.environ[\"MODELSCOPE_CACHE\"] = os.path.join(home_path, \"modelscope\")\n+ return home_path\n \n \n XINFERENCE_HOME = get_xinference_home()\n", "issue": "XINFERENCE_HOME\u73af\u5883\u53d8\u91cf\u95ee\u9898\nhi , \u6211\u8fd9\u8fb9\u8bbe\u7f6e\u4e86XINFERENCE_HOME\u73af\u5883\u53d8\u91cf\uff0c\u4f46\u662f\u53bb\u6307\u5b9a\u7684\u76ee\u5f55\u4e0b\u770b\u5230\u91cc\u9762\u7684\u6a21\u578b\u90fd\u662f\u8f6f\u8fde\u63a5\uff0c\u8fd9\u662f\u4ec0\u4e48\u539f\u56e0\uff0c\u8c22\u8c22!\r\n\r\n\n", "before_files": [{"content": "# Copyright 2022-2023 XProbe Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nfrom pathlib import Path\n\nXINFERENCE_ENV_ENDPOINT = \"XINFERENCE_ENDPOINT\"\nXINFERENCE_ENV_MODEL_SRC = \"XINFERENCE_MODEL_SRC\"\nXINFERENCE_ENV_HOME_PATH = \"XINFERENCE_HOME\"\nXINFERENCE_ENV_HEALTH_CHECK_ATTEMPTS = \"XINFERENCE_HEALTH_CHECK_ATTEMPTS\"\nXINFERENCE_ENV_HEALTH_CHECK_INTERVAL = \"XINFERENCE_HEALTH_CHECK_INTERVAL\"\nXINFERENCE_ENV_DISABLE_VLLM = \"XINFERENCE_DISABLE_VLLM\"\n\n\ndef get_xinference_home():\n return os.environ.get(XINFERENCE_ENV_HOME_PATH, str(Path.home() / \".xinference\"))\n\n\nXINFERENCE_HOME = get_xinference_home()\nXINFERENCE_CACHE_DIR = os.path.join(XINFERENCE_HOME, \"cache\")\nXINFERENCE_MODEL_DIR = os.path.join(XINFERENCE_HOME, \"model\")\nXINFERENCE_LOG_DIR = os.path.join(XINFERENCE_HOME, \"logs\")\nXINFERENCE_IMAGE_DIR = os.path.join(XINFERENCE_HOME, \"image\")\n\nXINFERENCE_DEFAULT_LOCAL_HOST = \"127.0.0.1\"\nXINFERENCE_DEFAULT_DISTRIBUTED_HOST = \"0.0.0.0\"\nXINFERENCE_DEFAULT_ENDPOINT_PORT = 9997\nXINFERENCE_DEFAULT_LOG_FILE_NAME = \"xinference.log\"\nXINFERENCE_LOG_MAX_BYTES = 100 * 1024 * 1024\nXINFERENCE_LOG_BACKUP_COUNT = 30\nXINFERENCE_HEALTH_CHECK_ATTEMPTS = int(\n os.environ.get(XINFERENCE_ENV_HEALTH_CHECK_ATTEMPTS, 3)\n)\nXINFERENCE_HEALTH_CHECK_INTERVAL = int(\n os.environ.get(XINFERENCE_ENV_HEALTH_CHECK_INTERVAL, 3)\n)\nXINFERENCE_DISABLE_VLLM = bool(int(os.environ.get(XINFERENCE_ENV_DISABLE_VLLM, 0)))\n", "path": "xinference/constants.py"}], "after_files": [{"content": "# Copyright 2022-2023 XProbe Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# 
http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nfrom pathlib import Path\n\nXINFERENCE_ENV_ENDPOINT = \"XINFERENCE_ENDPOINT\"\nXINFERENCE_ENV_MODEL_SRC = \"XINFERENCE_MODEL_SRC\"\nXINFERENCE_ENV_HOME_PATH = \"XINFERENCE_HOME\"\nXINFERENCE_ENV_HEALTH_CHECK_ATTEMPTS = \"XINFERENCE_HEALTH_CHECK_ATTEMPTS\"\nXINFERENCE_ENV_HEALTH_CHECK_INTERVAL = \"XINFERENCE_HEALTH_CHECK_INTERVAL\"\nXINFERENCE_ENV_DISABLE_VLLM = \"XINFERENCE_DISABLE_VLLM\"\n\n\ndef get_xinference_home() -> str:\n home_path = os.environ.get(XINFERENCE_ENV_HOME_PATH)\n if home_path is None:\n home_path = str(Path.home() / \".xinference\")\n else:\n # if user has already set `XINFERENCE_HOME` env, change huggingface and modelscope default download path\n os.environ[\"HUGGINGFACE_HUB_CACHE\"] = os.path.join(home_path, \"huggingface\")\n os.environ[\"MODELSCOPE_CACHE\"] = os.path.join(home_path, \"modelscope\")\n return home_path\n\n\nXINFERENCE_HOME = get_xinference_home()\nXINFERENCE_CACHE_DIR = os.path.join(XINFERENCE_HOME, \"cache\")\nXINFERENCE_MODEL_DIR = os.path.join(XINFERENCE_HOME, \"model\")\nXINFERENCE_LOG_DIR = os.path.join(XINFERENCE_HOME, \"logs\")\nXINFERENCE_IMAGE_DIR = os.path.join(XINFERENCE_HOME, \"image\")\n\nXINFERENCE_DEFAULT_LOCAL_HOST = \"127.0.0.1\"\nXINFERENCE_DEFAULT_DISTRIBUTED_HOST = \"0.0.0.0\"\nXINFERENCE_DEFAULT_ENDPOINT_PORT = 9997\nXINFERENCE_DEFAULT_LOG_FILE_NAME = \"xinference.log\"\nXINFERENCE_LOG_MAX_BYTES = 100 * 1024 * 1024\nXINFERENCE_LOG_BACKUP_COUNT = 30\nXINFERENCE_HEALTH_CHECK_ATTEMPTS = int(\n os.environ.get(XINFERENCE_ENV_HEALTH_CHECK_ATTEMPTS, 3)\n)\nXINFERENCE_HEALTH_CHECK_INTERVAL = int(\n os.environ.get(XINFERENCE_ENV_HEALTH_CHECK_INTERVAL, 3)\n)\nXINFERENCE_DISABLE_VLLM = bool(int(os.environ.get(XINFERENCE_ENV_DISABLE_VLLM, 0)))\n", "path": "xinference/constants.py"}]} | 967 | 236 |
gh_patches_debug_22760 | rasdani/github-patches | git_diff | carpentries__amy-1065 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bulk import workflow encounters IntegrityError when saving an organization
Currently, we allow organizations whose domain contains the `www` subdomain. For example, Google can exist as both `www.google.com` and `google.com`, leading to an `IntegrityError` when saving one of them while the other already exists.
Shouldn't we enforce one URL pattern and trim/add `www` to the `domain` field when saving an organization?
Testcase:
``` py
In [5]: Organization.objects.create(fullname='Google', domain='google.com')
Out[5]: <Organization: google.com>
In [6]: Organization.objects.create(fullname='Google', domain='www.google.com')
---------------------------------------------------------------------------
IntegrityError Traceback (most recent call last)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pydata/api.py`
Content:
```
1 from functools import lru_cache
2 from json import JSONDecodeError
3 from urllib.parse import urljoin, urlparse
4
5 import requests
6 from django.conf import settings
7
8 from workshops.models import (
9 Person,
10 Role,
11 Organization,
12 Sponsorship,
13 Task,
14 )
15 from workshops.util import create_username
16
17
18 class BaseAPIClient(requests.Session):
19 """
20 An API client that abstracts away the work of dealing with URLs.
21 Usage:
22 > client = APIClient(event)
23 > list(client) -> returns a list of all objects returned by the API.
24 > client[23] -> returns the object with pk=23
25 """
26 ROOT_ENDPOINT = 'api/'
27
28 @lru_cache(maxsize=None)
29 def __new__(cls, event):
30 """
31 Returns an instance of APIClient.
32 Throws NotImplementedError if an API does not exist at the root URL.
33 """
34 try:
35 r = requests.get(urljoin(event.url, cls.ROOT_ENDPOINT))
36 r.raise_for_status()
37 r.json()
38 except (requests.exceptions.HTTPError, JSONDecodeError):
39 raise NotImplementedError('Conference site does not support an API')
40 return super().__new__(cls)
41
42 def __init__(self, event):
43 '''Populate API endpoint and set up basic authentication'''
44 super().__init__()
45 self.event = event
46 self.endpoint = urljoin(event.url, self.ENDPOINT)
47 self.auth = (
48 settings.PYDATA_USERNAME_SECRET, settings.PYDATA_PASSWORD_SECRET)
49
50 def __iter__(self):
51 try:
52 r = self.get(self.endpoint)
53 r.raise_for_status()
54 pydata_objs = r.json()
55 except (requests.exceptions.HTTPError, JSONDecodeError) as e:
56 raise IOError('Cannot fetch instances from API: {}'.format(str(e)))
57 for obj in pydata_objs:
58 yield self.parse(obj)
59
60 def __contains__(self, pk):
61 try:
62 self.get(self.endpoint + str(pk)).raise_for_status()
63 except requests.exceptions.HTTPError:
64 return False
65 else:
66 return True
67
68 def __getitem__(self, pk):
69 if pk not in self:
70 raise KeyError(
71 '{} does not exist'.format(self.model._meta.verbose_name)
72 )
73 obj = self.get(self.endpoint + str(pk)).json()
74 return self.parse(obj)
75
76
77 class PersonAPIClient(BaseAPIClient):
78 ENDPOINT = 'api/speaker/'
79 model = Person
80
81 def parse(self, speaker):
82 speaker['name'] = speaker['name'].strip()
83 personal = speaker['name'].rsplit(' ', 1)[0]
84 family = speaker['name'].rsplit(' ', 1)[-1]
85 return Person(
86 username=speaker['username'],
87 personal=personal,
88 family=family,
89 email=speaker['email'],
90 url=speaker['absolute_url'],
91 )
92
93
94 class TaskAPIClient(BaseAPIClient):
95 ENDPOINT = 'api/presentation/'
96 model = Task
97
98 def parse(self, presentation):
99 return Task(
100 event=self.event,
101 person=Person.objects.get_or_create(
102 email=presentation['speaker']['email'],
103 defaults={
104 'username': create_username('', presentation['speaker']['username']),
105 'personal': presentation['speaker']['name'].rsplit(' ', 1)[0],
106 'family': presentation['speaker']['name'].rsplit(' ', 1)[-1],
107 'url': presentation['speaker']['absolute_url'],
108 }
109 )[0],
110 role=Role.objects.get(name='presenter'),
111 title=presentation['title'],
112 url=presentation['absolute_url'],
113 )
114
115
116 class SponsorshipAPIClient(BaseAPIClient):
117 ENDPOINT = 'api/sponsor/'
118 model = Sponsorship
119
120 def parse(self, sponsor):
121 return Sponsorship(
122 organization=Organization.objects.get_or_create(
123 domain=urlparse(sponsor['external_url']).netloc,
124 defaults={
125 'fullname': sponsor['name'],
126 'notes': sponsor['annotation'],
127 },
128 )[0],
129 event=self.event,
130 amount=sponsor['level']['cost'],
131 contact=Person.objects.get_or_create(
132 email=sponsor['contact_email'],
133 defaults={
134 'username': create_username('', sponsor['contact_name']),
135 'personal': sponsor['contact_name'].rsplit(' ', 1)[0],
136 'family': sponsor['contact_name'].rsplit(' ', 1)[-1],
137 },
138 )[0],
139 )
140
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pydata/api.py b/pydata/api.py
--- a/pydata/api.py
+++ b/pydata/api.py
@@ -4,6 +4,7 @@
import requests
from django.conf import settings
+from django.db.models import Q
from workshops.models import (
Person,
@@ -118,14 +119,18 @@
model = Sponsorship
def parse(self, sponsor):
+ domain = urlparse(sponsor['external_url']).netloc
+ organization = Organization.objects.filter(
+ Q(fullname=sponsor['name']) | Q(domain=domain)
+ ).first()
+ if not organization:
+ organization = Organization.objects.create(
+ fullname=sponsor['name'],
+ domain=domain,
+ notes=sponsor['annotation'],
+ )
return Sponsorship(
- organization=Organization.objects.get_or_create(
- domain=urlparse(sponsor['external_url']).netloc,
- defaults={
- 'fullname': sponsor['name'],
- 'notes': sponsor['annotation'],
- },
- )[0],
+ organization=organization,
event=self.event,
amount=sponsor['level']['cost'],
contact=Person.objects.get_or_create(
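The merged fix above avoids the `IntegrityError` by first matching an existing organization on fullname or domain. The issue's alternative suggestion — enforcing a single URL pattern — could look like the hypothetical helper below; this is illustrative only and not what the patch does:

```python
from urllib.parse import urlparse


def normalize_domain(url_or_domain):
    """Strip any scheme/path and a leading "www." to store one canonical domain."""
    netloc = urlparse(url_or_domain).netloc or url_or_domain
    return netloc[len("www."):] if netloc.startswith("www.") else netloc


assert normalize_domain("https://www.google.com/sponsors") == "google.com"
assert normalize_domain("google.com") == "google.com"
```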
| {"golden_diff": "diff --git a/pydata/api.py b/pydata/api.py\n--- a/pydata/api.py\n+++ b/pydata/api.py\n@@ -4,6 +4,7 @@\n \n import requests\n from django.conf import settings\n+from django.db.models import Q\n \n from workshops.models import (\n Person,\n@@ -118,14 +119,18 @@\n model = Sponsorship\n \n def parse(self, sponsor):\n+ domain = urlparse(sponsor['external_url']).netloc\n+ organization = Organization.objects.filter(\n+ Q(fullname=sponsor['name']) | Q(domain=domain)\n+ ).first()\n+ if not organization:\n+ organization = Organization.objects.create(\n+ fullname=sponsor['name'],\n+ domain=domain,\n+ notes=sponsor['annotation'],\n+ )\n return Sponsorship(\n- organization=Organization.objects.get_or_create(\n- domain=urlparse(sponsor['external_url']).netloc,\n- defaults={\n- 'fullname': sponsor['name'],\n- 'notes': sponsor['annotation'],\n- },\n- )[0],\n+ organization=organization,\n event=self.event,\n amount=sponsor['level']['cost'],\n contact=Person.objects.get_or_create(\n", "issue": "Bulk import workflow encounters IntegrityError when saving an organization\nCurrently, we allow organizations with the domain that contains the `www` subdomain. For eg: Google can exist as `www.google.com` as well as `google.com`, leading to `IntegrityError` while saving the first while the second exists.\n\nShouldn't we enforce one URL pattern and trim/add `www` to the `domain` field when saving an organization?\n\nTestcase:\n\n``` py\nIn [5]: Organization.objects.create(fullname='Google', domain='google.com')\nOut[5]: <Organization: google.com>\n\nIn [6]: Organization.objects.create(fullname='Google', domain='www.google.com')\n---------------------------------------------------------------------------\nIntegrityError Traceback (most recent call last)\n```\n\n", "before_files": [{"content": "from functools import lru_cache\nfrom json import JSONDecodeError\nfrom urllib.parse import urljoin, urlparse\n\nimport requests\nfrom django.conf import settings\n\nfrom workshops.models import (\n Person,\n Role,\n Organization,\n Sponsorship,\n Task,\n)\nfrom workshops.util import create_username\n\n\nclass BaseAPIClient(requests.Session):\n \"\"\"\n An API client that abstracts away the work of dealing with URLs.\n Usage:\n > client = APIClient(event)\n > list(client) -> returns a list of all objects returned by the API.\n > client[23] -> returns the object with pk=23\n \"\"\"\n ROOT_ENDPOINT = 'api/'\n\n @lru_cache(maxsize=None)\n def __new__(cls, event):\n \"\"\"\n Returns an instance of APIClient.\n Throws NotImplementedError if an API does not exist at the root URL.\n \"\"\"\n try:\n r = requests.get(urljoin(event.url, cls.ROOT_ENDPOINT))\n r.raise_for_status()\n r.json()\n except (requests.exceptions.HTTPError, JSONDecodeError):\n raise NotImplementedError('Conference site does not support an API')\n return super().__new__(cls)\n\n def __init__(self, event):\n '''Populate API endpoint and set up basic authentication'''\n super().__init__()\n self.event = event\n self.endpoint = urljoin(event.url, self.ENDPOINT)\n self.auth = (\n settings.PYDATA_USERNAME_SECRET, settings.PYDATA_PASSWORD_SECRET)\n\n def __iter__(self):\n try:\n r = self.get(self.endpoint)\n r.raise_for_status()\n pydata_objs = r.json()\n except (requests.exceptions.HTTPError, JSONDecodeError) as e:\n raise IOError('Cannot fetch instances from API: {}'.format(str(e)))\n for obj in pydata_objs:\n yield self.parse(obj)\n\n def __contains__(self, pk):\n try:\n self.get(self.endpoint + str(pk)).raise_for_status()\n except 
requests.exceptions.HTTPError:\n return False\n else:\n return True\n\n def __getitem__(self, pk):\n if pk not in self:\n raise KeyError(\n '{} does not exist'.format(self.model._meta.verbose_name)\n )\n obj = self.get(self.endpoint + str(pk)).json()\n return self.parse(obj)\n\n\nclass PersonAPIClient(BaseAPIClient):\n ENDPOINT = 'api/speaker/'\n model = Person\n\n def parse(self, speaker):\n speaker['name'] = speaker['name'].strip()\n personal = speaker['name'].rsplit(' ', 1)[0]\n family = speaker['name'].rsplit(' ', 1)[-1]\n return Person(\n username=speaker['username'],\n personal=personal,\n family=family,\n email=speaker['email'],\n url=speaker['absolute_url'],\n )\n\n\nclass TaskAPIClient(BaseAPIClient):\n ENDPOINT = 'api/presentation/'\n model = Task\n\n def parse(self, presentation):\n return Task(\n event=self.event,\n person=Person.objects.get_or_create(\n email=presentation['speaker']['email'],\n defaults={\n 'username': create_username('', presentation['speaker']['username']),\n 'personal': presentation['speaker']['name'].rsplit(' ', 1)[0],\n 'family': presentation['speaker']['name'].rsplit(' ', 1)[-1],\n 'url': presentation['speaker']['absolute_url'],\n }\n )[0],\n role=Role.objects.get(name='presenter'),\n title=presentation['title'],\n url=presentation['absolute_url'],\n )\n\n\nclass SponsorshipAPIClient(BaseAPIClient):\n ENDPOINT = 'api/sponsor/'\n model = Sponsorship\n\n def parse(self, sponsor):\n return Sponsorship(\n organization=Organization.objects.get_or_create(\n domain=urlparse(sponsor['external_url']).netloc,\n defaults={\n 'fullname': sponsor['name'],\n 'notes': sponsor['annotation'],\n },\n )[0],\n event=self.event,\n amount=sponsor['level']['cost'],\n contact=Person.objects.get_or_create(\n email=sponsor['contact_email'],\n defaults={\n 'username': create_username('', sponsor['contact_name']),\n 'personal': sponsor['contact_name'].rsplit(' ', 1)[0],\n 'family': sponsor['contact_name'].rsplit(' ', 1)[-1],\n },\n )[0],\n )\n", "path": "pydata/api.py"}], "after_files": [{"content": "from functools import lru_cache\nfrom json import JSONDecodeError\nfrom urllib.parse import urljoin, urlparse\n\nimport requests\nfrom django.conf import settings\nfrom django.db.models import Q\n\nfrom workshops.models import (\n Person,\n Role,\n Organization,\n Sponsorship,\n Task,\n)\nfrom workshops.util import create_username\n\n\nclass BaseAPIClient(requests.Session):\n \"\"\"\n An API client that abstracts away the work of dealing with URLs.\n Usage:\n > client = APIClient(event)\n > list(client) -> returns a list of all objects returned by the API.\n > client[23] -> returns the object with pk=23\n \"\"\"\n ROOT_ENDPOINT = 'api/'\n\n @lru_cache(maxsize=None)\n def __new__(cls, event):\n \"\"\"\n Returns an instance of APIClient.\n Throws NotImplementedError if an API does not exist at the root URL.\n \"\"\"\n try:\n r = requests.get(urljoin(event.url, cls.ROOT_ENDPOINT))\n r.raise_for_status()\n r.json()\n except (requests.exceptions.HTTPError, JSONDecodeError):\n raise NotImplementedError('Conference site does not support an API')\n return super().__new__(cls)\n\n def __init__(self, event):\n '''Populate API endpoint and set up basic authentication'''\n super().__init__()\n self.event = event\n self.endpoint = urljoin(event.url, self.ENDPOINT)\n self.auth = (\n settings.PYDATA_USERNAME_SECRET, settings.PYDATA_PASSWORD_SECRET)\n\n def __iter__(self):\n try:\n r = self.get(self.endpoint)\n r.raise_for_status()\n pydata_objs = r.json()\n except (requests.exceptions.HTTPError, 
JSONDecodeError) as e:\n raise IOError('Cannot fetch instances from API: {}'.format(str(e)))\n for obj in pydata_objs:\n yield self.parse(obj)\n\n def __contains__(self, pk):\n try:\n self.get(self.endpoint + str(pk)).raise_for_status()\n except requests.exceptions.HTTPError:\n return False\n else:\n return True\n\n def __getitem__(self, pk):\n if pk not in self:\n raise KeyError(\n '{} does not exist'.format(self.model._meta.verbose_name)\n )\n obj = self.get(self.endpoint + str(pk)).json()\n return self.parse(obj)\n\n\nclass PersonAPIClient(BaseAPIClient):\n ENDPOINT = 'api/speaker/'\n model = Person\n\n def parse(self, speaker):\n speaker['name'] = speaker['name'].strip()\n personal = speaker['name'].rsplit(' ', 1)[0]\n family = speaker['name'].rsplit(' ', 1)[-1]\n return Person(\n username=speaker['username'],\n personal=personal,\n family=family,\n email=speaker['email'],\n url=speaker['absolute_url'],\n )\n\n\nclass TaskAPIClient(BaseAPIClient):\n ENDPOINT = 'api/presentation/'\n model = Task\n\n def parse(self, presentation):\n return Task(\n event=self.event,\n person=Person.objects.get_or_create(\n email=presentation['speaker']['email'],\n defaults={\n 'username': create_username('', presentation['speaker']['username']),\n 'personal': presentation['speaker']['name'].rsplit(' ', 1)[0],\n 'family': presentation['speaker']['name'].rsplit(' ', 1)[-1],\n 'url': presentation['speaker']['absolute_url'],\n }\n )[0],\n role=Role.objects.get(name='presenter'),\n title=presentation['title'],\n url=presentation['absolute_url'],\n )\n\n\nclass SponsorshipAPIClient(BaseAPIClient):\n ENDPOINT = 'api/sponsor/'\n model = Sponsorship\n\n def parse(self, sponsor):\n domain = urlparse(sponsor['external_url']).netloc\n organization = Organization.objects.filter(\n Q(fullname=sponsor['name']) | Q(domain=domain)\n ).first()\n if not organization:\n organization = Organization.objects.create(\n fullname=sponsor['name'],\n domain=domain,\n notes=sponsor['annotation'],\n )\n return Sponsorship(\n organization=organization,\n event=self.event,\n amount=sponsor['level']['cost'],\n contact=Person.objects.get_or_create(\n email=sponsor['contact_email'],\n defaults={\n 'username': create_username('', sponsor['contact_name']),\n 'personal': sponsor['contact_name'].rsplit(' ', 1)[0],\n 'family': sponsor['contact_name'].rsplit(' ', 1)[-1],\n },\n )[0],\n )\n", "path": "pydata/api.py"}]} | 1,665 | 263 |
gh_patches_debug_23049 | rasdani/github-patches | git_diff | StackStorm__st2-5775 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add query type to linux.dig action
## SUMMARY
I would like the ability to query TXT records and noticed there is no way to specify a query type to the dig action.
### STACKSTORM VERSION
`st2 3.6.0, on Python 3.6.8`
## Steps to reproduce the problem
I attempted a few ways to add "TXT" to the query, such as adding it to queryopts or appending it to the hostname string. Upon looking at the code I realized nothing like that would work.
## Expected Results
Get a list of TXT records returned
## Some sample code to add it
```
class DigAction(Action):
    def run(self, rand, count, nameserver, hostname, queryopts, querytype):  # Add querytype parameter
        opt_list = []
        output = []

        cmd_args = ["dig"]
        if nameserver:
            nameserver = "@" + nameserver
            cmd_args.append(nameserver)

        if isinstance(queryopts, str) and "," in queryopts:
            opt_list = queryopts.split(",")
        else:
            opt_list.append(queryopts)

        cmd_args.extend(["+" + option for option in opt_list])

        cmd_args.append(hostname)
        cmd_args.append(querytype)  # append query type (Default is set to "A" in dig.yaml)

        try:
            raw_result = subprocess.Popen(
                cmd_args, stderr=subprocess.PIPE, stdout=subprocess.PIPE
            ).communicate()[0]

            if sys.version_info >= (3,):
                # This function might call getpreferred encoding unless we pass
                # do_setlocale=False.
                encoding = locale.getpreferredencoding(do_setlocale=False)
                result_list_str = raw_result.decode(encoding)
            else:
                result_list_str = str(raw_result)

            if querytype.lower() == "txt":  # improve the output formatting result of TXT records
                result_list_str = result_list_str.replace('"', '')  # strip quotes so we don't see \" wrapped around output
            result_list = list(filter(None, result_list_str.split("\n")))
```
I only spent a few minutes on this code, just enough to make it work for me. It could be improved to make sure it works for other query types as well. I added inline comments to show the only lines I added.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `contrib/linux/actions/dig.py`
Content:
```
1 #! /usr/bin/python
2
3 # Copyright 2020 The StackStorm Authors.
4 # Copyright 2019 Extreme Networks, Inc.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License");
7 # you may not use this file except in compliance with the License.
8 # You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS,
14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 # See the License for the specific language governing permissions and
16 # limitations under the License.
17
18 import errno
19 import locale
20 import subprocess
21 import random
22 import sys
23
24 from st2common.runners.base_action import Action
25
26
27 class DigAction(Action):
28 def run(self, rand, count, nameserver, hostname, queryopts):
29 opt_list = []
30 output = []
31
32 cmd_args = ["dig"]
33 if nameserver:
34 nameserver = "@" + nameserver
35 cmd_args.append(nameserver)
36
37 if isinstance(queryopts, str) and "," in queryopts:
38 opt_list = queryopts.split(",")
39 else:
40 opt_list.append(queryopts)
41
42 cmd_args.extend(["+" + option for option in opt_list])
43
44 cmd_args.append(hostname)
45
46 try:
47 raw_result = subprocess.Popen(
48 cmd_args, stderr=subprocess.PIPE, stdout=subprocess.PIPE
49 ).communicate()[0]
50
51 if sys.version_info >= (3,):
52 # This function might call getpreferred encoding unless we pass
53 # do_setlocale=False.
54 encoding = locale.getpreferredencoding(do_setlocale=False)
55 result_list_str = raw_result.decode(encoding)
56 else:
57 result_list_str = str(raw_result)
58
59 result_list = list(filter(None, result_list_str.split("\n")))
60
61 # NOTE: Python3 supports the FileNotFoundError, the errono.ENOENT is for py2 compat
62 # for Python3:
63 # except FileNotFoundError as e:
64 except OSError as e:
65 if e.errno == errno.ENOENT:
66 return (
67 False,
68 "Can't find dig installed in the path (usually /usr/bin/dig). If "
69 "dig isn't installed, you can install it with 'sudo yum install "
70 "bind-utils' or 'sudo apt install dnsutils'",
71 )
72 else:
73 raise e
74
75 if int(count) > len(result_list) or count <= 0:
76 count = len(result_list)
77
78 output = result_list[0:count]
79 if rand is True:
80 random.shuffle(output)
81 return output
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/contrib/linux/actions/dig.py b/contrib/linux/actions/dig.py
--- a/contrib/linux/actions/dig.py
+++ b/contrib/linux/actions/dig.py
@@ -25,7 +25,7 @@
class DigAction(Action):
- def run(self, rand, count, nameserver, hostname, queryopts):
+ def run(self, rand, count, nameserver, hostname, queryopts, querytype):
opt_list = []
output = []
@@ -42,6 +42,7 @@
cmd_args.extend(["+" + option for option in opt_list])
cmd_args.append(hostname)
+ cmd_args.append(querytype)
try:
raw_result = subprocess.Popen(
@@ -56,6 +57,10 @@
else:
result_list_str = str(raw_result)
+ # Better format the output when the type is TXT
+ if querytype.lower() == "txt":
+ result_list_str = result_list_str.replace('"', "")
+
result_list = list(filter(None, result_list_str.split("\n")))
# NOTE: Python3 supports the FileNotFoundError, the errono.ENOENT is for py2 compat
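For context on the quote-stripping hunk above: `dig` prints TXT record data wrapped in double quotes, so without the replacement each output item carries literal quote characters. A tiny illustration with made-up record data:
```python
raw = 'example.com. 300 IN TXT "v=spf1 include:_spf.example.com ~all"'
print(raw.replace('"', ""))
# example.com. 300 IN TXT v=spf1 include:_spf.example.com ~all
```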
| {"golden_diff": "diff --git a/contrib/linux/actions/dig.py b/contrib/linux/actions/dig.py\n--- a/contrib/linux/actions/dig.py\n+++ b/contrib/linux/actions/dig.py\n@@ -25,7 +25,7 @@\n \n \n class DigAction(Action):\n- def run(self, rand, count, nameserver, hostname, queryopts):\n+ def run(self, rand, count, nameserver, hostname, queryopts, querytype):\n opt_list = []\n output = []\n \n@@ -42,6 +42,7 @@\n cmd_args.extend([\"+\" + option for option in opt_list])\n \n cmd_args.append(hostname)\n+ cmd_args.append(querytype)\n \n try:\n raw_result = subprocess.Popen(\n@@ -56,6 +57,10 @@\n else:\n result_list_str = str(raw_result)\n \n+ # Better format the output when the type is TXT\n+ if querytype.lower() == \"txt\":\n+ result_list_str = result_list_str.replace('\"', \"\")\n+\n result_list = list(filter(None, result_list_str.split(\"\\n\")))\n \n # NOTE: Python3 supports the FileNotFoundError, the errono.ENOENT is for py2 compat\n", "issue": "Add query type to linux.dig action\n## SUMMARY\r\n\r\nI would like the ability to query TXT records and noticed there is no way to specify a query type to the dig action. \r\n\r\n### STACKSTORM VERSION\r\n\r\n`st2 3.6.0, on Python 3.6.8`\r\n\r\n## Steps to reproduce the problem\r\n\r\nI attempted a few ways to add \"TXT\" to the query by adding to queryopts or try appending to the string hostname. Upon looking at the code I realized nothing like that would work.\r\n\r\n## Expected Results\r\n\r\nGet a list returned of TXT records\r\n\r\n## Some sample code to add it\r\n\r\n```\r\nclass DigAction(Action):\r\n def run(self, rand, count, nameserver, hostname, queryopts, querytype): # Add querytype parameter\r\n opt_list = []\r\n output = []\r\n\r\n cmd_args = [\"dig\"]\r\n if nameserver:\r\n nameserver = \"@\" + nameserver\r\n cmd_args.append(nameserver)\r\n\r\n if isinstance(queryopts, str) and \",\" in queryopts:\r\n opt_list = queryopts.split(\",\")\r\n else:\r\n opt_list.append(queryopts)\r\n\r\n cmd_args.extend([\"+\" + option for option in opt_list])\r\n\r\n cmd_args.append(hostname)\r\n cmd_args.append(querytype) # append query type (Default is set to \"A\" in dig.yaml)\r\n\r\n try:\r\n raw_result = subprocess.Popen(\r\n cmd_args, stderr=subprocess.PIPE, stdout=subprocess.PIPE\r\n ).communicate()[0]\r\n\r\n if sys.version_info >= (3,):\r\n # This function might call getpreferred encoding unless we pass\r\n # do_setlocale=False.\r\n encoding = locale.getpreferredencoding(do_setlocale=False)\r\n result_list_str = raw_result.decode(encoding)\r\n else:\r\n result_list_str = str(raw_result)\r\n\r\n if querytype.lower() == \"txt\": # improve the output formatting result of TXT records\r\n result_list_str = result_list_str.replace('\"', '') # strip quotes so we don't see \\\" wrapped around output\r\n result_list = list(filter(None, result_list_str.split(\"\\n\")))\r\n```\r\n\r\nI only spent a few minutes on this code to test making it work for me. It could be improved on to make sure works for other types as well. I added inline comments to show the only lines I added\n", "before_files": [{"content": "#! 
/usr/bin/python\n\n# Copyright 2020 The StackStorm Authors.\n# Copyright 2019 Extreme Networks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport errno\nimport locale\nimport subprocess\nimport random\nimport sys\n\nfrom st2common.runners.base_action import Action\n\n\nclass DigAction(Action):\n def run(self, rand, count, nameserver, hostname, queryopts):\n opt_list = []\n output = []\n\n cmd_args = [\"dig\"]\n if nameserver:\n nameserver = \"@\" + nameserver\n cmd_args.append(nameserver)\n\n if isinstance(queryopts, str) and \",\" in queryopts:\n opt_list = queryopts.split(\",\")\n else:\n opt_list.append(queryopts)\n\n cmd_args.extend([\"+\" + option for option in opt_list])\n\n cmd_args.append(hostname)\n\n try:\n raw_result = subprocess.Popen(\n cmd_args, stderr=subprocess.PIPE, stdout=subprocess.PIPE\n ).communicate()[0]\n\n if sys.version_info >= (3,):\n # This function might call getpreferred encoding unless we pass\n # do_setlocale=False.\n encoding = locale.getpreferredencoding(do_setlocale=False)\n result_list_str = raw_result.decode(encoding)\n else:\n result_list_str = str(raw_result)\n\n result_list = list(filter(None, result_list_str.split(\"\\n\")))\n\n # NOTE: Python3 supports the FileNotFoundError, the errono.ENOENT is for py2 compat\n # for Python3:\n # except FileNotFoundError as e:\n except OSError as e:\n if e.errno == errno.ENOENT:\n return (\n False,\n \"Can't find dig installed in the path (usually /usr/bin/dig). If \"\n \"dig isn't installed, you can install it with 'sudo yum install \"\n \"bind-utils' or 'sudo apt install dnsutils'\",\n )\n else:\n raise e\n\n if int(count) > len(result_list) or count <= 0:\n count = len(result_list)\n\n output = result_list[0:count]\n if rand is True:\n random.shuffle(output)\n return output\n", "path": "contrib/linux/actions/dig.py"}], "after_files": [{"content": "#! 
/usr/bin/python\n\n# Copyright 2020 The StackStorm Authors.\n# Copyright 2019 Extreme Networks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport errno\nimport locale\nimport subprocess\nimport random\nimport sys\n\nfrom st2common.runners.base_action import Action\n\n\nclass DigAction(Action):\n def run(self, rand, count, nameserver, hostname, queryopts, querytype):\n opt_list = []\n output = []\n\n cmd_args = [\"dig\"]\n if nameserver:\n nameserver = \"@\" + nameserver\n cmd_args.append(nameserver)\n\n if isinstance(queryopts, str) and \",\" in queryopts:\n opt_list = queryopts.split(\",\")\n else:\n opt_list.append(queryopts)\n\n cmd_args.extend([\"+\" + option for option in opt_list])\n\n cmd_args.append(hostname)\n cmd_args.append(querytype)\n\n try:\n raw_result = subprocess.Popen(\n cmd_args, stderr=subprocess.PIPE, stdout=subprocess.PIPE\n ).communicate()[0]\n\n if sys.version_info >= (3,):\n # This function might call getpreferred encoding unless we pass\n # do_setlocale=False.\n encoding = locale.getpreferredencoding(do_setlocale=False)\n result_list_str = raw_result.decode(encoding)\n else:\n result_list_str = str(raw_result)\n\n # Better format the output when the type is TXT\n if querytype.lower() == \"txt\":\n result_list_str = result_list_str.replace('\"', \"\")\n\n result_list = list(filter(None, result_list_str.split(\"\\n\")))\n\n # NOTE: Python3 supports the FileNotFoundError, the errono.ENOENT is for py2 compat\n # for Python3:\n # except FileNotFoundError as e:\n except OSError as e:\n if e.errno == errno.ENOENT:\n return (\n False,\n \"Can't find dig installed in the path (usually /usr/bin/dig). If \"\n \"dig isn't installed, you can install it with 'sudo yum install \"\n \"bind-utils' or 'sudo apt install dnsutils'\",\n )\n else:\n raise e\n\n if int(count) > len(result_list) or count <= 0:\n count = len(result_list)\n\n output = result_list[0:count]\n if rand is True:\n random.shuffle(output)\n return output\n", "path": "contrib/linux/actions/dig.py"}]} | 1,468 | 261 |
gh_patches_debug_12944 | rasdani/github-patches | git_diff | Nitrate__Nitrate-438 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Drop Django 1.11
AC:
- Remove from `tox.ini`
- Remove from `.travis.yml`
- Update the Django version range in `setup.py`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 from setuptools import setup, find_packages
4
5
6 with open('VERSION.txt', 'r') as f:
7 pkg_version = f.read().strip()
8
9
10 def get_long_description():
11 with open('README.rst', 'r') as f:
12 return f.read()
13
14
15 install_requires = [
16 'beautifulsoup4 >= 4.1.1',
17 'django >= 1.11,<3.0',
18 'django-contrib-comments == 1.8.0',
19 'django-tinymce == 2.7.0',
20 'django-uuslug == 1.1.8',
21 'html2text',
22 'odfpy >= 0.9.6',
23 'python-bugzilla',
24 'xmltodict',
25 'kobo == 0.9.0'
26 ]
27
28 extras_require = {
29 'mysql': ['mysqlclient >= 1.2.3'],
30 'pgsql': ['psycopg2 == 2.7.5'],
31
32 # Required for tcms.auth.backends.KerberosBackend
33 'krbauth': [
34 'kerberos == 1.2.5'
35 ],
36
37 # Packages for building documentation
38 'docs': [
39 'Sphinx >= 1.1.2',
40 'sphinx_rtd_theme',
41 ],
42
43 # Necessary packages for running tests
44 'tests': [
45 'beautifulsoup4',
46 'coverage',
47 'factory_boy',
48 'flake8',
49 'mock',
50 'pytest < 4.2.0',
51 'pytest-cov',
52 'pytest-django',
53 ],
54
55 # Contain tools that assists the development
56 'devtools': [
57 'django-debug-toolbar == 1.7',
58 'tox',
59 'django-extensions',
60 'pygraphviz',
61 'future-breakpoint',
62 ],
63
64 # Required packages required to run async tasks
65 'async': [
66 'celery == 4.2.0',
67 ],
68
69 'multiauth': [
70 'social-auth-app-django == 3.1.0',
71 ]
72 }
73
74 setup(
75 name='Nitrate',
76 version=pkg_version,
77 description='Test Case Management System',
78 long_description=get_long_description(),
79 author='Nitrate Team',
80 maintainer='Chenxiong Qi',
81 maintainer_email='[email protected]',
82 url='https://github.com/Nitrate/Nitrate/',
83 license='GPLv2+',
84 keywords='test case',
85 install_requires=install_requires,
86 extras_require=extras_require,
87 python_requires='>=3.6',
88 package_dir={'': 'src'},
89 packages=find_packages('src', exclude=['test*']),
90 include_package_data=True,
91 zip_safe=False,
92 classifiers=[
93 'Framework :: Django',
94 'Framework :: Django :: 1.11',
95 'Framework :: Django :: 2.0',
96 'Framework :: Django :: 2.1',
97 'Intended Audience :: Developers',
98 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
99 'Programming Language :: Python :: 3',
100 'Programming Language :: Python :: 3.6',
101 'Programming Language :: Python :: 3.7',
102 'Programming Language :: Python :: 3 :: Only',
103 'Topic :: Software Development :: Quality Assurance',
104 'Topic :: Software Development :: Testing',
105 ],
106 project_urls={
107 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues',
108 'Source Code': 'https://github.com/Nitrate/Nitrate',
109 'Documentation': 'https://nitrate.readthedocs.io/',
110 },
111 )
112
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -14,7 +14,7 @@
install_requires = [
'beautifulsoup4 >= 4.1.1',
- 'django >= 1.11,<3.0',
+ 'django >= 2.0,<3.0',
'django-contrib-comments == 1.8.0',
'django-tinymce == 2.7.0',
'django-uuslug == 1.1.8',
@@ -91,7 +91,6 @@
zip_safe=False,
classifiers=[
'Framework :: Django',
- 'Framework :: Django :: 1.11',
'Framework :: Django :: 2.0',
'Framework :: Django :: 2.1',
'Intended Audience :: Developers',
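As a side note, the tightened requirement range can be sanity-checked with the third-party `packaging` library; a quick illustrative check, not part of the patch:
```python
from packaging.specifiers import SpecifierSet

spec = SpecifierSet(">=2.0,<3.0")
assert "1.11" not in spec  # Django 1.11 is dropped
assert "2.2" in spec       # 2.x releases still satisfy the range
```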
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -14,7 +14,7 @@\n \n install_requires = [\n 'beautifulsoup4 >= 4.1.1',\n- 'django >= 1.11,<3.0',\n+ 'django >= 2.0,<3.0',\n 'django-contrib-comments == 1.8.0',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n@@ -91,7 +91,6 @@\n zip_safe=False,\n classifiers=[\n 'Framework :: Django',\n- 'Framework :: Django :: 1.11',\n 'Framework :: Django :: 2.0',\n 'Framework :: Django :: 2.1',\n 'Intended Audience :: Developers',\n", "issue": "Drop Django 1.11\nAC:\r\n\r\n- Remove from `tox.ini`\r\n- Remove from `.travis.yml`\r\n- Update Django verison range in `setup.py`\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\ninstall_requires = [\n 'beautifulsoup4 >= 4.1.1',\n 'django >= 1.11,<3.0',\n 'django-contrib-comments == 1.8.0',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n 'html2text',\n 'odfpy >= 0.9.6',\n 'python-bugzilla',\n 'xmltodict',\n 'kobo == 0.9.0'\n]\n\nextras_require = {\n 'mysql': ['mysqlclient >= 1.2.3'],\n 'pgsql': ['psycopg2 == 2.7.5'],\n\n # Required for tcms.auth.backends.KerberosBackend\n 'krbauth': [\n 'kerberos == 1.2.5'\n ],\n\n # Packages for building documentation\n 'docs': [\n 'Sphinx >= 1.1.2',\n 'sphinx_rtd_theme',\n ],\n\n # Necessary packages for running tests\n 'tests': [\n 'beautifulsoup4',\n 'coverage',\n 'factory_boy',\n 'flake8',\n 'mock',\n 'pytest < 4.2.0',\n 'pytest-cov',\n 'pytest-django',\n ],\n\n # Contain tools that assists the development\n 'devtools': [\n 'django-debug-toolbar == 1.7',\n 'tox',\n 'django-extensions',\n 'pygraphviz',\n 'future-breakpoint',\n ],\n\n # Required packages required to run async tasks\n 'async': [\n 'celery == 4.2.0',\n ],\n\n 'multiauth': [\n 'social-auth-app-django == 3.1.0',\n ]\n}\n\nsetup(\n name='Nitrate',\n version=pkg_version,\n description='Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n install_requires=install_requires,\n extras_require=extras_require,\n python_requires='>=3.6',\n package_dir={'': 'src'},\n packages=find_packages('src', exclude=['test*']),\n include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Framework :: Django',\n 'Framework :: Django :: 1.11',\n 'Framework :: Django :: 2.0',\n 'Framework :: Django :: 2.1',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n project_urls={\n 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues',\n 'Source Code': 'https://github.com/Nitrate/Nitrate',\n 'Documentation': 'https://nitrate.readthedocs.io/',\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as 
f:\n return f.read()\n\n\ninstall_requires = [\n 'beautifulsoup4 >= 4.1.1',\n 'django >= 2.0,<3.0',\n 'django-contrib-comments == 1.8.0',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n 'html2text',\n 'odfpy >= 0.9.6',\n 'python-bugzilla',\n 'xmltodict',\n 'kobo == 0.9.0'\n]\n\nextras_require = {\n 'mysql': ['mysqlclient >= 1.2.3'],\n 'pgsql': ['psycopg2 == 2.7.5'],\n\n # Required for tcms.auth.backends.KerberosBackend\n 'krbauth': [\n 'kerberos == 1.2.5'\n ],\n\n # Packages for building documentation\n 'docs': [\n 'Sphinx >= 1.1.2',\n 'sphinx_rtd_theme',\n ],\n\n # Necessary packages for running tests\n 'tests': [\n 'beautifulsoup4',\n 'coverage',\n 'factory_boy',\n 'flake8',\n 'mock',\n 'pytest < 4.2.0',\n 'pytest-cov',\n 'pytest-django',\n ],\n\n # Contain tools that assists the development\n 'devtools': [\n 'django-debug-toolbar == 1.7',\n 'tox',\n 'django-extensions',\n 'pygraphviz',\n 'future-breakpoint',\n ],\n\n # Required packages required to run async tasks\n 'async': [\n 'celery == 4.2.0',\n ],\n\n 'multiauth': [\n 'social-auth-app-django == 3.1.0',\n ]\n}\n\nsetup(\n name='Nitrate',\n version=pkg_version,\n description='Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n install_requires=install_requires,\n extras_require=extras_require,\n python_requires='>=3.6',\n package_dir={'': 'src'},\n packages=find_packages('src', exclude=['test*']),\n include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Framework :: Django',\n 'Framework :: Django :: 2.0',\n 'Framework :: Django :: 2.1',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n project_urls={\n 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues',\n 'Source Code': 'https://github.com/Nitrate/Nitrate',\n 'Documentation': 'https://nitrate.readthedocs.io/',\n },\n)\n", "path": "setup.py"}]} | 1,321 | 189 |
gh_patches_debug_5982 | rasdani/github-patches | git_diff | mdn__kuma-6250 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Macro search results are mangled for non-en-US locales
See for example https://wiki.developer.mozilla.org/en-US/search?locale=*&kumascript_macros=WebExtAllExamples&topic=none
This lists all pages that call WebExtAllExamples, across all locales. One entry looks like:
<img width="893" alt="Screen Shot 2019-11-21 at 4 30 25 PM" src="https://user-images.githubusercontent.com/432915/69387936-3e5d4780-0c7c-11ea-9347-5916d638d12d.png">
This is the German translation of the https://wiki.developer.mozilla.org/en-US/docs/Mozilla/Add-ons/WebExtensions/Examples page.
But the first link, "**Beispiele für Erweiterungen**", has the en-US locale in the URL, like this: https://wiki.developer.mozilla.org/en-US/docs/Mozilla/Add-ons/WebExtensions/Beispiele - note the translated slug but the en-US locale. If I click it, I get "Create a new page", because that page doesn't exist.
After the short description, the entry is supposed to have "`${url} Score: 82.20941 translated from ${original}`", where `url` is the localized page and `original` is the en-US version. But these are wrong too:
* `url`: https://wiki.developer.mozilla.org/en-US/docs/Mozilla/Add-ons/WebExtensions/Beispiele - nonexistent page with en-US locale but de slug
* `original`: https://developer.mozilla.org/de/docs/Mozilla/Add-ons/WebExtensions/Beispiele - the proper value for `url`
I've seen some cases where the "`${url} Score: 82.20941 translated from ${original}`" bit doesn't appear at all, and then there is no usable link to the actual page; I have to guess the locale to be able to fix the link.
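To make the expected behaviour concrete, here is a minimal sketch of how the link should be built; the URL name `wiki.document` and the `slug` attribute are assumptions for illustration:
```python
from django.conf import settings
from kuma.core.urlresolvers import reverse

def expected_url(doc):
    # Reverse with the document's own locale ('de' for the translation),
    # not the request locale ('en-US'), or the locale/slug pair won't exist.
    locale = getattr(doc, "locale", settings.LANGUAGE_CODE)
    return reverse("wiki.document", locale=locale, args=[doc.slug])
```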
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kuma/search/fields.py`
Content:
```
1 from django.conf import settings
2 from rest_framework import serializers
3
4 from kuma.core.urlresolvers import reverse
5
6
7 class SearchQueryField(serializers.ReadOnlyField):
8 """
9 Field that returns the search query of the current request.
10 """
11 def __init__(self, *args, **kwargs):
12 kwargs['source'] = '*'
13 super(SearchQueryField, self).__init__(*args, **kwargs)
14
15 def to_representation(self, value):
16 request = self.context.get('request')
17 if request is None:
18 return ''
19 else:
20 return request.query_params.get('q', None)
21
22
23 class SiteURLField(serializers.ReadOnlyField):
24 """
25 A serializer field for creating URL for the given objects with the
26 given ``args``/``kwargs`` and a required ``locale`` attribute.
27 """
28 def __init__(self, url_name, args=None, kwargs=None):
29 self.url_name = url_name
30 self.args = args or []
31 self.kwargs = kwargs or []
32 super(SiteURLField, self).__init__(source='*')
33
34 def to_representation(self, value):
35 if not value:
36 return None
37 args = [getattr(value, arg) for arg in self.args]
38 kwargs = {arg: getattr(value, arg) for arg in self.kwargs}
39 locale = getattr(value, 'locale', settings.LANGUAGE_CODE)
40 path = reverse(self.url_name, locale=locale, args=args, kwargs=kwargs)
41 return '%s%s' % (settings.SITE_URL, path)
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kuma/search/fields.py b/kuma/search/fields.py
--- a/kuma/search/fields.py
+++ b/kuma/search/fields.py
@@ -37,5 +37,4 @@
args = [getattr(value, arg) for arg in self.args]
kwargs = {arg: getattr(value, arg) for arg in self.kwargs}
locale = getattr(value, 'locale', settings.LANGUAGE_CODE)
- path = reverse(self.url_name, locale=locale, args=args, kwargs=kwargs)
- return '%s%s' % (settings.SITE_URL, path)
+ return reverse(self.url_name, locale=locale, args=args, kwargs=kwargs)
| {"golden_diff": "diff --git a/kuma/search/fields.py b/kuma/search/fields.py\n--- a/kuma/search/fields.py\n+++ b/kuma/search/fields.py\n@@ -37,5 +37,4 @@\n args = [getattr(value, arg) for arg in self.args]\n kwargs = {arg: getattr(value, arg) for arg in self.kwargs}\n locale = getattr(value, 'locale', settings.LANGUAGE_CODE)\n- path = reverse(self.url_name, locale=locale, args=args, kwargs=kwargs)\n- return '%s%s' % (settings.SITE_URL, path)\n+ return reverse(self.url_name, locale=locale, args=args, kwargs=kwargs)\n", "issue": "Macro search results are mangled for non-en-US locales\nSee for example https://wiki.developer.mozilla.org/en-US/search?locale=*&kumascript_macros=WebExtAllExamples&topic=none\r\n\r\nThis lists all pages that call WebExtAllExamples, across all locales. One entry looks like:\r\n\r\n<img width=\"893\" alt=\"Screen Shot 2019-11-21 at 4 30 25 PM\" src=\"https://user-images.githubusercontent.com/432915/69387936-3e5d4780-0c7c-11ea-9347-5916d638d12d.png\">\r\n\r\nThis is the German translation of the https://wiki.developer.mozilla.org/en-US/docs/Mozilla/Add-ons/WebExtensions/Examples page.\r\n\r\nBut the first link, \"**Beispiele f\u00fcr Erweiterungen**\", has the en-US locale in the URL, like this: https://wiki.developer.mozilla.org/en-US/docs/Mozilla/Add-ons/WebExtensions/Beispiele - note the translated slug but the en-US locale. If I click it, I get \"Create a new page\", because that page doesn't exist.\r\n\r\nAfter the short description, the entry is supposed to have \"`${url} Score: 82.20941 translated from ${original}`, where `url` is the localized page, and `original` is the en-US version. But these are wrong too:\r\n\r\n* `url`: https://wiki.developer.mozilla.org/en-US/docs/Mozilla/Add-ons/WebExtensions/Beispiele - nonexistent page with en-US locale but de slug\r\n* `original`: https://developer.mozilla.org/de/docs/Mozilla/Add-ons/WebExtensions/Beispiele - the proper value for `url`\r\n\r\n I've seen some cases where the \"`${url} Score: 82.20941 translated from ${original}` bit doesn't appear, and then there is no usable link to the actual page, and I have to guess what the locale is, to be able to fix the link.\r\n\r\n\n", "before_files": [{"content": "from django.conf import settings\nfrom rest_framework import serializers\n\nfrom kuma.core.urlresolvers import reverse\n\n\nclass SearchQueryField(serializers.ReadOnlyField):\n \"\"\"\n Field that returns the search query of the current request.\n \"\"\"\n def __init__(self, *args, **kwargs):\n kwargs['source'] = '*'\n super(SearchQueryField, self).__init__(*args, **kwargs)\n\n def to_representation(self, value):\n request = self.context.get('request')\n if request is None:\n return ''\n else:\n return request.query_params.get('q', None)\n\n\nclass SiteURLField(serializers.ReadOnlyField):\n \"\"\"\n A serializer field for creating URL for the given objects with the\n given ``args``/``kwargs`` and a required ``locale`` attribute.\n \"\"\"\n def __init__(self, url_name, args=None, kwargs=None):\n self.url_name = url_name\n self.args = args or []\n self.kwargs = kwargs or []\n super(SiteURLField, self).__init__(source='*')\n\n def to_representation(self, value):\n if not value:\n return None\n args = [getattr(value, arg) for arg in self.args]\n kwargs = {arg: getattr(value, arg) for arg in self.kwargs}\n locale = getattr(value, 'locale', settings.LANGUAGE_CODE)\n path = reverse(self.url_name, locale=locale, args=args, kwargs=kwargs)\n return '%s%s' % (settings.SITE_URL, path)\n", "path": "kuma/search/fields.py"}], 
"after_files": [{"content": "from django.conf import settings\nfrom rest_framework import serializers\n\nfrom kuma.core.urlresolvers import reverse\n\n\nclass SearchQueryField(serializers.ReadOnlyField):\n \"\"\"\n Field that returns the search query of the current request.\n \"\"\"\n def __init__(self, *args, **kwargs):\n kwargs['source'] = '*'\n super(SearchQueryField, self).__init__(*args, **kwargs)\n\n def to_representation(self, value):\n request = self.context.get('request')\n if request is None:\n return ''\n else:\n return request.query_params.get('q', None)\n\n\nclass SiteURLField(serializers.ReadOnlyField):\n \"\"\"\n A serializer field for creating URL for the given objects with the\n given ``args``/``kwargs`` and a required ``locale`` attribute.\n \"\"\"\n def __init__(self, url_name, args=None, kwargs=None):\n self.url_name = url_name\n self.args = args or []\n self.kwargs = kwargs or []\n super(SiteURLField, self).__init__(source='*')\n\n def to_representation(self, value):\n if not value:\n return None\n args = [getattr(value, arg) for arg in self.args]\n kwargs = {arg: getattr(value, arg) for arg in self.kwargs}\n locale = getattr(value, 'locale', settings.LANGUAGE_CODE)\n return reverse(self.url_name, locale=locale, args=args, kwargs=kwargs)\n", "path": "kuma/search/fields.py"}]} | 1,106 | 149 |
gh_patches_debug_10682 | rasdani/github-patches | git_diff | encode__starlette-1609 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Gzip Middleware content-length is incorrect
The following exception is thrown when I use uvicorn to drive my Starlette project. After controlling for other variables, I am confident this is caused by the GZip middleware.
```
File "C:\Users\AberS\Documents\Github\index.py\.venv\lib\site-packages\h11\_writers.py", line 102, in send_eom
raise LocalProtocolError("Too little data for declared Content-Length")
h11._util.LocalProtocolError: Too little data for declared Content-Length
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `starlette/middleware/base.py`
Content:
```
1 import typing
2
3 import anyio
4
5 from starlette.requests import Request
6 from starlette.responses import Response, StreamingResponse
7 from starlette.types import ASGIApp, Receive, Scope, Send
8
9 RequestResponseEndpoint = typing.Callable[[Request], typing.Awaitable[Response]]
10 DispatchFunction = typing.Callable[
11 [Request, RequestResponseEndpoint], typing.Awaitable[Response]
12 ]
13
14
15 class BaseHTTPMiddleware:
16 def __init__(
17 self, app: ASGIApp, dispatch: typing.Optional[DispatchFunction] = None
18 ) -> None:
19 self.app = app
20 self.dispatch_func = self.dispatch if dispatch is None else dispatch
21
22 async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
23 if scope["type"] != "http":
24 await self.app(scope, receive, send)
25 return
26
27 async def call_next(request: Request) -> Response:
28 app_exc: typing.Optional[Exception] = None
29 send_stream, recv_stream = anyio.create_memory_object_stream()
30
31 async def coro() -> None:
32 nonlocal app_exc
33
34 async with send_stream:
35 try:
36 await self.app(scope, request.receive, send_stream.send)
37 except Exception as exc:
38 app_exc = exc
39
40 task_group.start_soon(coro)
41
42 try:
43 message = await recv_stream.receive()
44 except anyio.EndOfStream:
45 if app_exc is not None:
46 raise app_exc
47 raise RuntimeError("No response returned.")
48
49 assert message["type"] == "http.response.start"
50
51 async def body_stream() -> typing.AsyncGenerator[bytes, None]:
52 async with recv_stream:
53 async for message in recv_stream:
54 assert message["type"] == "http.response.body"
55 yield message.get("body", b"")
56
57 if app_exc is not None:
58 raise app_exc
59
60 response = StreamingResponse(
61 status_code=message["status"], content=body_stream()
62 )
63 response.raw_headers = message["headers"]
64 return response
65
66 async with anyio.create_task_group() as task_group:
67 request = Request(scope, receive=receive)
68 response = await self.dispatch_func(request, call_next)
69 await response(scope, receive, send)
70 task_group.cancel_scope.cancel()
71
72 async def dispatch(
73 self, request: Request, call_next: RequestResponseEndpoint
74 ) -> Response:
75 raise NotImplementedError() # pragma: no cover
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/starlette/middleware/base.py b/starlette/middleware/base.py
--- a/starlette/middleware/base.py
+++ b/starlette/middleware/base.py
@@ -52,7 +52,11 @@
async with recv_stream:
async for message in recv_stream:
assert message["type"] == "http.response.body"
- yield message.get("body", b"")
+ body = message.get("body", b"")
+ if body:
+ yield body
+ if not message.get("more_body", False):
+ break
if app_exc is not None:
raise app_exc
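The added `more_body` check is the heart of the fix: an `http.response.body` stream ends with the first message that omits `more_body=True`, and a consumer that ignores the flag can forward a byte count that disagrees with the declared `Content-Length`. A minimal sketch of the contract, assuming a generic async message source:
```python
async def read_response_body(messages):
    body = b""
    async for message in messages:
        assert message["type"] == "http.response.body"
        body += message.get("body", b"")
        if not message.get("more_body", False):
            break  # stream is complete; reading further would desync lengths
    return body
```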
| {"golden_diff": "diff --git a/starlette/middleware/base.py b/starlette/middleware/base.py\n--- a/starlette/middleware/base.py\n+++ b/starlette/middleware/base.py\n@@ -52,7 +52,11 @@\n async with recv_stream:\n async for message in recv_stream:\n assert message[\"type\"] == \"http.response.body\"\n- yield message.get(\"body\", b\"\")\n+ body = message.get(\"body\", b\"\")\n+ if body:\n+ yield body\n+ if not message.get(\"more_body\", False):\n+ break\n \n if app_exc is not None:\n raise app_exc\n", "issue": "Gzip Middleware content-length is incorrect\nThe following exception is thrown when I use uvicorn to drive my starlette project. After control variates, I am sure this is caused by Gzip Middleware.\r\n\r\n```\r\n File \"C:\\Users\\AberS\\Documents\\Github\\index.py\\.venv\\lib\\site-packages\\h11\\_writers.py\", line 102, in send_eom\r\n raise LocalProtocolError(\"Too little data for declared Content-Length\") \r\nh11._util.LocalProtocolError: Too little data for declared Content-Length\r\n```\r\n\n", "before_files": [{"content": "import typing\n\nimport anyio\n\nfrom starlette.requests import Request\nfrom starlette.responses import Response, StreamingResponse\nfrom starlette.types import ASGIApp, Receive, Scope, Send\n\nRequestResponseEndpoint = typing.Callable[[Request], typing.Awaitable[Response]]\nDispatchFunction = typing.Callable[\n [Request, RequestResponseEndpoint], typing.Awaitable[Response]\n]\n\n\nclass BaseHTTPMiddleware:\n def __init__(\n self, app: ASGIApp, dispatch: typing.Optional[DispatchFunction] = None\n ) -> None:\n self.app = app\n self.dispatch_func = self.dispatch if dispatch is None else dispatch\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n if scope[\"type\"] != \"http\":\n await self.app(scope, receive, send)\n return\n\n async def call_next(request: Request) -> Response:\n app_exc: typing.Optional[Exception] = None\n send_stream, recv_stream = anyio.create_memory_object_stream()\n\n async def coro() -> None:\n nonlocal app_exc\n\n async with send_stream:\n try:\n await self.app(scope, request.receive, send_stream.send)\n except Exception as exc:\n app_exc = exc\n\n task_group.start_soon(coro)\n\n try:\n message = await recv_stream.receive()\n except anyio.EndOfStream:\n if app_exc is not None:\n raise app_exc\n raise RuntimeError(\"No response returned.\")\n\n assert message[\"type\"] == \"http.response.start\"\n\n async def body_stream() -> typing.AsyncGenerator[bytes, None]:\n async with recv_stream:\n async for message in recv_stream:\n assert message[\"type\"] == \"http.response.body\"\n yield message.get(\"body\", b\"\")\n\n if app_exc is not None:\n raise app_exc\n\n response = StreamingResponse(\n status_code=message[\"status\"], content=body_stream()\n )\n response.raw_headers = message[\"headers\"]\n return response\n\n async with anyio.create_task_group() as task_group:\n request = Request(scope, receive=receive)\n response = await self.dispatch_func(request, call_next)\n await response(scope, receive, send)\n task_group.cancel_scope.cancel()\n\n async def dispatch(\n self, request: Request, call_next: RequestResponseEndpoint\n ) -> Response:\n raise NotImplementedError() # pragma: no cover\n", "path": "starlette/middleware/base.py"}], "after_files": [{"content": "import typing\n\nimport anyio\n\nfrom starlette.requests import Request\nfrom starlette.responses import Response, StreamingResponse\nfrom starlette.types import ASGIApp, Receive, Scope, Send\n\nRequestResponseEndpoint = typing.Callable[[Request], 
typing.Awaitable[Response]]\nDispatchFunction = typing.Callable[\n [Request, RequestResponseEndpoint], typing.Awaitable[Response]\n]\n\n\nclass BaseHTTPMiddleware:\n def __init__(\n self, app: ASGIApp, dispatch: typing.Optional[DispatchFunction] = None\n ) -> None:\n self.app = app\n self.dispatch_func = self.dispatch if dispatch is None else dispatch\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n if scope[\"type\"] != \"http\":\n await self.app(scope, receive, send)\n return\n\n async def call_next(request: Request) -> Response:\n app_exc: typing.Optional[Exception] = None\n send_stream, recv_stream = anyio.create_memory_object_stream()\n\n async def coro() -> None:\n nonlocal app_exc\n\n async with send_stream:\n try:\n await self.app(scope, request.receive, send_stream.send)\n except Exception as exc:\n app_exc = exc\n\n task_group.start_soon(coro)\n\n try:\n message = await recv_stream.receive()\n except anyio.EndOfStream:\n if app_exc is not None:\n raise app_exc\n raise RuntimeError(\"No response returned.\")\n\n assert message[\"type\"] == \"http.response.start\"\n\n async def body_stream() -> typing.AsyncGenerator[bytes, None]:\n async with recv_stream:\n async for message in recv_stream:\n assert message[\"type\"] == \"http.response.body\"\n body = message.get(\"body\", b\"\")\n if body:\n yield body\n if not message.get(\"more_body\", False):\n break\n\n if app_exc is not None:\n raise app_exc\n\n response = StreamingResponse(\n status_code=message[\"status\"], content=body_stream()\n )\n response.raw_headers = message[\"headers\"]\n return response\n\n async with anyio.create_task_group() as task_group:\n request = Request(scope, receive=receive)\n response = await self.dispatch_func(request, call_next)\n await response(scope, receive, send)\n task_group.cancel_scope.cancel()\n\n async def dispatch(\n self, request: Request, call_next: RequestResponseEndpoint\n ) -> Response:\n raise NotImplementedError() # pragma: no cover\n", "path": "starlette/middleware/base.py"}]} | 1,053 | 137 |
gh_patches_debug_20790 | rasdani/github-patches | git_diff | rwth-i6__returnn-1464 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Compile native op: native signal handler
When running `tools/compile_native_op.py`, for example for the `NativeLstm2` op, the list written to the output file (if specified) now looks like this:
```
/var/tmp/agerstenberger/returnn_native/native_signal_handler/3eb0034669/native_signal_handler.so
/var/tmp/agerstenberger/returnn_tf_cache/ops/NativeLstm2/8c9954fa8e/NativeLstm2.so
/var/tmp/agerstenberger/returnn_tf_cache/ops/GradOfNativeLstm2/d1a9d7605d/GradOfNativeLstm2.so
```
You would not expect to find `native_signal_handler.so` here.
Also, the `i6_core` job `CompileNativeOpJob` does not check the op names; it just copies the first entry as the op `.so` and the second entry as the gradient `.so`, which is now wrong.
So now I'm asking: should we fix it here, or add a more robust check in `i6_core`?
A fix here is simply moving the line
```python
NativeCodeCompiler.CollectedCompilers = []
```
so that it runs after the `init` function has been called.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/compile_native_op.py`
Content:
```
1 #!/usr/bin/env python3
2
3 """
4 This explicitly compiles some of the native ops, and will tell you the so-filenames.
5 Normally all native ops (e.g. NativeLstm2 etc) are compiled on-the-fly within RETURNN.
6 When you export the computation graph (e.g. via ``compile_tf_graph.py``),
7 you explicitly must load these native ops.
8 """
9
10 from __future__ import annotations
11
12 import os
13 import sys
14 import typing
15
16 import _setup_returnn_env # noqa
17 from returnn import __main__ as rnn
18 from returnn.log import log
19 import argparse
20 import returnn.util.basic as util
21
22
23 config = None # type: typing.Optional["returnn.config.Config"]
24
25
26 def init(config_filename, log_verbosity):
27 """
28 :param str config_filename: filename to config-file
29 :param int log_verbosity:
30 """
31 rnn.init_better_exchook()
32 rnn.init_thread_join_hack()
33 if config_filename:
34 print("Using config file %r." % config_filename)
35 assert os.path.exists(config_filename)
36 rnn.init_config(config_filename=config_filename, command_line_options=[])
37 global config
38 config = rnn.config
39 config.set("log", None)
40 config.set("log_verbosity", log_verbosity)
41 config.set("use_tensorflow", True)
42 rnn.init_log()
43 print("Returnn compile-native-op starting up.", file=log.v1)
44 rnn.returnn_greeting()
45 rnn.init_backend_engine()
46 assert util.BackendEngine.is_tensorflow_selected(), "this is only for TensorFlow"
47 rnn.init_faulthandler()
48 if "network" in config.typed_dict:
49 print("Loading network")
50 from returnn.tf.network import TFNetwork
51
52 network = TFNetwork(name="", config=config, rnd_seed=1, train_flag=False, eval_flag=True, search_flag=False)
53 network.construct_from_dict(config.typed_dict["network"])
54
55
56 def main(argv):
57 """
58 Main entry.
59 """
60 from returnn.tf.util.basic import CudaEnv, NativeCodeCompiler
61
62 CudaEnv.verbose_find_cuda = True
63 NativeCodeCompiler.CollectedCompilers = []
64
65 argparser = argparse.ArgumentParser(description="Compile some op")
66 argparser.add_argument("--config", help="filename to config-file")
67 argparser.add_argument("--native_op", help="op name. e.g. 'LstmGenericBase'")
68 argparser.add_argument(
69 "--blas_lib", default=None, help="specify which blas lib to use (path to .so or file name to search for)"
70 )
71 argparser.add_argument(
72 "--search_for_numpy_blas",
73 dest="search_for_numpy_blas",
74 action="store_true",
75 help="search for blas inside numpys .libs folder",
76 )
77 argparser.add_argument(
78 "--no_search_for_numpy_blas",
79 dest="search_for_numpy_blas",
80 action="store_false",
81 help="do not search for blas inside numpys .libs folder",
82 )
83 argparser.add_argument("--verbosity", default=4, type=int, help="5 for all seqs (default: 4)")
84 argparser.add_argument("--output_file", help="if given, will write the list of libs to this file")
85 args = argparser.parse_args(argv[1:])
86 init(config_filename=args.config, log_verbosity=args.verbosity)
87
88 import returnn.native_op as native_op
89 from returnn.tf.native_op import make_op, OpMaker
90
91 if args.native_op:
92 print("Loading native op %r" % args.native_op)
93 op_gen = getattr(native_op, args.native_op)
94 assert issubclass(op_gen, native_op.NativeOpGenBase)
95 make_op(
96 op_gen,
97 compiler_opts={"verbose": True},
98 search_for_numpy_blas=args.search_for_numpy_blas,
99 blas_lib=args.blas_lib,
100 )
101
102 libs = []
103 if OpMaker.with_cuda and OpMaker.tf_blas_gemm_workaround:
104 print("CUDA BLAS lib:", OpMaker.cuda_blas_gemm_so_filename())
105 libs.append(OpMaker.cuda_blas_gemm_so_filename())
106 elif OpMaker.with_cuda is False:
107 print("No CUDA.")
108
109 for compiler in NativeCodeCompiler.CollectedCompilers:
110 assert isinstance(compiler, NativeCodeCompiler)
111 print(compiler)
112 # noinspection PyProtectedMember
113 libs.append(compiler._so_filename)
114
115 if libs:
116 print("libs:")
117 for fn in libs:
118 print(fn)
119 else:
120 print("no libs compiled. use --native_op or --config")
121
122 if args.output_file:
123 with open(args.output_file, "w") as f:
124 for fn in libs:
125 f.write(fn + "\n")
126 print("Wrote lib list to file:", args.output_file)
127
128
129 if __name__ == "__main__":
130 main(sys.argv)
131
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tools/compile_native_op.py b/tools/compile_native_op.py
--- a/tools/compile_native_op.py
+++ b/tools/compile_native_op.py
@@ -57,10 +57,10 @@
"""
Main entry.
"""
- from returnn.tf.util.basic import CudaEnv, NativeCodeCompiler
+ from returnn.tf.util.basic import CudaEnv, OpCodeCompiler
CudaEnv.verbose_find_cuda = True
- NativeCodeCompiler.CollectedCompilers = []
+ OpCodeCompiler.CollectedCompilers = []
argparser = argparse.ArgumentParser(description="Compile some op")
argparser.add_argument("--config", help="filename to config-file")
@@ -106,8 +106,8 @@
elif OpMaker.with_cuda is False:
print("No CUDA.")
- for compiler in NativeCodeCompiler.CollectedCompilers:
- assert isinstance(compiler, NativeCodeCompiler)
+ for compiler in OpCodeCompiler.CollectedCompilers:
+ assert isinstance(compiler, OpCodeCompiler)
print(compiler)
# noinspection PyProtectedMember
libs.append(compiler._so_filename)
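The patch works because `CollectedCompilers` is looked up as a class attribute: setting it on the op-specific subclass leaves the base class's registry disabled, so helpers like the signal handler are no longer collected. A reduced sketch of the mechanism, with simplified constructors:
```python
class NativeCodeCompiler:
    CollectedCompilers = None  # registry is opt-in

    def __init__(self, name):
        self.name = name
        if self.CollectedCompilers is not None:
            self.CollectedCompilers.append(self)

class OpCodeCompiler(NativeCodeCompiler):
    pass

OpCodeCompiler.CollectedCompilers = []       # collect real ops only
NativeCodeCompiler("native_signal_handler")  # base class: not collected
OpCodeCompiler("NativeLstm2")                # subclass: collected
assert [c.name for c in OpCodeCompiler.CollectedCompilers] == ["NativeLstm2"]
```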
| {"golden_diff": "diff --git a/tools/compile_native_op.py b/tools/compile_native_op.py\n--- a/tools/compile_native_op.py\n+++ b/tools/compile_native_op.py\n@@ -57,10 +57,10 @@\n \"\"\"\n Main entry.\n \"\"\"\n- from returnn.tf.util.basic import CudaEnv, NativeCodeCompiler\n+ from returnn.tf.util.basic import CudaEnv, OpCodeCompiler\n \n CudaEnv.verbose_find_cuda = True\n- NativeCodeCompiler.CollectedCompilers = []\n+ OpCodeCompiler.CollectedCompilers = []\n \n argparser = argparse.ArgumentParser(description=\"Compile some op\")\n argparser.add_argument(\"--config\", help=\"filename to config-file\")\n@@ -106,8 +106,8 @@\n elif OpMaker.with_cuda is False:\n print(\"No CUDA.\")\n \n- for compiler in NativeCodeCompiler.CollectedCompilers:\n- assert isinstance(compiler, NativeCodeCompiler)\n+ for compiler in OpCodeCompiler.CollectedCompilers:\n+ assert isinstance(compiler, OpCodeCompiler)\n print(compiler)\n # noinspection PyProtectedMember\n libs.append(compiler._so_filename)\n", "issue": "Compile native op: native signal handler\nWhen running `tools/compile_native_op.py` for example for `NativeLstm2` op, if the output file is specified it now looks like this:\r\n```\r\n/var/tmp/agerstenberger/returnn_native/native_signal_handler/3eb0034669/native_signal_handler.so\r\n/var/tmp/agerstenberger/returnn_tf_cache/ops/NativeLstm2/8c9954fa8e/NativeLstm2.so\r\n/var/tmp/agerstenberger/returnn_tf_cache/ops/GradOfNativeLstm2/d1a9d7605d/GradOfNativeLstm2.so\r\n```\r\n\r\nYou would not expect to find native_signal_handler.so here. \r\nAlso the `i6_core` job `CompileNativeOpJob` does not check names of the op but just copies the first entry and the second entry as gradient .so., which is now wrong.\r\n\r\nSo now i'm asking, should we fix it here or do a more robust check in `i6_core`?\r\n\r\nA fix here is very simply just moving the line\r\n```python\r\nNativeCodeCompiler.CollectedCompilers = []\r\n```\r\nafter the init function is called.\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n\"\"\"\nThis explicitly compiles some of the native ops, and will tell you the so-filenames.\nNormally all native ops (e.g. NativeLstm2 etc) are compiled on-the-fly within RETURNN.\nWhen you export the computation graph (e.g. 
via ``compile_tf_graph.py``),\nyou explicitly must load these native ops.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport os\nimport sys\nimport typing\n\nimport _setup_returnn_env # noqa\nfrom returnn import __main__ as rnn\nfrom returnn.log import log\nimport argparse\nimport returnn.util.basic as util\n\n\nconfig = None # type: typing.Optional[\"returnn.config.Config\"]\n\n\ndef init(config_filename, log_verbosity):\n \"\"\"\n :param str config_filename: filename to config-file\n :param int log_verbosity:\n \"\"\"\n rnn.init_better_exchook()\n rnn.init_thread_join_hack()\n if config_filename:\n print(\"Using config file %r.\" % config_filename)\n assert os.path.exists(config_filename)\n rnn.init_config(config_filename=config_filename, command_line_options=[])\n global config\n config = rnn.config\n config.set(\"log\", None)\n config.set(\"log_verbosity\", log_verbosity)\n config.set(\"use_tensorflow\", True)\n rnn.init_log()\n print(\"Returnn compile-native-op starting up.\", file=log.v1)\n rnn.returnn_greeting()\n rnn.init_backend_engine()\n assert util.BackendEngine.is_tensorflow_selected(), \"this is only for TensorFlow\"\n rnn.init_faulthandler()\n if \"network\" in config.typed_dict:\n print(\"Loading network\")\n from returnn.tf.network import TFNetwork\n\n network = TFNetwork(name=\"\", config=config, rnd_seed=1, train_flag=False, eval_flag=True, search_flag=False)\n network.construct_from_dict(config.typed_dict[\"network\"])\n\n\ndef main(argv):\n \"\"\"\n Main entry.\n \"\"\"\n from returnn.tf.util.basic import CudaEnv, NativeCodeCompiler\n\n CudaEnv.verbose_find_cuda = True\n NativeCodeCompiler.CollectedCompilers = []\n\n argparser = argparse.ArgumentParser(description=\"Compile some op\")\n argparser.add_argument(\"--config\", help=\"filename to config-file\")\n argparser.add_argument(\"--native_op\", help=\"op name. e.g. 
'LstmGenericBase'\")\n argparser.add_argument(\n \"--blas_lib\", default=None, help=\"specify which blas lib to use (path to .so or file name to search for)\"\n )\n argparser.add_argument(\n \"--search_for_numpy_blas\",\n dest=\"search_for_numpy_blas\",\n action=\"store_true\",\n help=\"search for blas inside numpys .libs folder\",\n )\n argparser.add_argument(\n \"--no_search_for_numpy_blas\",\n dest=\"search_for_numpy_blas\",\n action=\"store_false\",\n help=\"do not search for blas inside numpys .libs folder\",\n )\n argparser.add_argument(\"--verbosity\", default=4, type=int, help=\"5 for all seqs (default: 4)\")\n argparser.add_argument(\"--output_file\", help=\"if given, will write the list of libs to this file\")\n args = argparser.parse_args(argv[1:])\n init(config_filename=args.config, log_verbosity=args.verbosity)\n\n import returnn.native_op as native_op\n from returnn.tf.native_op import make_op, OpMaker\n\n if args.native_op:\n print(\"Loading native op %r\" % args.native_op)\n op_gen = getattr(native_op, args.native_op)\n assert issubclass(op_gen, native_op.NativeOpGenBase)\n make_op(\n op_gen,\n compiler_opts={\"verbose\": True},\n search_for_numpy_blas=args.search_for_numpy_blas,\n blas_lib=args.blas_lib,\n )\n\n libs = []\n if OpMaker.with_cuda and OpMaker.tf_blas_gemm_workaround:\n print(\"CUDA BLAS lib:\", OpMaker.cuda_blas_gemm_so_filename())\n libs.append(OpMaker.cuda_blas_gemm_so_filename())\n elif OpMaker.with_cuda is False:\n print(\"No CUDA.\")\n\n for compiler in NativeCodeCompiler.CollectedCompilers:\n assert isinstance(compiler, NativeCodeCompiler)\n print(compiler)\n # noinspection PyProtectedMember\n libs.append(compiler._so_filename)\n\n if libs:\n print(\"libs:\")\n for fn in libs:\n print(fn)\n else:\n print(\"no libs compiled. use --native_op or --config\")\n\n if args.output_file:\n with open(args.output_file, \"w\") as f:\n for fn in libs:\n f.write(fn + \"\\n\")\n print(\"Wrote lib list to file:\", args.output_file)\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n", "path": "tools/compile_native_op.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\n\"\"\"\nThis explicitly compiles some of the native ops, and will tell you the so-filenames.\nNormally all native ops (e.g. NativeLstm2 etc) are compiled on-the-fly within RETURNN.\nWhen you export the computation graph (e.g. 
via ``compile_tf_graph.py``),\nyou explicitly must load these native ops.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport os\nimport sys\nimport typing\n\nimport _setup_returnn_env # noqa\nfrom returnn import __main__ as rnn\nfrom returnn.log import log\nimport argparse\nimport returnn.util.basic as util\n\n\nconfig = None # type: typing.Optional[\"returnn.config.Config\"]\n\n\ndef init(config_filename, log_verbosity):\n \"\"\"\n :param str config_filename: filename to config-file\n :param int log_verbosity:\n \"\"\"\n rnn.init_better_exchook()\n rnn.init_thread_join_hack()\n if config_filename:\n print(\"Using config file %r.\" % config_filename)\n assert os.path.exists(config_filename)\n rnn.init_config(config_filename=config_filename, command_line_options=[])\n global config\n config = rnn.config\n config.set(\"log\", None)\n config.set(\"log_verbosity\", log_verbosity)\n config.set(\"use_tensorflow\", True)\n rnn.init_log()\n print(\"Returnn compile-native-op starting up.\", file=log.v1)\n rnn.returnn_greeting()\n rnn.init_backend_engine()\n assert util.BackendEngine.is_tensorflow_selected(), \"this is only for TensorFlow\"\n rnn.init_faulthandler()\n if \"network\" in config.typed_dict:\n print(\"Loading network\")\n from returnn.tf.network import TFNetwork\n\n network = TFNetwork(name=\"\", config=config, rnd_seed=1, train_flag=False, eval_flag=True, search_flag=False)\n network.construct_from_dict(config.typed_dict[\"network\"])\n\n\ndef main(argv):\n \"\"\"\n Main entry.\n \"\"\"\n from returnn.tf.util.basic import CudaEnv, OpCodeCompiler\n\n CudaEnv.verbose_find_cuda = True\n OpCodeCompiler.CollectedCompilers = []\n\n argparser = argparse.ArgumentParser(description=\"Compile some op\")\n argparser.add_argument(\"--config\", help=\"filename to config-file\")\n argparser.add_argument(\"--native_op\", help=\"op name. e.g. 
'LstmGenericBase'\")\n argparser.add_argument(\n \"--blas_lib\", default=None, help=\"specify which blas lib to use (path to .so or file name to search for)\"\n )\n argparser.add_argument(\n \"--search_for_numpy_blas\",\n dest=\"search_for_numpy_blas\",\n action=\"store_true\",\n help=\"search for blas inside numpys .libs folder\",\n )\n argparser.add_argument(\n \"--no_search_for_numpy_blas\",\n dest=\"search_for_numpy_blas\",\n action=\"store_false\",\n help=\"do not search for blas inside numpys .libs folder\",\n )\n argparser.add_argument(\"--verbosity\", default=4, type=int, help=\"5 for all seqs (default: 4)\")\n argparser.add_argument(\"--output_file\", help=\"if given, will write the list of libs to this file\")\n args = argparser.parse_args(argv[1:])\n init(config_filename=args.config, log_verbosity=args.verbosity)\n\n import returnn.native_op as native_op\n from returnn.tf.native_op import make_op, OpMaker\n\n if args.native_op:\n print(\"Loading native op %r\" % args.native_op)\n op_gen = getattr(native_op, args.native_op)\n assert issubclass(op_gen, native_op.NativeOpGenBase)\n make_op(\n op_gen,\n compiler_opts={\"verbose\": True},\n search_for_numpy_blas=args.search_for_numpy_blas,\n blas_lib=args.blas_lib,\n )\n\n libs = []\n if OpMaker.with_cuda and OpMaker.tf_blas_gemm_workaround:\n print(\"CUDA BLAS lib:\", OpMaker.cuda_blas_gemm_so_filename())\n libs.append(OpMaker.cuda_blas_gemm_so_filename())\n elif OpMaker.with_cuda is False:\n print(\"No CUDA.\")\n\n for compiler in OpCodeCompiler.CollectedCompilers:\n assert isinstance(compiler, OpCodeCompiler)\n print(compiler)\n # noinspection PyProtectedMember\n libs.append(compiler._so_filename)\n\n if libs:\n print(\"libs:\")\n for fn in libs:\n print(fn)\n else:\n print(\"no libs compiled. use --native_op or --config\")\n\n if args.output_file:\n with open(args.output_file, \"w\") as f:\n for fn in libs:\n f.write(fn + \"\\n\")\n print(\"Wrote lib list to file:\", args.output_file)\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n", "path": "tools/compile_native_op.py"}]} | 1,851 | 248 |
gh_patches_debug_23570 | rasdani/github-patches | git_diff | OCHA-DAP__hdx-ckan-452 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Implement GA tracking of downloads
From Luis:
_I've done some research on how to track the number of downloads on the website. We can track those events using Google Analytics, as you suggested. There is a slight code change that has to be implemented, following Google Analytics' developer manual [here](https://developers.google.com/analytics/devguides/collection/analyticsjs/events). It is a bit more refined than copying and pasting code, although at a glance it doesn't seem extremely complicated._
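The analytics.js approach Luis references fires events from the browser (roughly `ga('send', 'event', category, action, label)` per the linked manual). An alternative that fits a Python backend is to record the event server-side at download time via the Measurement Protocol; a minimal sketch, where the tracking ID, client ID, and field values are placeholders:
```python
import requests

def track_download(tracking_id, client_id, dataset_id, resource_id):
    # One Universal Analytics "event" hit via the Measurement Protocol.
    requests.post("https://www.google-analytics.com/collect", data={
        "v": 1,                  # protocol version
        "tid": tracking_id,      # e.g. "UA-XXXXXXX-1"
        "cid": client_id,        # anonymous client identifier
        "t": "event",
        "ec": "resource",        # event category
        "ea": "download",        # event action
        "el": "%s/%s" % (dataset_id, resource_id),  # event label
    })
```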
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ckanext-metadata_fields/ckanext/metadata_fields/plugin.py`
Content:
```
1 '''
2 Created on Apr 10, 2014
3
4 @author:alexandru-m-g
5 '''
6 import logging
7
8 import ckan.plugins as plugins
9 import ckan.plugins.toolkit as tk
10 from routes.mapper import SubMapper
11
12 import ckanext.metadata_fields.custom_validator as vd
13 import ckanext.metadata_fields.update as update
14
15 def list_of_all_groups():
16 groups = tk.get_action('group_list')(data_dict={'all_fields': True})
17 return groups
18
19
20 class HdxMetadataFieldsPlugin(plugins.SingletonPlugin, tk.DefaultDatasetForm):
21 plugins.implements(plugins.IConfigurer, inherit=False)
22 plugins.implements(plugins.IRoutes, inherit=True)
23 plugins.implements(plugins.IDatasetForm, inherit=False)
24 plugins.implements(plugins.ITemplateHelpers)
25 plugins.implements(plugins.IActions)
26
27 def update_config(self, config):
28 tk.add_template_directory(config, 'templates')
29
30 def before_map(self, map):
31 with SubMapper(map, controller='ckanext.metadata_fields.dataset_controller:DatasetController') as m:
32 m.connect('add dataset', '/dataset/new', action='new')
33 m.connect('/dataset/{action}/{id}',
34 requirements=dict(action='|'.join([
35 'new_metadata',
36 'new_resource',
37 ])))
38 return map
39
40 def is_fallback(self):
41 return True
42
43 def package_types(self):
44 # default - no specific package type
45 return []
46
47 def _modify_package_schema(self, schema):
48
49 schema.update({
50 'package_creator': [tk.get_validator('not_empty'),
51 tk.get_converter('convert_to_extras')],
52 'groups_list': [vd.groups_not_empty],
53 'caveats' : [tk.get_validator('ignore_missing'),
54 tk.get_converter('convert_to_extras')],
55 'dataset_source' : [tk.get_validator('not_empty'),
56 tk.get_converter('convert_to_extras')],
57 'dataset_date' : [tk.get_validator('ignore_missing'),
58 tk.get_converter('convert_to_extras')],
59 'methodology' : [tk.get_validator('ignore_missing'),
60 tk.get_converter('convert_to_extras')],
61 })
62
63 return schema
64
65
66 def create_package_schema(self):
67 schema = super(HdxMetadataFieldsPlugin, self).create_package_schema()
68 schema = self._modify_package_schema(schema)
69 return schema
70
71 def update_package_schema(self):
72 schema = super(HdxMetadataFieldsPlugin, self).update_package_schema()
73 schema = self._modify_package_schema(schema)
74 return schema
75
76 def show_package_schema(self):
77 schema = super(HdxMetadataFieldsPlugin, self).show_package_schema()
78
79 schema.update({
80 'package_creator': [tk.get_converter('convert_from_extras'),
81 tk.get_validator('ignore_missing')],
82 'caveats' : [tk.get_converter('convert_from_extras'),
83 tk.get_validator('ignore_missing')],
84 'dataset_source' : [tk.get_converter('convert_from_extras'),
85 tk.get_validator('ignore_missing')],
86 'dataset_date' : [tk.get_converter('convert_from_extras'),
87 tk.get_validator('ignore_missing')],
88 'methodology' : [tk.get_converter('convert_from_extras'),
89 tk.get_validator('ignore_missing')],
90 })
91 return schema
92
93
94 def get_helpers(self):
95 return {'list_of_all_groups': list_of_all_groups}
96
97 def get_actions(self):
98 return {'package_update': update.package_update}
99
100
101
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ckanext-metadata_fields/ckanext/metadata_fields/plugin.py b/ckanext-metadata_fields/ckanext/metadata_fields/plugin.py
--- a/ckanext-metadata_fields/ckanext/metadata_fields/plugin.py
+++ b/ckanext-metadata_fields/ckanext/metadata_fields/plugin.py
@@ -47,6 +47,7 @@
def _modify_package_schema(self, schema):
schema.update({
+ 'notes': [tk.get_validator('not_empty')], #Notes == description. Makes description required
'package_creator': [tk.get_validator('not_empty'),
tk.get_converter('convert_to_extras')],
'groups_list': [vd.groups_not_empty],
@@ -75,8 +76,8 @@
def show_package_schema(self):
schema = super(HdxMetadataFieldsPlugin, self).show_package_schema()
-
schema.update({
+ 'notes': [tk.get_validator('not_empty')], #Notes == description. Makes description required
'package_creator': [tk.get_converter('convert_from_extras'),
tk.get_validator('ignore_missing')],
'caveats' : [tk.get_converter('convert_from_extras'),
| {"golden_diff": "diff --git a/ckanext-metadata_fields/ckanext/metadata_fields/plugin.py b/ckanext-metadata_fields/ckanext/metadata_fields/plugin.py\n--- a/ckanext-metadata_fields/ckanext/metadata_fields/plugin.py\n+++ b/ckanext-metadata_fields/ckanext/metadata_fields/plugin.py\n@@ -47,6 +47,7 @@\n def _modify_package_schema(self, schema):\n \n schema.update({\n+ 'notes': [tk.get_validator('not_empty')], #Notes == description. Makes description required\n 'package_creator': [tk.get_validator('not_empty'),\n tk.get_converter('convert_to_extras')],\n 'groups_list': [vd.groups_not_empty],\n@@ -75,8 +76,8 @@\n \n def show_package_schema(self):\n schema = super(HdxMetadataFieldsPlugin, self).show_package_schema()\n-\n schema.update({\n+ 'notes': [tk.get_validator('not_empty')], #Notes == description. Makes description required\n 'package_creator': [tk.get_converter('convert_from_extras'),\n tk.get_validator('ignore_missing')],\n 'caveats' : [tk.get_converter('convert_from_extras'),\n", "issue": "Implement GA tracking of downloads\nFrom Luis: \n\n_I've done some research about how to track the number of downloads in the website. We can track those events using Google Analytics as you suggested. There is a slight change of code that has to be implemented following Google Analytic's developer manual [here](https://developers.google.com/analytics/devguides/collection/analyticsjs/events). It is a bit more refined than copying and pasting code, although at a glance it doesn't seem to be extremely complicated._\n\n", "before_files": [{"content": "'''\nCreated on Apr 10, 2014\n\n@author:alexandru-m-g\n'''\nimport logging\n\nimport ckan.plugins as plugins\nimport ckan.plugins.toolkit as tk\nfrom routes.mapper import SubMapper\n\nimport ckanext.metadata_fields.custom_validator as vd\nimport ckanext.metadata_fields.update as update\n\ndef list_of_all_groups():\n groups = tk.get_action('group_list')(data_dict={'all_fields': True})\n return groups\n\n\nclass HdxMetadataFieldsPlugin(plugins.SingletonPlugin, tk.DefaultDatasetForm):\n plugins.implements(plugins.IConfigurer, inherit=False)\n plugins.implements(plugins.IRoutes, inherit=True)\n plugins.implements(plugins.IDatasetForm, inherit=False)\n plugins.implements(plugins.ITemplateHelpers)\n plugins.implements(plugins.IActions)\n\n def update_config(self, config):\n tk.add_template_directory(config, 'templates')\n\n def before_map(self, map):\n with SubMapper(map, controller='ckanext.metadata_fields.dataset_controller:DatasetController') as m:\n m.connect('add dataset', '/dataset/new', action='new')\n m.connect('/dataset/{action}/{id}',\n requirements=dict(action='|'.join([\n 'new_metadata',\n 'new_resource',\n ])))\n return map\n \n def is_fallback(self):\n return True\n\n def package_types(self):\n # default - no specific package type\n return []\n\n def _modify_package_schema(self, schema):\n \n schema.update({\n 'package_creator': [tk.get_validator('not_empty'),\n tk.get_converter('convert_to_extras')],\n 'groups_list': [vd.groups_not_empty],\n 'caveats' : [tk.get_validator('ignore_missing'),\n tk.get_converter('convert_to_extras')],\n 'dataset_source' : [tk.get_validator('not_empty'),\n tk.get_converter('convert_to_extras')],\n 'dataset_date' : [tk.get_validator('ignore_missing'),\n tk.get_converter('convert_to_extras')],\n 'methodology' : [tk.get_validator('ignore_missing'),\n tk.get_converter('convert_to_extras')],\n })\n\n return schema\n\n\n def create_package_schema(self):\n schema = super(HdxMetadataFieldsPlugin, self).create_package_schema()\n 
schema = self._modify_package_schema(schema)\n return schema\n\n def update_package_schema(self):\n schema = super(HdxMetadataFieldsPlugin, self).update_package_schema()\n schema = self._modify_package_schema(schema)\n return schema\n\n def show_package_schema(self):\n schema = super(HdxMetadataFieldsPlugin, self).show_package_schema()\n\n schema.update({\n 'package_creator': [tk.get_converter('convert_from_extras'),\n tk.get_validator('ignore_missing')],\n 'caveats' : [tk.get_converter('convert_from_extras'),\n tk.get_validator('ignore_missing')],\n 'dataset_source' : [tk.get_converter('convert_from_extras'),\n tk.get_validator('ignore_missing')],\n 'dataset_date' : [tk.get_converter('convert_from_extras'),\n tk.get_validator('ignore_missing')],\n 'methodology' : [tk.get_converter('convert_from_extras'),\n tk.get_validator('ignore_missing')],\n })\n return schema\n \n \n def get_helpers(self):\n return {'list_of_all_groups': list_of_all_groups}\n \n def get_actions(self):\n return {'package_update': update.package_update}\n\n\n", "path": "ckanext-metadata_fields/ckanext/metadata_fields/plugin.py"}], "after_files": [{"content": "'''\nCreated on Apr 10, 2014\n\n@author:alexandru-m-g\n'''\nimport logging\n\nimport ckan.plugins as plugins\nimport ckan.plugins.toolkit as tk\nfrom routes.mapper import SubMapper\n\nimport ckanext.metadata_fields.custom_validator as vd\nimport ckanext.metadata_fields.update as update\n\ndef list_of_all_groups():\n groups = tk.get_action('group_list')(data_dict={'all_fields': True})\n return groups\n\n\nclass HdxMetadataFieldsPlugin(plugins.SingletonPlugin, tk.DefaultDatasetForm):\n plugins.implements(plugins.IConfigurer, inherit=False)\n plugins.implements(plugins.IRoutes, inherit=True)\n plugins.implements(plugins.IDatasetForm, inherit=False)\n plugins.implements(plugins.ITemplateHelpers)\n plugins.implements(plugins.IActions)\n\n def update_config(self, config):\n tk.add_template_directory(config, 'templates')\n\n def before_map(self, map):\n with SubMapper(map, controller='ckanext.metadata_fields.dataset_controller:DatasetController') as m:\n m.connect('add dataset', '/dataset/new', action='new')\n m.connect('/dataset/{action}/{id}',\n requirements=dict(action='|'.join([\n 'new_metadata',\n 'new_resource',\n ])))\n return map\n \n def is_fallback(self):\n return True\n\n def package_types(self):\n # default - no specific package type\n return []\n\n def _modify_package_schema(self, schema):\n \n schema.update({\n 'notes': [tk.get_validator('not_empty')], #Notes == description. 
Makes description required\n 'package_creator': [tk.get_validator('not_empty'),\n tk.get_converter('convert_to_extras')],\n 'groups_list': [vd.groups_not_empty],\n 'caveats' : [tk.get_validator('ignore_missing'),\n tk.get_converter('convert_to_extras')],\n 'dataset_source' : [tk.get_validator('not_empty'),\n tk.get_converter('convert_to_extras')],\n 'dataset_date' : [tk.get_validator('ignore_missing'),\n tk.get_converter('convert_to_extras')],\n 'methodology' : [tk.get_validator('ignore_missing'),\n tk.get_converter('convert_to_extras')],\n })\n\n return schema\n\n\n def create_package_schema(self):\n schema = super(HdxMetadataFieldsPlugin, self).create_package_schema()\n schema = self._modify_package_schema(schema)\n return schema\n\n def update_package_schema(self):\n schema = super(HdxMetadataFieldsPlugin, self).update_package_schema()\n schema = self._modify_package_schema(schema)\n return schema\n\n def show_package_schema(self):\n schema = super(HdxMetadataFieldsPlugin, self).show_package_schema()\n schema.update({\n 'notes': [tk.get_validator('not_empty')], #Notes == description. Makes description required\n 'package_creator': [tk.get_converter('convert_from_extras'),\n tk.get_validator('ignore_missing')],\n 'caveats' : [tk.get_converter('convert_from_extras'),\n tk.get_validator('ignore_missing')],\n 'dataset_source' : [tk.get_converter('convert_from_extras'),\n tk.get_validator('ignore_missing')],\n 'dataset_date' : [tk.get_converter('convert_from_extras'),\n tk.get_validator('ignore_missing')],\n 'methodology' : [tk.get_converter('convert_from_extras'),\n tk.get_validator('ignore_missing')],\n })\n return schema\n \n \n def get_helpers(self):\n return {'list_of_all_groups': list_of_all_groups}\n \n def get_actions(self):\n return {'package_update': update.package_update}\n\n\n", "path": "ckanext-metadata_fields/ckanext/metadata_fields/plugin.py"}]} | 1,304 | 260 |
gh_patches_debug_14000 | rasdani/github-patches | git_diff | ivy-llc__ivy-22412 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
scan
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/jax/lax/control_flow_operators.py`
Content:
```
1 # global
2 import ivy
3 from ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back
4
5
6 @to_ivy_arrays_and_back
7 def cond(pred, true_fun, false_fun, *operands, operand=None, linear=None):
8 if operand is not None:
9 if operands:
10 raise ivy.utils.exceptions.IvyException(
11 "if `operand` is passed, positional `operands` should not be passed"
12 )
13 operands = (operand,)
14
15 if pred:
16 return true_fun(*operands)
17 return false_fun(*operands)
18
19
20 @to_ivy_arrays_and_back
21 def map(f, xs):
22 return ivy.stack([f(x) for x in xs])
23
24
25 @to_ivy_arrays_and_back
26 def switch(index, branches, *operands, operand=None):
27 if operand is not None:
28 if operands:
29 raise ivy.utils.exceptions.IvyException(
30 "if `operand` is passed, positional `operands` should not be passed"
31 )
32 operands = (operand,)
33
34 index = max(index, 0)
35 index = min(len(branches) - 1, index)
36 return branches[index](*operands)
37
38
39 @to_ivy_arrays_and_back
40 def fori_loop(lower, upper, body_fun, init_val):
41 if not (callable(body_fun)):
42 raise ivy.exceptions.IvyException(
43 "jax.lax.fori_loop: Argument body_fun should be callable."
44 )
45 val = init_val
46 for i in range(lower, upper):
47 val = body_fun(i, val)
48 return val
49
50
51 @to_ivy_arrays_and_back
52 def while_loop(cond_fun, body_fun, init_val):
53 if not (callable(body_fun) and callable(cond_fun)):
54 raise ivy.exceptions.IvyException(
55 "jax.lax.while_loop: Arguments body_fun and cond_fun should be callable."
56 )
57 val = init_val
58 while cond_fun(val):
59 val = body_fun(val)
60 return val
61
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/frontends/jax/lax/control_flow_operators.py b/ivy/functional/frontends/jax/lax/control_flow_operators.py
--- a/ivy/functional/frontends/jax/lax/control_flow_operators.py
+++ b/ivy/functional/frontends/jax/lax/control_flow_operators.py
@@ -58,3 +58,29 @@
while cond_fun(val):
val = body_fun(val)
return val
+
+
+@to_ivy_arrays_and_back
+def scan(f, init, xs, length=None, reverse=False, unroll=1):
+ if not (callable(f)):
+ raise ivy.exceptions.IvyException(
+ "jax.lax.scan: Argument f should be callable."
+ )
+ if xs is None and length is None:
+ raise ivy.exceptions.IvyException(
+ "jax.lax.scan: Either xs or length must be provided."
+ )
+
+ if length is not None and (not isinstance(length, int) or length < 0):
+ raise ivy.exceptions.IvyException(
+ "jax.lax.scan: length must be a non-negative integer."
+ )
+ if xs is None:
+ xs = [None] * length
+
+ carry = init
+ ys = []
+ for x in xs:
+ carry, y = f(carry, x)
+ ys.append(y)
+ return carry, ivy.stack(ys)
| {"golden_diff": "diff --git a/ivy/functional/frontends/jax/lax/control_flow_operators.py b/ivy/functional/frontends/jax/lax/control_flow_operators.py\n--- a/ivy/functional/frontends/jax/lax/control_flow_operators.py\n+++ b/ivy/functional/frontends/jax/lax/control_flow_operators.py\n@@ -58,3 +58,29 @@\n while cond_fun(val):\n val = body_fun(val)\n return val\n+\n+\n+@to_ivy_arrays_and_back\n+def scan(f, init, xs, length=None, reverse=False, unroll=1):\n+ if not (callable(f)):\n+ raise ivy.exceptions.IvyException(\n+ \"jax.lax.scan: Argument f should be callable.\"\n+ )\n+ if xs is None and length is None:\n+ raise ivy.exceptions.IvyException(\n+ \"jax.lax.scan: Either xs or length must be provided.\"\n+ )\n+\n+ if length is not None and (not isinstance(length, int) or length < 0):\n+ raise ivy.exceptions.IvyException(\n+ \"jax.lax.scan: length must be a non-negative integer.\"\n+ )\n+ if xs is None:\n+ xs = [None] * length\n+\n+ carry = init\n+ ys = []\n+ for x in xs:\n+ carry, y = f(carry, x)\n+ ys.append(y)\n+ return carry, ivy.stack(ys)\n", "issue": "scan\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back\n\n\n@to_ivy_arrays_and_back\ndef cond(pred, true_fun, false_fun, *operands, operand=None, linear=None):\n if operand is not None:\n if operands:\n raise ivy.utils.exceptions.IvyException(\n \"if `operand` is passed, positional `operands` should not be passed\"\n )\n operands = (operand,)\n\n if pred:\n return true_fun(*operands)\n return false_fun(*operands)\n\n\n@to_ivy_arrays_and_back\ndef map(f, xs):\n return ivy.stack([f(x) for x in xs])\n\n\n@to_ivy_arrays_and_back\ndef switch(index, branches, *operands, operand=None):\n if operand is not None:\n if operands:\n raise ivy.utils.exceptions.IvyException(\n \"if `operand` is passed, positional `operands` should not be passed\"\n )\n operands = (operand,)\n\n index = max(index, 0)\n index = min(len(branches) - 1, index)\n return branches[index](*operands)\n\n\n@to_ivy_arrays_and_back\ndef fori_loop(lower, upper, body_fun, init_val):\n if not (callable(body_fun)):\n raise ivy.exceptions.IvyException(\n \"jax.lax.fori_loop: Argument body_fun should be callable.\"\n )\n val = init_val\n for i in range(lower, upper):\n val = body_fun(i, val)\n return val\n\n\n@to_ivy_arrays_and_back\ndef while_loop(cond_fun, body_fun, init_val):\n if not (callable(body_fun) and callable(cond_fun)):\n raise ivy.exceptions.IvyException(\n \"jax.lax.while_loop: Arguments body_fun and cond_fun should be callable.\"\n )\n val = init_val\n while cond_fun(val):\n val = body_fun(val)\n return val\n", "path": "ivy/functional/frontends/jax/lax/control_flow_operators.py"}], "after_files": [{"content": "# global\nimport ivy\nfrom ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back\n\n\n@to_ivy_arrays_and_back\ndef cond(pred, true_fun, false_fun, *operands, operand=None, linear=None):\n if operand is not None:\n if operands:\n raise ivy.utils.exceptions.IvyException(\n \"if `operand` is passed, positional `operands` should not be passed\"\n )\n operands = (operand,)\n\n if pred:\n return true_fun(*operands)\n return false_fun(*operands)\n\n\n@to_ivy_arrays_and_back\ndef map(f, xs):\n return ivy.stack([f(x) for x in xs])\n\n\n@to_ivy_arrays_and_back\ndef switch(index, branches, *operands, operand=None):\n if operand is not None:\n if operands:\n raise ivy.utils.exceptions.IvyException(\n \"if `operand` is passed, positional `operands` should not be passed\"\n )\n 
operands = (operand,)\n\n index = max(index, 0)\n index = min(len(branches) - 1, index)\n return branches[index](*operands)\n\n\n@to_ivy_arrays_and_back\ndef fori_loop(lower, upper, body_fun, init_val):\n if not (callable(body_fun)):\n raise ivy.exceptions.IvyException(\n \"jax.lax.fori_loop: Argument body_fun should be callable.\"\n )\n val = init_val\n for i in range(lower, upper):\n val = body_fun(i, val)\n return val\n\n\n@to_ivy_arrays_and_back\ndef while_loop(cond_fun, body_fun, init_val):\n if not (callable(body_fun) and callable(cond_fun)):\n raise ivy.exceptions.IvyException(\n \"jax.lax.while_loop: Arguments body_fun and cond_fun should be callable.\"\n )\n val = init_val\n while cond_fun(val):\n val = body_fun(val)\n return val\n\n\n@to_ivy_arrays_and_back\ndef scan(f, init, xs, length=None, reverse=False, unroll=1):\n if not (callable(f)):\n raise ivy.exceptions.IvyException(\n \"jax.lax.scan: Argument f should be callable.\"\n )\n if xs is None and length is None:\n raise ivy.exceptions.IvyException(\n \"jax.lax.scan: Either xs or length must be provided.\"\n )\n\n if length is not None and (not isinstance(length, int) or length < 0):\n raise ivy.exceptions.IvyException(\n \"jax.lax.scan: length must be a non-negative integer.\"\n )\n if xs is None:\n xs = [None] * length\n\n carry = init\n ys = []\n for x in xs:\n carry, y = f(carry, x)\n ys.append(y)\n return carry, ivy.stack(ys)\n", "path": "ivy/functional/frontends/jax/lax/control_flow_operators.py"}]} | 809 | 324 |
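Stripped of the ivy-specific validation and stacking, the control flow that the new `scan` frontend implements is a carry-threading loop; a plain-Python mirror of its semantics:

```python
# Plain-Python mirror of jax.lax.scan semantics (no ivy required).
def scan(f, init, xs):
    carry, ys = init, []
    for x in xs:
        carry, y = f(carry, x)  # f returns (new_carry, per_step_output)
        ys.append(y)
    return carry, ys

final, ys = scan(lambda c, x: (c + x, c + x), 0, [1, 2, 3, 4])
assert (final, ys) == (10, [1, 3, 6, 10])  # running sum
```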
gh_patches_debug_27218 | rasdani/github-patches | git_diff | fedora-infra__bodhi-2906 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Drop bodhi.server.services.zz_redirects
This module exists to redirect legacy Bodhi 1 URLs to the Bodhi 2 counterparts, but I don't think we need it anymore. Bodhi 2 is not backwards compatible with Bodhi 1, and Bodhi 4 will also be further incompatible.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bodhi/server/services/zz_redirects.py`
Content:
```
1 # Copyright © 2015-2017 Red Hat, Inc.
2 #
3 # This file is part of Bodhi.
4 #
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
9 #
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with this program; if not, write to the Free Software
17 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
18 """
19 Handle general redirect stuff.
20
21 This module name gets a 'zz_' tacked on the front so that it comes last.
22 We need to catch /updates/{id}/request and /updates/{id}/edit first and those
23 get defined in the other service modules.
24 """
25
26 from cornice import Service
27 from pyramid.httpexceptions import HTTPFound
28
29 import bodhi.server.security
30
31
32 zz_bodhi1_update_redirect = Service(
33 name='bodhi1_update_redirect', path='/updates/{id}/{title}',
34 description='Redirect to old updates/ALIAS/TITLE urls',
35 cors_origins=bodhi.server.security.cors_origins_rw)
36
37
38 @zz_bodhi1_update_redirect.get()
39 def zz_get_bodhi1_update_redirect(request):
40 """
41 Redirect users from the Bodhi 1 update URL to the new path.
42
43 Args:
44 request (pyramid.request): The current web request.
45 Returns:
46 pyramid.httpexceptions.HTTPFound: A redirect to the same update in Bodhi's current URL
47 heirarchy.
48 """
49 return HTTPFound("/updates/{0}".format(request.matchdict['id']))
50
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bodhi/server/services/zz_redirects.py b/bodhi/server/services/zz_redirects.py
deleted file mode 100644
--- a/bodhi/server/services/zz_redirects.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright © 2015-2017 Red Hat, Inc.
-#
-# This file is part of Bodhi.
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License
-# as published by the Free Software Foundation; either version 2
-# of the License, or (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-"""
-Handle general redirect stuff.
-
-This module name gets a 'zz_' tacked on the front so that it comes last.
-We need to catch /updates/{id}/request and /updates/{id}/edit first and those
-get defined in the other service modules.
-"""
-
-from cornice import Service
-from pyramid.httpexceptions import HTTPFound
-
-import bodhi.server.security
-
-
-zz_bodhi1_update_redirect = Service(
- name='bodhi1_update_redirect', path='/updates/{id}/{title}',
- description='Redirect to old updates/ALIAS/TITLE urls',
- cors_origins=bodhi.server.security.cors_origins_rw)
-
-
-@zz_bodhi1_update_redirect.get()
-def zz_get_bodhi1_update_redirect(request):
- """
- Redirect users from the Bodhi 1 update URL to the new path.
-
- Args:
- request (pyramid.request): The current web request.
- Returns:
- pyramid.httpexceptions.HTTPFound: A redirect to the same update in Bodhi's current URL
- heirarchy.
- """
- return HTTPFound("/updates/{0}".format(request.matchdict['id']))
| {"golden_diff": "diff --git a/bodhi/server/services/zz_redirects.py b/bodhi/server/services/zz_redirects.py\ndeleted file mode 100644\n--- a/bodhi/server/services/zz_redirects.py\n+++ /dev/null\n@@ -1,49 +0,0 @@\n-# Copyright \u00a9 2015-2017 Red Hat, Inc.\n-#\n-# This file is part of Bodhi.\n-#\n-# This program is free software; you can redistribute it and/or\n-# modify it under the terms of the GNU General Public License\n-# as published by the Free Software Foundation; either version 2\n-# of the License, or (at your option) any later version.\n-#\n-# This program is distributed in the hope that it will be useful,\n-# but WITHOUT ANY WARRANTY; without even the implied warranty of\n-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n-# GNU General Public License for more details.\n-#\n-# You should have received a copy of the GNU General Public License\n-# along with this program; if not, write to the Free Software\n-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n-\"\"\"\n-Handle general redirect stuff.\n-\n-This module name gets a 'zz_' tacked on the front so that it comes last.\n-We need to catch /updates/{id}/request and /updates/{id}/edit first and those\n-get defined in the other service modules.\n-\"\"\"\n-\n-from cornice import Service\n-from pyramid.httpexceptions import HTTPFound\n-\n-import bodhi.server.security\n-\n-\n-zz_bodhi1_update_redirect = Service(\n- name='bodhi1_update_redirect', path='/updates/{id}/{title}',\n- description='Redirect to old updates/ALIAS/TITLE urls',\n- cors_origins=bodhi.server.security.cors_origins_rw)\n-\n-\n-@zz_bodhi1_update_redirect.get()\n-def zz_get_bodhi1_update_redirect(request):\n- \"\"\"\n- Redirect users from the Bodhi 1 update URL to the new path.\n-\n- Args:\n- request (pyramid.request): The current web request.\n- Returns:\n- pyramid.httpexceptions.HTTPFound: A redirect to the same update in Bodhi's current URL\n- heirarchy.\n- \"\"\"\n- return HTTPFound(\"/updates/{0}\".format(request.matchdict['id']))\n", "issue": "Drop bodhi.server.services.zz_redirects\nThis module exists to redirect legacy Bodhi 1 URLs to the Bodhi 2 counterparts, but I don't think we need it anymore. Bodhi 2 is not backwards compatible with Bodhi 1, and Bodhi 4 will also be further incompatible.\n", "before_files": [{"content": "# Copyright \u00a9 2015-2017 Red Hat, Inc.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"\nHandle general redirect stuff.\n\nThis module name gets a 'zz_' tacked on the front so that it comes last.\nWe need to catch /updates/{id}/request and /updates/{id}/edit first and those\nget defined in the other service modules.\n\"\"\"\n\nfrom cornice import Service\nfrom pyramid.httpexceptions import HTTPFound\n\nimport bodhi.server.security\n\n\nzz_bodhi1_update_redirect = Service(\n name='bodhi1_update_redirect', path='/updates/{id}/{title}',\n description='Redirect to old updates/ALIAS/TITLE urls',\n cors_origins=bodhi.server.security.cors_origins_rw)\n\n\n@zz_bodhi1_update_redirect.get()\ndef zz_get_bodhi1_update_redirect(request):\n \"\"\"\n Redirect users from the Bodhi 1 update URL to the new path.\n\n Args:\n request (pyramid.request): The current web request.\n Returns:\n pyramid.httpexceptions.HTTPFound: A redirect to the same update in Bodhi's current URL\n heirarchy.\n \"\"\"\n return HTTPFound(\"/updates/{0}\".format(request.matchdict['id']))\n", "path": "bodhi/server/services/zz_redirects.py"}], "after_files": [{"content": null, "path": "bodhi/server/services/zz_redirects.py"}]} | 854 | 538 |
gh_patches_debug_30030 | rasdani/github-patches | git_diff | OCA__server-tools-316 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[8.0][dead_mans_switch_client] Module crashes runbots
I'm seeing more and more runbots with :x: because of this module. [This seems to be the offending line](https://github.com/OCA/server-tools/blob/8.0/dead_mans_switch_client/models/dead_mans_switch_client.py#L54). Any clue on how to fix it?
Example runbot: https://runbot.odoo-community.org/runbot/build/3137787
CC @hbrunn.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dead_mans_switch_client/__openerp__.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # © 2015 Therp BV <http://therp.nl>
3 # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
4 {
5 "name": "Dead man's switch (client)",
6 "version": "8.0.1.0.0",
7 "author": "Therp BV,Odoo Community Association (OCA)",
8 "license": "AGPL-3",
9 "category": "Monitoring",
10 "summary": "Be notified when customers' odoo instances go down",
11 "depends": [
12 'base',
13 ],
14 "data": [
15 "data/ir_actions.xml",
16 "data/ir_cron.xml",
17 ],
18 }
19
```
Path: `dead_mans_switch_client/models/dead_mans_switch_client.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # © 2015 Therp BV <http://therp.nl>
3 # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
4 import json
5 import logging
6 import os
7 try:
8 import psutil
9 except ImportError:
10 psutil = None
11 import urllib2
12 from openerp import api, models
13
14
15 class DeadMansSwitchClient(models.AbstractModel):
16 _name = 'dead.mans.switch.client'
17 _register = True
18
19 @api.model
20 def _get_data(self):
21 ram = 0
22 cpu = 0
23 if psutil:
24 process = psutil.Process(os.getpid())
25 # psutil changed its api through versions
26 if process.parent:
27 if hasattr(process.parent, '__call__'):
28 process = process.parent()
29 else:
30 process = process.parent
31 if hasattr(process, 'memory_percent'):
32 ram = process.memory_percent()
33 if hasattr(process, 'cpu_percent'):
34 cpu = process.cpu_percent()
35 user_count = 0
36 if 'im_chat.presence' in self.env.registry:
37 user_count = len(self.env['im_chat.presence'].search([
38 ('status', '!=', 'offline'),
39 ]))
40 return {
41 'database_uuid': self.env['ir.config_parameter'].get_param(
42 'database.uuid'),
43 'cpu': cpu,
44 'ram': ram,
45 'user_count': user_count,
46 }
47
48 @api.model
49 def alive(self):
50 url = self.env['ir.config_parameter'].get_param(
51 'dead_mans_switch_client.url')
52 logger = logging.getLogger(__name__)
53 if not url:
54 logger.error('No server configured!')
55 return
56 data = self._get_data()
57 logger.debug('sending %s', data)
58 urllib2.urlopen(
59 urllib2.Request(
60 url,
61 json.dumps({
62 'jsonrpc': '2.0',
63 'method': 'call',
64 'params': data,
65 }),
66 {
67 'Content-Type': 'application/json',
68 }))
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dead_mans_switch_client/__openerp__.py b/dead_mans_switch_client/__openerp__.py
--- a/dead_mans_switch_client/__openerp__.py
+++ b/dead_mans_switch_client/__openerp__.py
@@ -3,7 +3,7 @@
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
"name": "Dead man's switch (client)",
- "version": "8.0.1.0.0",
+ "version": "8.0.1.0.1",
"author": "Therp BV,Odoo Community Association (OCA)",
"license": "AGPL-3",
"category": "Monitoring",
@@ -15,4 +15,7 @@
"data/ir_actions.xml",
"data/ir_cron.xml",
],
+ "demo": [
+ "demo/dead_mans_switch_client_demo.yml",
+ ],
}
diff --git a/dead_mans_switch_client/models/dead_mans_switch_client.py b/dead_mans_switch_client/models/dead_mans_switch_client.py
--- a/dead_mans_switch_client/models/dead_mans_switch_client.py
+++ b/dead_mans_switch_client/models/dead_mans_switch_client.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# © 2015 Therp BV <http://therp.nl>
+# © 2015 Grupo ESOC Ingeniería de Servicios, S.L.U. - Jairo Llopis
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import json
import logging
@@ -66,3 +67,19 @@
{
'Content-Type': 'application/json',
}))
+
+ @api.model
+ def _install_default_url(self):
+ """Set up a default URL."""
+ conf = self.env["ir.config_parameter"]
+ name = "dead_mans_switch_client.url"
+ param = conf.get_param(name)
+
+ if not param:
+ url = "{}/dead_mans_switch/alive".format(
+ conf.get_param(
+ "report.url",
+ conf.get_param(
+ "web.base.url",
+ "http://localhost")))
+ conf.set_param(name, url)
| {"golden_diff": "diff --git a/dead_mans_switch_client/__openerp__.py b/dead_mans_switch_client/__openerp__.py\n--- a/dead_mans_switch_client/__openerp__.py\n+++ b/dead_mans_switch_client/__openerp__.py\n@@ -3,7 +3,7 @@\n # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).\n {\n \"name\": \"Dead man's switch (client)\",\n- \"version\": \"8.0.1.0.0\",\n+ \"version\": \"8.0.1.0.1\",\n \"author\": \"Therp BV,Odoo Community Association (OCA)\",\n \"license\": \"AGPL-3\",\n \"category\": \"Monitoring\",\n@@ -15,4 +15,7 @@\n \"data/ir_actions.xml\",\n \"data/ir_cron.xml\",\n ],\n+ \"demo\": [\n+ \"demo/dead_mans_switch_client_demo.yml\",\n+ ],\n }\ndiff --git a/dead_mans_switch_client/models/dead_mans_switch_client.py b/dead_mans_switch_client/models/dead_mans_switch_client.py\n--- a/dead_mans_switch_client/models/dead_mans_switch_client.py\n+++ b/dead_mans_switch_client/models/dead_mans_switch_client.py\n@@ -1,5 +1,6 @@\n # -*- coding: utf-8 -*-\n # \u00a9 2015 Therp BV <http://therp.nl>\n+# \u00a9 2015 Grupo ESOC Ingenier\u00eda de Servicios, S.L.U. - Jairo Llopis\n # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).\n import json\n import logging\n@@ -66,3 +67,19 @@\n {\n 'Content-Type': 'application/json',\n }))\n+\n+ @api.model\n+ def _install_default_url(self):\n+ \"\"\"Set up a default URL.\"\"\"\n+ conf = self.env[\"ir.config_parameter\"]\n+ name = \"dead_mans_switch_client.url\"\n+ param = conf.get_param(name)\n+\n+ if not param:\n+ url = \"{}/dead_mans_switch/alive\".format(\n+ conf.get_param(\n+ \"report.url\",\n+ conf.get_param(\n+ \"web.base.url\",\n+ \"http://localhost\")))\n+ conf.set_param(name, url)\n", "issue": "[8.0][dead_mans_switch_client] Module crashes runbots\nI'm seeing more and more runbots with :x: because of this module. [This seems the offending line](https://github.com/OCA/server-tools/blob/8.0/dead_mans_switch_client/models/dead_mans_switch_client.py#L54). 
Any clue on how to fix it?\n\nExample runbot: https://runbot.odoo-community.org/runbot/build/3137787\n\nCC @hbrunn.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# \u00a9 2015 Therp BV <http://therp.nl>\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).\n{\n \"name\": \"Dead man's switch (client)\",\n \"version\": \"8.0.1.0.0\",\n \"author\": \"Therp BV,Odoo Community Association (OCA)\",\n \"license\": \"AGPL-3\",\n \"category\": \"Monitoring\",\n \"summary\": \"Be notified when customers' odoo instances go down\",\n \"depends\": [\n 'base',\n ],\n \"data\": [\n \"data/ir_actions.xml\",\n \"data/ir_cron.xml\",\n ],\n}\n", "path": "dead_mans_switch_client/__openerp__.py"}, {"content": "# -*- coding: utf-8 -*-\n# \u00a9 2015 Therp BV <http://therp.nl>\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).\nimport json\nimport logging\nimport os\ntry:\n import psutil\nexcept ImportError:\n psutil = None\nimport urllib2\nfrom openerp import api, models\n\n\nclass DeadMansSwitchClient(models.AbstractModel):\n _name = 'dead.mans.switch.client'\n _register = True\n\n @api.model\n def _get_data(self):\n ram = 0\n cpu = 0\n if psutil:\n process = psutil.Process(os.getpid())\n # psutil changed its api through versions\n if process.parent:\n if hasattr(process.parent, '__call__'):\n process = process.parent()\n else:\n process = process.parent\n if hasattr(process, 'memory_percent'):\n ram = process.memory_percent()\n if hasattr(process, 'cpu_percent'):\n cpu = process.cpu_percent()\n user_count = 0\n if 'im_chat.presence' in self.env.registry:\n user_count = len(self.env['im_chat.presence'].search([\n ('status', '!=', 'offline'),\n ]))\n return {\n 'database_uuid': self.env['ir.config_parameter'].get_param(\n 'database.uuid'),\n 'cpu': cpu,\n 'ram': ram,\n 'user_count': user_count,\n }\n\n @api.model\n def alive(self):\n url = self.env['ir.config_parameter'].get_param(\n 'dead_mans_switch_client.url')\n logger = logging.getLogger(__name__)\n if not url:\n logger.error('No server configured!')\n return\n data = self._get_data()\n logger.debug('sending %s', data)\n urllib2.urlopen(\n urllib2.Request(\n url,\n json.dumps({\n 'jsonrpc': '2.0',\n 'method': 'call',\n 'params': data,\n }),\n {\n 'Content-Type': 'application/json',\n }))\n", "path": "dead_mans_switch_client/models/dead_mans_switch_client.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# \u00a9 2015 Therp BV <http://therp.nl>\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).\n{\n \"name\": \"Dead man's switch (client)\",\n \"version\": \"8.0.1.0.1\",\n \"author\": \"Therp BV,Odoo Community Association (OCA)\",\n \"license\": \"AGPL-3\",\n \"category\": \"Monitoring\",\n \"summary\": \"Be notified when customers' odoo instances go down\",\n \"depends\": [\n 'base',\n ],\n \"data\": [\n \"data/ir_actions.xml\",\n \"data/ir_cron.xml\",\n ],\n \"demo\": [\n \"demo/dead_mans_switch_client_demo.yml\",\n ],\n}\n", "path": "dead_mans_switch_client/__openerp__.py"}, {"content": "# -*- coding: utf-8 -*-\n# \u00a9 2015 Therp BV <http://therp.nl>\n# \u00a9 2015 Grupo ESOC Ingenier\u00eda de Servicios, S.L.U. 
- Jairo Llopis\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).\nimport json\nimport logging\nimport os\ntry:\n import psutil\nexcept ImportError:\n psutil = None\nimport urllib2\nfrom openerp import api, models\n\n\nclass DeadMansSwitchClient(models.AbstractModel):\n _name = 'dead.mans.switch.client'\n _register = True\n\n @api.model\n def _get_data(self):\n ram = 0\n cpu = 0\n if psutil:\n process = psutil.Process(os.getpid())\n # psutil changed its api through versions\n if process.parent:\n if hasattr(process.parent, '__call__'):\n process = process.parent()\n else:\n process = process.parent\n if hasattr(process, 'memory_percent'):\n ram = process.memory_percent()\n if hasattr(process, 'cpu_percent'):\n cpu = process.cpu_percent()\n user_count = 0\n if 'im_chat.presence' in self.env.registry:\n user_count = len(self.env['im_chat.presence'].search([\n ('status', '!=', 'offline'),\n ]))\n return {\n 'database_uuid': self.env['ir.config_parameter'].get_param(\n 'database.uuid'),\n 'cpu': cpu,\n 'ram': ram,\n 'user_count': user_count,\n }\n\n @api.model\n def alive(self):\n url = self.env['ir.config_parameter'].get_param(\n 'dead_mans_switch_client.url')\n logger = logging.getLogger(__name__)\n if not url:\n logger.error('No server configured!')\n return\n data = self._get_data()\n logger.debug('sending %s', data)\n urllib2.urlopen(\n urllib2.Request(\n url,\n json.dumps({\n 'jsonrpc': '2.0',\n 'method': 'call',\n 'params': data,\n }),\n {\n 'Content-Type': 'application/json',\n }))\n\n @api.model\n def _install_default_url(self):\n \"\"\"Set up a default URL.\"\"\"\n conf = self.env[\"ir.config_parameter\"]\n name = \"dead_mans_switch_client.url\"\n param = conf.get_param(name)\n\n if not param:\n url = \"{}/dead_mans_switch/alive\".format(\n conf.get_param(\n \"report.url\",\n conf.get_param(\n \"web.base.url\",\n \"http://localhost\")))\n conf.set_param(name, url)\n", "path": "dead_mans_switch_client/models/dead_mans_switch_client.py"}]} | 1,164 | 528 |
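The `_install_default_url` hook added in this patch derives a default URL from nested `get_param` fallbacks: `report.url`, then `web.base.url`, then `http://localhost`. The same chain in isolation, modelled with a plain dict:

```python
# The fallback chain from _install_default_url, modelled with a plain dict.
def get_param(params, name, default=None):
    return params.get(name) or default

params = {"web.base.url": "https://example.odoo.test"}  # hypothetical config
url = "{}/dead_mans_switch/alive".format(
    get_param(params, "report.url",
              get_param(params, "web.base.url", "http://localhost")))
assert url == "https://example.odoo.test/dead_mans_switch/alive"
```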
gh_patches_debug_20868 | rasdani/github-patches | git_diff | pytorch__vision-2654 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Docs of some functions written are missing
## 📚 Documentation
A simple issue: docs are missing on the torchvision website for the following functions written in torchvision.
I guess we should add these docs on the webpage, as end users will benefit from using these functions.
Most people will not look at the source code to find these functions but will refer to the docs instead.
Missing docs that I found
- [x] Image reading functions [here](https://github.com/pytorch/vision/blob/master/torchvision/io/image.py)
We have docs for video I/O functions, so the image functions should be there too.
- [x] Torchvision ops from [boxes.py](https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py). Docs are added for NMS, but we are missing IoU, box area, and some classes. Partly fixed in #2642
Please do let me know if some other docs are missing as well.
Also, I can raise a PR to fix these; please do let me know if it is needed!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchvision/io/__init__.py`
Content:
```
1 from ._video_opt import (
2 Timebase,
3 VideoMetaData,
4 _HAS_VIDEO_OPT,
5 _probe_video_from_file,
6 _probe_video_from_memory,
7 _read_video_from_file,
8 _read_video_from_memory,
9 _read_video_timestamps_from_file,
10 _read_video_timestamps_from_memory,
11 )
12 from .video import (
13 read_video,
14 read_video_timestamps,
15 write_video,
16 )
17
18
19 __all__ = [
20 "write_video",
21 "read_video",
22 "read_video_timestamps",
23 "_read_video_from_file",
24 "_read_video_timestamps_from_file",
25 "_probe_video_from_file",
26 "_read_video_from_memory",
27 "_read_video_timestamps_from_memory",
28 "_probe_video_from_memory",
29 "_HAS_VIDEO_OPT",
30 "_read_video_clip_from_memory",
31 "_read_video_meta_data",
32 "VideoMetaData",
33 "Timebase"
34 ]
35
```
Path: `torchvision/ops/__init__.py`
Content:
```
1 from .boxes import nms, box_iou
2 from .new_empty_tensor import _new_empty_tensor
3 from .deform_conv import deform_conv2d, DeformConv2d
4 from .roi_align import roi_align, RoIAlign
5 from .roi_pool import roi_pool, RoIPool
6 from .ps_roi_align import ps_roi_align, PSRoIAlign
7 from .ps_roi_pool import ps_roi_pool, PSRoIPool
8 from .poolers import MultiScaleRoIAlign
9 from .feature_pyramid_network import FeaturePyramidNetwork
10
11 from ._register_onnx_ops import _register_custom_op
12
13 _register_custom_op()
14
15
16 __all__ = [
17 'deform_conv2d', 'DeformConv2d', 'nms', 'roi_align', 'RoIAlign', 'roi_pool',
18 'RoIPool', '_new_empty_tensor', 'ps_roi_align', 'PSRoIAlign', 'ps_roi_pool',
19 'PSRoIPool', 'MultiScaleRoIAlign', 'FeaturePyramidNetwork'
20 ]
21
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torchvision/io/__init__.py b/torchvision/io/__init__.py
--- a/torchvision/io/__init__.py
+++ b/torchvision/io/__init__.py
@@ -15,7 +15,6 @@
write_video,
)
-
__all__ = [
"write_video",
"read_video",
diff --git a/torchvision/ops/__init__.py b/torchvision/ops/__init__.py
--- a/torchvision/ops/__init__.py
+++ b/torchvision/ops/__init__.py
@@ -1,4 +1,4 @@
-from .boxes import nms, box_iou
+from .boxes import nms, batched_nms, remove_small_boxes, clip_boxes_to_image, box_area, box_iou
from .new_empty_tensor import _new_empty_tensor
from .deform_conv import deform_conv2d, DeformConv2d
from .roi_align import roi_align, RoIAlign
@@ -14,7 +14,8 @@
__all__ = [
- 'deform_conv2d', 'DeformConv2d', 'nms', 'roi_align', 'RoIAlign', 'roi_pool',
+ 'deform_conv2d', 'DeformConv2d', 'nms', 'batched_nms', 'remove_small_boxes',
+ 'clip_boxes_to_image', 'box_area', 'box_iou', 'roi_align', 'RoIAlign', 'roi_pool',
'RoIPool', '_new_empty_tensor', 'ps_roi_align', 'PSRoIAlign', 'ps_roi_pool',
'PSRoIPool', 'MultiScaleRoIAlign', 'FeaturePyramidNetwork'
]
| {"golden_diff": "diff --git a/torchvision/io/__init__.py b/torchvision/io/__init__.py\n--- a/torchvision/io/__init__.py\n+++ b/torchvision/io/__init__.py\n@@ -15,7 +15,6 @@\n write_video,\n )\n \n-\n __all__ = [\n \"write_video\",\n \"read_video\",\ndiff --git a/torchvision/ops/__init__.py b/torchvision/ops/__init__.py\n--- a/torchvision/ops/__init__.py\n+++ b/torchvision/ops/__init__.py\n@@ -1,4 +1,4 @@\n-from .boxes import nms, box_iou\n+from .boxes import nms, batched_nms, remove_small_boxes, clip_boxes_to_image, box_area, box_iou\n from .new_empty_tensor import _new_empty_tensor\n from .deform_conv import deform_conv2d, DeformConv2d\n from .roi_align import roi_align, RoIAlign\n@@ -14,7 +14,8 @@\n \n \n __all__ = [\n- 'deform_conv2d', 'DeformConv2d', 'nms', 'roi_align', 'RoIAlign', 'roi_pool',\n+ 'deform_conv2d', 'DeformConv2d', 'nms', 'batched_nms', 'remove_small_boxes',\n+ 'clip_boxes_to_image', 'box_area', 'box_iou', 'roi_align', 'RoIAlign', 'roi_pool',\n 'RoIPool', '_new_empty_tensor', 'ps_roi_align', 'PSRoIAlign', 'ps_roi_pool',\n 'PSRoIPool', 'MultiScaleRoIAlign', 'FeaturePyramidNetwork'\n ]\n", "issue": "Docs of some functions written are missing\n## \ud83d\udcda Documentation\r\n\r\nA simple issue, Docs are missing on the torchvision website for following functions written in torchvision.\r\n\r\nI guess we should add these docs on the webpage, as end-users will benefit from using these functions. \r\n\r\nMost people will not look at source code to find these functions but refer to docs.\r\n\r\nMissing docs that I found\r\n\r\n- [x] Image reading functions [here](https://github.com/pytorch/vision/blob/master/torchvision/io/image.py)\r\nWe have docs for video io functions, so maybe image should too be there.\r\n\r\n- [x] Torchvision ops from [boxes.py](https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py). Docs are added for NMS. but we are missing IoU, Box area and some classes. 
Partly fixed in #2642 \r\n\r\nPlease do let me know if some other docs or missing as well.\r\n\r\nAlso, I can raise a PR to fix these, please do let me know if it is needed!\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from ._video_opt import (\n Timebase,\n VideoMetaData,\n _HAS_VIDEO_OPT,\n _probe_video_from_file,\n _probe_video_from_memory,\n _read_video_from_file,\n _read_video_from_memory,\n _read_video_timestamps_from_file,\n _read_video_timestamps_from_memory,\n)\nfrom .video import (\n read_video,\n read_video_timestamps,\n write_video,\n)\n\n\n__all__ = [\n \"write_video\",\n \"read_video\",\n \"read_video_timestamps\",\n \"_read_video_from_file\",\n \"_read_video_timestamps_from_file\",\n \"_probe_video_from_file\",\n \"_read_video_from_memory\",\n \"_read_video_timestamps_from_memory\",\n \"_probe_video_from_memory\",\n \"_HAS_VIDEO_OPT\",\n \"_read_video_clip_from_memory\",\n \"_read_video_meta_data\",\n \"VideoMetaData\",\n \"Timebase\"\n]\n", "path": "torchvision/io/__init__.py"}, {"content": "from .boxes import nms, box_iou\nfrom .new_empty_tensor import _new_empty_tensor\nfrom .deform_conv import deform_conv2d, DeformConv2d\nfrom .roi_align import roi_align, RoIAlign\nfrom .roi_pool import roi_pool, RoIPool\nfrom .ps_roi_align import ps_roi_align, PSRoIAlign\nfrom .ps_roi_pool import ps_roi_pool, PSRoIPool\nfrom .poolers import MultiScaleRoIAlign\nfrom .feature_pyramid_network import FeaturePyramidNetwork\n\nfrom ._register_onnx_ops import _register_custom_op\n\n_register_custom_op()\n\n\n__all__ = [\n 'deform_conv2d', 'DeformConv2d', 'nms', 'roi_align', 'RoIAlign', 'roi_pool',\n 'RoIPool', '_new_empty_tensor', 'ps_roi_align', 'PSRoIAlign', 'ps_roi_pool',\n 'PSRoIPool', 'MultiScaleRoIAlign', 'FeaturePyramidNetwork'\n]\n", "path": "torchvision/ops/__init__.py"}], "after_files": [{"content": "from ._video_opt import (\n Timebase,\n VideoMetaData,\n _HAS_VIDEO_OPT,\n _probe_video_from_file,\n _probe_video_from_memory,\n _read_video_from_file,\n _read_video_from_memory,\n _read_video_timestamps_from_file,\n _read_video_timestamps_from_memory,\n)\nfrom .video import (\n read_video,\n read_video_timestamps,\n write_video,\n)\n\n__all__ = [\n \"write_video\",\n \"read_video\",\n \"read_video_timestamps\",\n \"_read_video_from_file\",\n \"_read_video_timestamps_from_file\",\n \"_probe_video_from_file\",\n \"_read_video_from_memory\",\n \"_read_video_timestamps_from_memory\",\n \"_probe_video_from_memory\",\n \"_HAS_VIDEO_OPT\",\n \"_read_video_clip_from_memory\",\n \"_read_video_meta_data\",\n \"VideoMetaData\",\n \"Timebase\"\n]\n", "path": "torchvision/io/__init__.py"}, {"content": "from .boxes import nms, batched_nms, remove_small_boxes, clip_boxes_to_image, box_area, box_iou\nfrom .new_empty_tensor import _new_empty_tensor\nfrom .deform_conv import deform_conv2d, DeformConv2d\nfrom .roi_align import roi_align, RoIAlign\nfrom .roi_pool import roi_pool, RoIPool\nfrom .ps_roi_align import ps_roi_align, PSRoIAlign\nfrom .ps_roi_pool import ps_roi_pool, PSRoIPool\nfrom .poolers import MultiScaleRoIAlign\nfrom .feature_pyramid_network import FeaturePyramidNetwork\n\nfrom ._register_onnx_ops import _register_custom_op\n\n_register_custom_op()\n\n\n__all__ = [\n 'deform_conv2d', 'DeformConv2d', 'nms', 'batched_nms', 'remove_small_boxes',\n 'clip_boxes_to_image', 'box_area', 'box_iou', 'roi_align', 'RoIAlign', 'roi_pool',\n 'RoIPool', '_new_empty_tensor', 'ps_roi_align', 'PSRoIAlign', 'ps_roi_pool',\n 'PSRoIPool', 'MultiScaleRoIAlign', 'FeaturePyramidNetwork'\n]\n", "path": 
"torchvision/ops/__init__.py"}]} | 1,004 | 376 |
gh_patches_debug_59180 | rasdani/github-patches | git_diff | TheAlgorithms__Python-295 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ProjectEuler -- Problem 1 -- sol2.py -- Error
For the input ```1000``` I get ```233366.4```. The correct answer should be ```233168```
See [file](https://github.com/TheAlgorithms/Python/blob/master/Project%20Euler/Problem%2001/sol2.py)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Project Euler/Problem 01/sol2.py`
Content:
```
1 '''
2 Problem Statement:
3 If we list all the natural numbers below 10 that are multiples of 3 or 5,
4 we get 3,5,6 and 9. The sum of these multiples is 23.
5 Find the sum of all the multiples of 3 or 5 below N.
6 '''
7 from __future__ import print_function
8 try:
9 raw_input # Python 2
10 except NameError:
11 raw_input = input # Python 3
12 n = int(raw_input().strip())
13 sum = 0
14 terms = (n-1)/3
15 sum+= ((terms)*(6+(terms-1)*3))/2 #sum of an A.P.
16 terms = (n-1)/5
17 sum+= ((terms)*(10+(terms-1)*5))/2
18 terms = (n-1)/15
19 sum-= ((terms)*(30+(terms-1)*15))/2
20 print(sum)
21
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/Project Euler/Problem 01/sol2.py b/Project Euler/Problem 01/sol2.py
--- a/Project Euler/Problem 01/sol2.py
+++ b/Project Euler/Problem 01/sol2.py
@@ -11,10 +11,10 @@
raw_input = input # Python 3
n = int(raw_input().strip())
sum = 0
-terms = (n-1)/3
-sum+= ((terms)*(6+(terms-1)*3))/2 #sum of an A.P.
-terms = (n-1)/5
-sum+= ((terms)*(10+(terms-1)*5))/2
-terms = (n-1)/15
-sum-= ((terms)*(30+(terms-1)*15))/2
+terms = (n-1)//3
+sum+= ((terms)*(6+(terms-1)*3))//2 #sum of an A.P.
+terms = (n-1)//5
+sum+= ((terms)*(10+(terms-1)*5))//2
+terms = (n-1)//15
+sum-= ((terms)*(30+(terms-1)*15))//2
print(sum)
| {"golden_diff": "diff --git a/Project Euler/Problem 01/sol2.py b/Project Euler/Problem 01/sol2.py\n--- a/Project Euler/Problem 01/sol2.py\t\n+++ b/Project Euler/Problem 01/sol2.py\t\n@@ -11,10 +11,10 @@\n raw_input = input # Python 3\n n = int(raw_input().strip())\n sum = 0\n-terms = (n-1)/3\n-sum+= ((terms)*(6+(terms-1)*3))/2 #sum of an A.P.\n-terms = (n-1)/5\n-sum+= ((terms)*(10+(terms-1)*5))/2\n-terms = (n-1)/15\n-sum-= ((terms)*(30+(terms-1)*15))/2\n+terms = (n-1)//3\n+sum+= ((terms)*(6+(terms-1)*3))//2 #sum of an A.P.\n+terms = (n-1)//5\n+sum+= ((terms)*(10+(terms-1)*5))//2\n+terms = (n-1)//15\n+sum-= ((terms)*(30+(terms-1)*15))//2\n print(sum)\n", "issue": "ProjectEuler -- Problem 1 -- solv2.py -- Error\nFor the Input ```1000``` I get ```233366.4```. The correct answer should be ```233168``` \r\nSee [file](https://github.com/TheAlgorithms/Python/blob/master/Project%20Euler/Problem%2001/sol2.py)\n", "before_files": [{"content": "'''\nProblem Statement:\nIf we list all the natural numbers below 10 that are multiples of 3 or 5,\nwe get 3,5,6 and 9. The sum of these multiples is 23.\nFind the sum of all the multiples of 3 or 5 below N.\n'''\nfrom __future__ import print_function\ntry:\n raw_input # Python 2\nexcept NameError:\n raw_input = input # Python 3\nn = int(raw_input().strip())\nsum = 0\nterms = (n-1)/3\nsum+= ((terms)*(6+(terms-1)*3))/2 #sum of an A.P.\nterms = (n-1)/5\nsum+= ((terms)*(10+(terms-1)*5))/2\nterms = (n-1)/15\nsum-= ((terms)*(30+(terms-1)*15))/2\nprint(sum)\n", "path": "Project Euler/Problem 01/sol2.py"}], "after_files": [{"content": "'''\nProblem Statement:\nIf we list all the natural numbers below 10 that are multiples of 3 or 5,\nwe get 3,5,6 and 9. The sum of these multiples is 23.\nFind the sum of all the multiples of 3 or 5 below N.\n'''\nfrom __future__ import print_function\ntry:\n raw_input # Python 2\nexcept NameError:\n raw_input = input # Python 3\nn = int(raw_input().strip())\nsum = 0\nterms = (n-1)//3\nsum+= ((terms)*(6+(terms-1)*3))//2 #sum of an A.P.\nterms = (n-1)//5\nsum+= ((terms)*(10+(terms-1)*5))//2\nterms = (n-1)//15\nsum-= ((terms)*(30+(terms-1)*15))//2\nprint(sum)\n", "path": "Project Euler/Problem 01/sol2.py"}]} | 579 | 277 |
gh_patches_debug_18615 | rasdani/github-patches | git_diff | vyperlang__vyper-555 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Log topic and data allow byte array longer than 32 bytes.
### What's your issue about?
When packing data/topic for a log, if the actual argument is a byte array variable, there is no check for the actual length of the variable.
e.g.,
```
MyLog: __log__({arg1: indexed(bytes<=2000)})
@public
def foo():
a: bytes<=100
log.MyLog(a)
```
This program should be rejected but is not.
### How can it be fixed?
Add check in event_sig, pack_arg_by_32 and pack_logging_topic.
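A hedged sketch of what such a guard could look like (illustrative only — it reuses `ByteArrayType` and `VariableDeclarationException` from the file below, but this is not the actual patch):

```python
# Hypothetical helper: reject byte arrays wider than one 32-byte log slot.
def check_logged_byte_width(typ):
    if isinstance(typ, ByteArrayType) and typ.maxlen > 32:
        raise VariableDeclarationException(
            "Can only log a maximum of 32 bytes at a time."
        )
```

To cover the case in the example above, a check like this would have to run where arguments are packed (`pack_arg_by_32`, `pack_logging_topic`), not only at event declaration time.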
#### Cute Animal Picture

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `viper/signatures/event_signature.py`
Content:
```
1 from viper.types import get_size_of_type, canonicalize_type, parse_type, \
2 ByteArrayType
3 from viper.utils import sha3, is_varname_valid, bytes_to_int
4 import ast
5 from viper.function_signature import VariableRecord
6 from viper.exceptions import InvalidTypeException, VariableDeclarationException
7
8
9 # Event signature object
10 class EventSignature():
11 def __init__(self, name, args, indexed_list, event_id, sig):
12 self.name = name
13 self.args = args
14 self.indexed_list = indexed_list
15 self.sig = sig
16 self.event_id = event_id
17
18 # Get a signature from an event declaration
19 @classmethod
20 def from_declaration(cls, code):
21 name = code.target.id
22 pos = 0
23 # Determine the arguments, expects something of the form def foo(arg1: num, arg2: num ...
24 args = []
25 indexed_list = []
26 topics_count = 1
27 if code.annotation.args:
28 keys = code.annotation.args[0].keys
29 values = code.annotation.args[0].values
30 for i in range(len(keys)):
31 typ = values[i]
32 arg = keys[i].id
33 if isinstance(typ, ast.Call):
34 # Check to see if argument is a topic
35 if typ.func.id == 'indexed':
36 typ = values[i].args[0]
37 indexed_list.append(True)
38 topics_count += 1
39 else:
40 raise VariableDeclarationException("Only indexed keyword is allowed", arg)
41 else:
42 if hasattr(typ, 'left') and typ.left.id == 'bytes' and typ.comparators[0].n > 32:
43 raise VariableDeclarationException("Can only log a maximum of 32 bytes at a time.")
44 indexed_list.append(False)
45 if topics_count > 4:
46 raise VariableDeclarationException("Maximum of 3 topics {} given".format(topics_count - 1), arg)
47 if not isinstance(arg, str):
48 raise VariableDeclarationException("Argument name invalid", arg)
49 if not typ:
50 raise InvalidTypeException("Argument must have type", arg)
51 if not is_varname_valid(arg):
52 raise VariableDeclarationException("Argument name invalid or reserved: " + arg, arg)
53 if arg in (x.name for x in args):
54 raise VariableDeclarationException("Duplicate function argument name: " + arg, arg)
55 parsed_type = parse_type(typ, None)
56 args.append(VariableRecord(arg, pos, parsed_type, False))
57 if isinstance(parsed_type, ByteArrayType):
58 pos += 32
59 else:
60 pos += get_size_of_type(parsed_type) * 32
61 sig = name + '(' + ','.join([canonicalize_type(arg.typ, True) for arg in args]) + ')'
62 event_id = bytes_to_int(sha3(bytes(sig, 'utf-8')))
63 return cls(name, args, indexed_list, event_id, sig)
64
65 def to_abi_dict(self):
66 return {
67 "name": self.name,
68 "inputs": [{"type": canonicalize_type(arg.typ, True), "name": arg.name, "indexed": self.indexed_list[pos]} for pos, arg in enumerate(self.args)] if self.args else [],
69 "anonymous": False,
70 "type": "event"
71 }
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/viper/signatures/event_signature.py b/viper/signatures/event_signature.py
--- a/viper/signatures/event_signature.py
+++ b/viper/signatures/event_signature.py
@@ -39,9 +39,9 @@
else:
raise VariableDeclarationException("Only indexed keyword is allowed", arg)
else:
- if hasattr(typ, 'left') and typ.left.id == 'bytes' and typ.comparators[0].n > 32:
- raise VariableDeclarationException("Can only log a maximum of 32 bytes at a time.")
indexed_list.append(False)
+ if hasattr(typ, 'left') and typ.left.id == 'bytes' and typ.comparators[0].n > 32:
+ raise VariableDeclarationException("Can only log a maximum of 32 bytes at a time.")
if topics_count > 4:
raise VariableDeclarationException("Maximum of 3 topics {} given".format(topics_count - 1), arg)
if not isinstance(arg, str):
| {"golden_diff": "diff --git a/viper/signatures/event_signature.py b/viper/signatures/event_signature.py\n--- a/viper/signatures/event_signature.py\n+++ b/viper/signatures/event_signature.py\n@@ -39,9 +39,9 @@\n else:\n raise VariableDeclarationException(\"Only indexed keyword is allowed\", arg)\n else:\n- if hasattr(typ, 'left') and typ.left.id == 'bytes' and typ.comparators[0].n > 32:\n- raise VariableDeclarationException(\"Can only log a maximum of 32 bytes at a time.\")\n indexed_list.append(False)\n+ if hasattr(typ, 'left') and typ.left.id == 'bytes' and typ.comparators[0].n > 32:\n+ raise VariableDeclarationException(\"Can only log a maximum of 32 bytes at a time.\")\n if topics_count > 4:\n raise VariableDeclarationException(\"Maximum of 3 topics {} given\".format(topics_count - 1), arg)\n if not isinstance(arg, str):\n", "issue": "Log topic and data allow byte array longer than 32 bytes.\n### What's your issue about?\r\nWhen packing data/topic for log, if the the actual argument is a byte array variable, there is no check for the actual length of the variable.\r\ne.g.,\r\n```\r\nMyLog: __log__({arg1: indexed(bytes<=2000)})\r\n\r\n@public\r\ndef foo():\r\n a: bytes<=100\r\n log.MyLog(a)\r\n```\r\nThis program should be rejected by is not.\r\n\r\n### How can it be fixed?\r\n\r\nAdd check in event_sig, pack_arg_by_32 and pack_logging_topic.\r\n\r\n#### Cute Animal Picture\r\n\r\n\r\n\n", "before_files": [{"content": "from viper.types import get_size_of_type, canonicalize_type, parse_type, \\\n ByteArrayType\nfrom viper.utils import sha3, is_varname_valid, bytes_to_int\nimport ast\nfrom viper.function_signature import VariableRecord\nfrom viper.exceptions import InvalidTypeException, VariableDeclarationException\n\n\n# Event signature object\nclass EventSignature():\n def __init__(self, name, args, indexed_list, event_id, sig):\n self.name = name\n self.args = args\n self.indexed_list = indexed_list\n self.sig = sig\n self.event_id = event_id\n\n # Get a signature from an event declaration\n @classmethod\n def from_declaration(cls, code):\n name = code.target.id\n pos = 0\n # Determine the arguments, expects something of the form def foo(arg1: num, arg2: num ...\n args = []\n indexed_list = []\n topics_count = 1\n if code.annotation.args:\n keys = code.annotation.args[0].keys\n values = code.annotation.args[0].values\n for i in range(len(keys)):\n typ = values[i]\n arg = keys[i].id\n if isinstance(typ, ast.Call):\n # Check to see if argument is a topic\n if typ.func.id == 'indexed':\n typ = values[i].args[0]\n indexed_list.append(True)\n topics_count += 1\n else:\n raise VariableDeclarationException(\"Only indexed keyword is allowed\", arg)\n else:\n if hasattr(typ, 'left') and typ.left.id == 'bytes' and typ.comparators[0].n > 32:\n raise VariableDeclarationException(\"Can only log a maximum of 32 bytes at a time.\")\n indexed_list.append(False)\n if topics_count > 4:\n raise VariableDeclarationException(\"Maximum of 3 topics {} given\".format(topics_count - 1), arg)\n if not isinstance(arg, str):\n raise VariableDeclarationException(\"Argument name invalid\", arg)\n if not typ:\n raise InvalidTypeException(\"Argument must have type\", arg)\n if not is_varname_valid(arg):\n raise VariableDeclarationException(\"Argument name invalid or reserved: \" + arg, arg)\n if arg in (x.name for x in args):\n raise VariableDeclarationException(\"Duplicate function argument name: \" + arg, arg)\n parsed_type = parse_type(typ, None)\n args.append(VariableRecord(arg, pos, parsed_type, False))\n if 
isinstance(parsed_type, ByteArrayType):\n pos += 32\n else:\n pos += get_size_of_type(parsed_type) * 32\n sig = name + '(' + ','.join([canonicalize_type(arg.typ, True) for arg in args]) + ')'\n event_id = bytes_to_int(sha3(bytes(sig, 'utf-8')))\n return cls(name, args, indexed_list, event_id, sig)\n\n def to_abi_dict(self):\n return {\n \"name\": self.name,\n \"inputs\": [{\"type\": canonicalize_type(arg.typ, True), \"name\": arg.name, \"indexed\": self.indexed_list[pos]} for pos, arg in enumerate(self.args)] if self.args else [],\n \"anonymous\": False,\n \"type\": \"event\"\n }\n", "path": "viper/signatures/event_signature.py"}], "after_files": [{"content": "from viper.types import get_size_of_type, canonicalize_type, parse_type, \\\n ByteArrayType\nfrom viper.utils import sha3, is_varname_valid, bytes_to_int\nimport ast\nfrom viper.function_signature import VariableRecord\nfrom viper.exceptions import InvalidTypeException, VariableDeclarationException\n\n\n# Event signature object\nclass EventSignature():\n def __init__(self, name, args, indexed_list, event_id, sig):\n self.name = name\n self.args = args\n self.indexed_list = indexed_list\n self.sig = sig\n self.event_id = event_id\n\n # Get a signature from an event declaration\n @classmethod\n def from_declaration(cls, code):\n name = code.target.id\n pos = 0\n # Determine the arguments, expects something of the form def foo(arg1: num, arg2: num ...\n args = []\n indexed_list = []\n topics_count = 1\n if code.annotation.args:\n keys = code.annotation.args[0].keys\n values = code.annotation.args[0].values\n for i in range(len(keys)):\n typ = values[i]\n arg = keys[i].id\n if isinstance(typ, ast.Call):\n # Check to see if argument is a topic\n if typ.func.id == 'indexed':\n typ = values[i].args[0]\n indexed_list.append(True)\n topics_count += 1\n else:\n raise VariableDeclarationException(\"Only indexed keyword is allowed\", arg)\n else:\n indexed_list.append(False)\n if hasattr(typ, 'left') and typ.left.id == 'bytes' and typ.comparators[0].n > 32:\n raise VariableDeclarationException(\"Can only log a maximum of 32 bytes at a time.\")\n if topics_count > 4:\n raise VariableDeclarationException(\"Maximum of 3 topics {} given\".format(topics_count - 1), arg)\n if not isinstance(arg, str):\n raise VariableDeclarationException(\"Argument name invalid\", arg)\n if not typ:\n raise InvalidTypeException(\"Argument must have type\", arg)\n if not is_varname_valid(arg):\n raise VariableDeclarationException(\"Argument name invalid or reserved: \" + arg, arg)\n if arg in (x.name for x in args):\n raise VariableDeclarationException(\"Duplicate function argument name: \" + arg, arg)\n parsed_type = parse_type(typ, None)\n args.append(VariableRecord(arg, pos, parsed_type, False))\n if isinstance(parsed_type, ByteArrayType):\n pos += 32\n else:\n pos += get_size_of_type(parsed_type) * 32\n sig = name + '(' + ','.join([canonicalize_type(arg.typ, True) for arg in args]) + ')'\n event_id = bytes_to_int(sha3(bytes(sig, 'utf-8')))\n return cls(name, args, indexed_list, event_id, sig)\n\n def to_abi_dict(self):\n return {\n \"name\": self.name,\n \"inputs\": [{\"type\": canonicalize_type(arg.typ, True), \"name\": arg.name, \"indexed\": self.indexed_list[pos]} for pos, arg in enumerate(self.args)] if self.args else [],\n \"anonymous\": False,\n \"type\": \"event\"\n }\n", "path": "viper/signatures/event_signature.py"}]} | 1,295 | 222 |
gh_patches_debug_21110 | rasdani/github-patches | git_diff | iterative__dvc-1978 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
logger: still ignoring the context of the progress bar
version: `0.40.0+6408b5`
When trying to push to an SSH remote with the `ask_password` option set to `True`:
```
# [############ ] 40% Collecting informationEnter a private key passphrase or a password for host 'localhost' port '22' user 'mroutis':
```
This behavior should be handled at: https://github.com/iterative/dvc/blob/6408b58b8daddc297467453bcd130c07b09cd46b/dvc/logger.py#L134-L140
It should be tested under `tests/unit/test_logger.py`
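A simplified sketch of the intended behavior (hypothetical code, not the actual patch — though `progress.is_finished` and `progress.clearln()` do exist in the file below): before formatting any record that will actually be emitted, break the in-flight progress line.

```python
import logging

class ProgressAwareFormatter(logging.Formatter):
    """Sketch: finish any in-flight progress line before visible log output."""

    def _is_visible(self, record):
        # Only records at or above the effective level are actually emitted.
        return record.levelno >= logging.getLogger("dvc").getEffectiveLevel()

    def format(self, record):
        if self._is_visible(record):
            from dvc.progress import progress
            if not progress.is_finished:
                progress.clearln()  # move past the bar's line first
        return super(ProgressAwareFormatter, self).format(record)
```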
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dvc/logger.py`
Content:
```
1 """Manages logging configuration for dvc repo."""
2
3 from __future__ import unicode_literals
4
5 from dvc.utils.compat import str, StringIO
6
7 import logging
8 import logging.handlers
9 import logging.config
10 import colorama
11
12
13 class ExcludeErrorsFilter(logging.Filter):
14 def filter(self, record):
15 return record.levelno < logging.ERROR
16
17
18 class ColorFormatter(logging.Formatter):
19 """Enable color support when logging to a terminal that supports it.
20
21 Color support on Windows versions that do not support ANSI color codes is
22 enabled by use of the colorama__ library.
23 See the colorama documentation for details.
24
25 __ https://pypi.python.org/pypi/colorama
26
27 For records containing `exc_info`, it will use a custom `_walk_exc` to
28 retrieve the whole traceback.
29 """
30
31 color_code = {
32 "DEBUG": colorama.Fore.BLUE,
33 "INFO": "",
34 "WARNING": colorama.Fore.YELLOW,
35 "ERROR": colorama.Fore.RED,
36 "CRITICAL": colorama.Fore.RED,
37 }
38
39 footer = (
40 "{yellow}Having any troubles?{nc}."
41 " Hit us up at {blue}https://dvc.org/support{nc},"
42 " we are always happy to help!"
43 ).format(
44 blue=colorama.Fore.BLUE,
45 nc=colorama.Fore.RESET,
46 yellow=colorama.Fore.YELLOW,
47 )
48
49 def format(self, record):
50 if record.levelname == "INFO":
51 return record.msg
52
53 if record.levelname == "ERROR" or record.levelname == "CRITICAL":
54 exception, stack_trace = self._parse_exc(record.exc_info)
55
56 return (
57 "{color}{levelname}{nc}: {description}"
58 "{stack_trace}\n"
59 "\n"
60 "{footer}"
61 ).format(
62 color=self.color_code.get(record.levelname, ""),
63 nc=colorama.Fore.RESET,
64 levelname=record.levelname,
65 description=self._description(record.msg, exception),
66 msg=record.msg,
67 stack_trace=stack_trace,
68 footer=self.footer,
69 )
70
71 return "{color}{levelname}{nc}: {msg}".format(
72 color=self.color_code.get(record.levelname, ""),
73 nc=colorama.Fore.RESET,
74 levelname=record.levelname,
75 msg=record.msg,
76 )
77
78 def _description(self, message, exception):
79 description = ""
80
81 if exception and message:
82 description = "{message} - {exception}"
83 elif exception:
84 description = "{exception}"
85 elif message:
86 description = "{message}"
87
88 return description.format(message=message, exception=exception)
89
90 def _walk_exc(self, exc_info):
91 import traceback
92
93 buffer = StringIO()
94
95 traceback.print_exception(*exc_info, file=buffer)
96
97 exc = exc_info[1]
98 tb = buffer.getvalue()
99
100 exc_list = [str(exc)]
101 tb_list = [tb]
102
103 # NOTE: parsing chained exceptions. See dvc/exceptions.py for more info
104 while hasattr(exc, "cause") and exc.cause:
105 exc_list.append(str(exc.cause))
106 if hasattr(exc, "cause_tb") and exc.cause_tb:
107 tb_list.insert(0, str(exc.cause_tb))
108 exc = exc.cause
109
110 return exc_list, tb_list
111
112 def _parse_exc(self, exc_info):
113 if not exc_info:
114 return (None, "")
115
116 exc_list, tb_list = self._walk_exc(exc_info)
117
118 exception = ": ".join(exc_list)
119
120 if logging.getLogger("dvc").getEffectiveLevel() == logging.DEBUG:
121 stack_trace = (
122 "\n" "{red}{line}{nc}\n" "{stack_trace}" "{red}{line}{nc}"
123 ).format(
124 red=colorama.Fore.RED,
125 nc=colorama.Fore.RESET,
126 line="-" * 60,
127 stack_trace="\n".join(tb_list),
128 )
129 else:
130 stack_trace = ""
131
132 return (exception, stack_trace)
133
134 def _progress_aware(self):
135 """Add a new line if progress bar hasn't finished"""
136 from dvc.progress import progress
137
138 if not progress.is_finished:
139 progress._print()
140 progress.clearln()
141
142
143 def setup(level=logging.INFO):
144 colorama.init()
145
146 logging.config.dictConfig(
147 {
148 "version": 1,
149 "filters": {"exclude_errors": {"()": ExcludeErrorsFilter}},
150 "formatters": {"color": {"()": ColorFormatter}},
151 "handlers": {
152 "console": {
153 "class": "logging.StreamHandler",
154 "level": "DEBUG",
155 "formatter": "color",
156 "stream": "ext://sys.stdout",
157 "filters": ["exclude_errors"],
158 },
159 "console_errors": {
160 "class": "logging.StreamHandler",
161 "level": "ERROR",
162 "formatter": "color",
163 "stream": "ext://sys.stderr",
164 },
165 },
166 "loggers": {
167 "dvc": {
168 "level": level,
169 "handlers": ["console", "console_errors"],
170 }
171 },
172 }
173 )
174
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dvc/logger.py b/dvc/logger.py
--- a/dvc/logger.py
+++ b/dvc/logger.py
@@ -47,6 +47,9 @@
)
def format(self, record):
+ if self._is_visible(record):
+ self._progress_aware()
+
if record.levelname == "INFO":
return record.msg
@@ -75,6 +78,12 @@
msg=record.msg,
)
+ def _current_level(self):
+ return logging.getLogger("dvc").getEffectiveLevel()
+
+ def _is_visible(self, record):
+ return record.levelno >= self._current_level()
+
def _description(self, message, exception):
description = ""
@@ -117,7 +126,7 @@
exception = ": ".join(exc_list)
- if logging.getLogger("dvc").getEffectiveLevel() == logging.DEBUG:
+ if self._current_level() == logging.DEBUG:
stack_trace = (
"\n" "{red}{line}{nc}\n" "{stack_trace}" "{red}{line}{nc}"
).format(
| {"golden_diff": "diff --git a/dvc/logger.py b/dvc/logger.py\n--- a/dvc/logger.py\n+++ b/dvc/logger.py\n@@ -47,6 +47,9 @@\n )\n \n def format(self, record):\n+ if self._is_visible(record):\n+ self._progress_aware()\n+\n if record.levelname == \"INFO\":\n return record.msg\n \n@@ -75,6 +78,12 @@\n msg=record.msg,\n )\n \n+ def _current_level(self):\n+ return logging.getLogger(\"dvc\").getEffectiveLevel()\n+\n+ def _is_visible(self, record):\n+ return record.levelno >= self._current_level()\n+\n def _description(self, message, exception):\n description = \"\"\n \n@@ -117,7 +126,7 @@\n \n exception = \": \".join(exc_list)\n \n- if logging.getLogger(\"dvc\").getEffectiveLevel() == logging.DEBUG:\n+ if self._current_level() == logging.DEBUG:\n stack_trace = (\n \"\\n\" \"{red}{line}{nc}\\n\" \"{stack_trace}\" \"{red}{line}{nc}\"\n ).format(\n", "issue": "logger: still ignoring the context of the progress bar\nversion: `0.40.0+6408b5`\r\n\r\nWhen trying to push to an SSH with the `ask_password` option set to `True`:\r\n```\r\n# [############ ] 40% Collecting informationEnter a private key passphrase or a password for host 'localhost' port '22' user 'mroutis':\r\n```\r\n\r\nThis behavior should be handle at: https://github.com/iterative/dvc/blob/6408b58b8daddc297467453bcd130c07b09cd46b/dvc/logger.py#L134-L140\r\n\r\nIt should be tested under `tests/unit/test_logger.py`\n", "before_files": [{"content": "\"\"\"Manages logging configuration for dvc repo.\"\"\"\n\nfrom __future__ import unicode_literals\n\nfrom dvc.utils.compat import str, StringIO\n\nimport logging\nimport logging.handlers\nimport logging.config\nimport colorama\n\n\nclass ExcludeErrorsFilter(logging.Filter):\n def filter(self, record):\n return record.levelno < logging.ERROR\n\n\nclass ColorFormatter(logging.Formatter):\n \"\"\"Enable color support when logging to a terminal that supports it.\n\n Color support on Windows versions that do not support ANSI color codes is\n enabled by use of the colorama__ library.\n See the colorama documentation for details.\n\n __ https://pypi.python.org/pypi/colorama\n\n For records containing `exc_info`, it will use a custom `_walk_exc` to\n retrieve the whole tracebak.\n \"\"\"\n\n color_code = {\n \"DEBUG\": colorama.Fore.BLUE,\n \"INFO\": \"\",\n \"WARNING\": colorama.Fore.YELLOW,\n \"ERROR\": colorama.Fore.RED,\n \"CRITICAL\": colorama.Fore.RED,\n }\n\n footer = (\n \"{yellow}Having any troubles?{nc}.\"\n \" Hit us up at {blue}https://dvc.org/support{nc},\"\n \" we are always happy to help!\"\n ).format(\n blue=colorama.Fore.BLUE,\n nc=colorama.Fore.RESET,\n yellow=colorama.Fore.YELLOW,\n )\n\n def format(self, record):\n if record.levelname == \"INFO\":\n return record.msg\n\n if record.levelname == \"ERROR\" or record.levelname == \"CRITICAL\":\n exception, stack_trace = self._parse_exc(record.exc_info)\n\n return (\n \"{color}{levelname}{nc}: {description}\"\n \"{stack_trace}\\n\"\n \"\\n\"\n \"{footer}\"\n ).format(\n color=self.color_code.get(record.levelname, \"\"),\n nc=colorama.Fore.RESET,\n levelname=record.levelname,\n description=self._description(record.msg, exception),\n msg=record.msg,\n stack_trace=stack_trace,\n footer=self.footer,\n )\n\n return \"{color}{levelname}{nc}: {msg}\".format(\n color=self.color_code.get(record.levelname, \"\"),\n nc=colorama.Fore.RESET,\n levelname=record.levelname,\n msg=record.msg,\n )\n\n def _description(self, message, exception):\n description = \"\"\n\n if exception and message:\n description = \"{message} - {exception}\"\n elif exception:\n 
description = \"{exception}\"\n elif message:\n description = \"{message}\"\n\n return description.format(message=message, exception=exception)\n\n def _walk_exc(self, exc_info):\n import traceback\n\n buffer = StringIO()\n\n traceback.print_exception(*exc_info, file=buffer)\n\n exc = exc_info[1]\n tb = buffer.getvalue()\n\n exc_list = [str(exc)]\n tb_list = [tb]\n\n # NOTE: parsing chained exceptions. See dvc/exceptions.py for more info\n while hasattr(exc, \"cause\") and exc.cause:\n exc_list.append(str(exc.cause))\n if hasattr(exc, \"cause_tb\") and exc.cause_tb:\n tb_list.insert(0, str(exc.cause_tb))\n exc = exc.cause\n\n return exc_list, tb_list\n\n def _parse_exc(self, exc_info):\n if not exc_info:\n return (None, \"\")\n\n exc_list, tb_list = self._walk_exc(exc_info)\n\n exception = \": \".join(exc_list)\n\n if logging.getLogger(\"dvc\").getEffectiveLevel() == logging.DEBUG:\n stack_trace = (\n \"\\n\" \"{red}{line}{nc}\\n\" \"{stack_trace}\" \"{red}{line}{nc}\"\n ).format(\n red=colorama.Fore.RED,\n nc=colorama.Fore.RESET,\n line=\"-\" * 60,\n stack_trace=\"\\n\".join(tb_list),\n )\n else:\n stack_trace = \"\"\n\n return (exception, stack_trace)\n\n def _progress_aware(self):\n \"\"\"Add a new line if progress bar hasn't finished\"\"\"\n from dvc.progress import progress\n\n if not progress.is_finished:\n progress._print()\n progress.clearln()\n\n\ndef setup(level=logging.INFO):\n colorama.init()\n\n logging.config.dictConfig(\n {\n \"version\": 1,\n \"filters\": {\"exclude_errors\": {\"()\": ExcludeErrorsFilter}},\n \"formatters\": {\"color\": {\"()\": ColorFormatter}},\n \"handlers\": {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"level\": \"DEBUG\",\n \"formatter\": \"color\",\n \"stream\": \"ext://sys.stdout\",\n \"filters\": [\"exclude_errors\"],\n },\n \"console_errors\": {\n \"class\": \"logging.StreamHandler\",\n \"level\": \"ERROR\",\n \"formatter\": \"color\",\n \"stream\": \"ext://sys.stderr\",\n },\n },\n \"loggers\": {\n \"dvc\": {\n \"level\": level,\n \"handlers\": [\"console\", \"console_errors\"],\n }\n },\n }\n )\n", "path": "dvc/logger.py"}], "after_files": [{"content": "\"\"\"Manages logging configuration for dvc repo.\"\"\"\n\nfrom __future__ import unicode_literals\n\nfrom dvc.utils.compat import str, StringIO\n\nimport logging\nimport logging.handlers\nimport logging.config\nimport colorama\n\n\nclass ExcludeErrorsFilter(logging.Filter):\n def filter(self, record):\n return record.levelno < logging.ERROR\n\n\nclass ColorFormatter(logging.Formatter):\n \"\"\"Enable color support when logging to a terminal that supports it.\n\n Color support on Windows versions that do not support ANSI color codes is\n enabled by use of the colorama__ library.\n See the colorama documentation for details.\n\n __ https://pypi.python.org/pypi/colorama\n\n For records containing `exc_info`, it will use a custom `_walk_exc` to\n retrieve the whole tracebak.\n \"\"\"\n\n color_code = {\n \"DEBUG\": colorama.Fore.BLUE,\n \"INFO\": \"\",\n \"WARNING\": colorama.Fore.YELLOW,\n \"ERROR\": colorama.Fore.RED,\n \"CRITICAL\": colorama.Fore.RED,\n }\n\n footer = (\n \"{yellow}Having any troubles?{nc}.\"\n \" Hit us up at {blue}https://dvc.org/support{nc},\"\n \" we are always happy to help!\"\n ).format(\n blue=colorama.Fore.BLUE,\n nc=colorama.Fore.RESET,\n yellow=colorama.Fore.YELLOW,\n )\n\n def format(self, record):\n if self._is_visible(record):\n self._progress_aware()\n\n if record.levelname == \"INFO\":\n return record.msg\n\n if record.levelname == \"ERROR\" or 
record.levelname == \"CRITICAL\":\n exception, stack_trace = self._parse_exc(record.exc_info)\n\n return (\n \"{color}{levelname}{nc}: {description}\"\n \"{stack_trace}\\n\"\n \"\\n\"\n \"{footer}\"\n ).format(\n color=self.color_code.get(record.levelname, \"\"),\n nc=colorama.Fore.RESET,\n levelname=record.levelname,\n description=self._description(record.msg, exception),\n msg=record.msg,\n stack_trace=stack_trace,\n footer=self.footer,\n )\n\n return \"{color}{levelname}{nc}: {msg}\".format(\n color=self.color_code.get(record.levelname, \"\"),\n nc=colorama.Fore.RESET,\n levelname=record.levelname,\n msg=record.msg,\n )\n\n def _current_level(self):\n return logging.getLogger(\"dvc\").getEffectiveLevel()\n\n def _is_visible(self, record):\n return record.levelno >= self._current_level()\n\n def _description(self, message, exception):\n description = \"\"\n\n if exception and message:\n description = \"{message} - {exception}\"\n elif exception:\n description = \"{exception}\"\n elif message:\n description = \"{message}\"\n\n return description.format(message=message, exception=exception)\n\n def _walk_exc(self, exc_info):\n import traceback\n\n buffer = StringIO()\n\n traceback.print_exception(*exc_info, file=buffer)\n\n exc = exc_info[1]\n tb = buffer.getvalue()\n\n exc_list = [str(exc)]\n tb_list = [tb]\n\n # NOTE: parsing chained exceptions. See dvc/exceptions.py for more info\n while hasattr(exc, \"cause\") and exc.cause:\n exc_list.append(str(exc.cause))\n if hasattr(exc, \"cause_tb\") and exc.cause_tb:\n tb_list.insert(0, str(exc.cause_tb))\n exc = exc.cause\n\n return exc_list, tb_list\n\n def _parse_exc(self, exc_info):\n if not exc_info:\n return (None, \"\")\n\n exc_list, tb_list = self._walk_exc(exc_info)\n\n exception = \": \".join(exc_list)\n\n if self._current_level() == logging.DEBUG:\n stack_trace = (\n \"\\n\" \"{red}{line}{nc}\\n\" \"{stack_trace}\" \"{red}{line}{nc}\"\n ).format(\n red=colorama.Fore.RED,\n nc=colorama.Fore.RESET,\n line=\"-\" * 60,\n stack_trace=\"\\n\".join(tb_list),\n )\n else:\n stack_trace = \"\"\n\n return (exception, stack_trace)\n\n def _progress_aware(self):\n \"\"\"Add a new line if progress bar hasn't finished\"\"\"\n from dvc.progress import progress\n\n if not progress.is_finished:\n progress._print()\n progress.clearln()\n\n\ndef setup(level=logging.INFO):\n colorama.init()\n\n logging.config.dictConfig(\n {\n \"version\": 1,\n \"filters\": {\"exclude_errors\": {\"()\": ExcludeErrorsFilter}},\n \"formatters\": {\"color\": {\"()\": ColorFormatter}},\n \"handlers\": {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"level\": \"DEBUG\",\n \"formatter\": \"color\",\n \"stream\": \"ext://sys.stdout\",\n \"filters\": [\"exclude_errors\"],\n },\n \"console_errors\": {\n \"class\": \"logging.StreamHandler\",\n \"level\": \"ERROR\",\n \"formatter\": \"color\",\n \"stream\": \"ext://sys.stderr\",\n },\n },\n \"loggers\": {\n \"dvc\": {\n \"level\": level,\n \"handlers\": [\"console\", \"console_errors\"],\n }\n },\n }\n )\n", "path": "dvc/logger.py"}]} | 1,931 | 251 |
gh_patches_debug_17451 | rasdani/github-patches | git_diff | cal-itp__benefits-950 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make buttons use title-case
## Acceptance Criteria
- [ ] All buttons are using title case
## Additional context
This is according to the design in Figma.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `benefits/core/views.py`
Content:
```
1 """
2 The core application: view definition for the root of the webapp.
3 """
4 from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound, HttpResponseServerError
5 from django.shortcuts import redirect
6 from django.template import loader
7 from django.template.response import TemplateResponse
8 from django.urls import reverse
9 from django.utils.translation import gettext as _
10
11 from . import models, session, viewmodels
12 from .middleware import pageview_decorator
13
14 ROUTE_INDEX = "core:index"
15 ROUTE_ELIGIBILITY = "eligibility:index"
16 ROUTE_HELP = "core:help"
17
18 TEMPLATE_PAGE = "core/page.html"
19 TEMPLATE_AGENCY = "core/agency_index.html"
20 TEMPLATE_HELP = "core/help.html"
21
22
23 @pageview_decorator
24 def index(request):
25 """View handler for the main entry page."""
26 session.reset(request)
27
28 agencies = models.TransitAgency.all_active()
29
30 if len(agencies) == 1:
31 agency = agencies[0]
32 return redirect(agency.index_url)
33
34 # generate a button to the landing page for each active agency
35 buttons = [viewmodels.Button.outline_primary(text=a.short_name, url=a.index_url) for a in agencies]
36 buttons[0].classes.append("mt-3")
37 buttons[0].label = _("core.pages.index.chooseprovider")
38
39 page = viewmodels.Page(
40 title=_("core.pages.index.title"),
41 content_title=_("core.pages.index.content_title"),
42 buttons=buttons,
43 classes="home",
44 )
45
46 return TemplateResponse(request, TEMPLATE_PAGE, page.context_dict())
47
48
49 @pageview_decorator
50 def agency_index(request, agency):
51 """View handler for an agency entry page."""
52 session.reset(request)
53 session.update(request, agency=agency, origin=agency.index_url)
54
55 if len(agency.eligibility_verifiers.all()) == 1:
56 return redirect(reverse(ROUTE_ELIGIBILITY))
57
58 button = viewmodels.Button.primary(text=_("core.pages.index.continue"), url=reverse(ROUTE_ELIGIBILITY))
59 button.label = _("core.pages.agency_index.button.label")
60
61 page = viewmodels.Page(
62 title=_("core.pages.agency_index.title"),
63 content_title=_("core.pages.agency_index.content_title"),
64 button=button,
65 classes="home",
66 )
67
68 help_page = reverse(ROUTE_HELP)
69 context_dict = {**page.context_dict(), **{"info_link": f"{help_page}#about"}}
70
71 return TemplateResponse(request, TEMPLATE_AGENCY, context_dict)
72
73
74 @pageview_decorator
75 def agency_public_key(request, agency):
76 """View handler returns an agency's public key as plain text."""
77 return HttpResponse(agency.public_key_data, content_type="text/plain")
78
79
80 @pageview_decorator
81 def help(request):
82 """View handler for the help page."""
83 if session.active_agency(request):
84 agency = session.agency(request)
85 buttons = viewmodels.Button.agency_contact_links(agency)
86 else:
87 buttons = [btn for a in models.TransitAgency.all_active() for btn in viewmodels.Button.agency_contact_links(a)]
88
89 buttons.append(viewmodels.Button.home(request, _("core.buttons.back")))
90
91 page = viewmodels.Page(
92 title=_("core.buttons.help"),
93 content_title=_("core.buttons.help"),
94 buttons=buttons,
95 )
96
97 return TemplateResponse(request, TEMPLATE_HELP, page.context_dict())
98
99
100 @pageview_decorator
101 def bad_request(request, exception, template_name="400.html"):
102 """View handler for HTTP 400 Bad Request responses."""
103 if session.active_agency(request):
104 session.update(request, origin=session.agency(request).index_url)
105 else:
106 session.update(request, origin=reverse(ROUTE_INDEX))
107
108 home = viewmodels.Button.home(request)
109 page = viewmodels.ErrorPage.server_error(button=home)
110 t = loader.get_template(template_name)
111
112 return HttpResponseBadRequest(t.render(page.context_dict()))
113
114
115 @pageview_decorator
116 def csrf_failure(request, reason):
117 """
118 View handler for CSRF_FAILURE_VIEW with custom data.
119 """
120 if session.active_agency(request):
121 session.update(request, origin=session.agency(request).index_url)
122 else:
123 session.update(request, origin=reverse(ROUTE_INDEX))
124
125 home = viewmodels.Button.home(request)
126 page = viewmodels.ErrorPage.not_found(button=home, path=request.path)
127 t = loader.get_template("400.html")
128
129 return HttpResponseNotFound(t.render(page.context_dict()))
130
131
132 @pageview_decorator
133 def page_not_found(request, exception, template_name="404.html"):
134 """View handler for HTTP 404 Not Found responses."""
135 if session.active_agency(request):
136 session.update(request, origin=session.agency(request).index_url)
137 else:
138 session.update(request, origin=reverse(ROUTE_INDEX))
139
140 home = viewmodels.Button.home(request)
141 # show a more user-friendly message instead of not_found
142 page = viewmodels.ErrorPage.user_error(button=home, path=request.path)
143 t = loader.get_template(template_name)
144
145 return HttpResponseNotFound(t.render(page.context_dict()))
146
147
148 @pageview_decorator
149 def server_error(request, template_name="500.html"):
150 """View handler for HTTP 500 Server Error responses."""
151 if session.active_agency(request):
152 session.update(request, origin=session.agency(request).index_url)
153 else:
154 session.update(request, origin=reverse(ROUTE_INDEX))
155
156 home = viewmodels.Button.home(request)
157 page = viewmodels.ErrorPage.server_error(button=home)
158 t = loader.get_template(template_name)
159
160 return HttpResponseServerError(t.render(page.context_dict()))
161
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/benefits/core/views.py b/benefits/core/views.py
--- a/benefits/core/views.py
+++ b/benefits/core/views.py
@@ -56,19 +56,15 @@
return redirect(reverse(ROUTE_ELIGIBILITY))
button = viewmodels.Button.primary(text=_("core.pages.index.continue"), url=reverse(ROUTE_ELIGIBILITY))
- button.label = _("core.pages.agency_index.button.label")
page = viewmodels.Page(
title=_("core.pages.agency_index.title"),
- content_title=_("core.pages.agency_index.content_title"),
+ content_title=_("core.pages.agency_index.mst_cc.content_title"),
button=button,
classes="home",
)
- help_page = reverse(ROUTE_HELP)
- context_dict = {**page.context_dict(), **{"info_link": f"{help_page}#about"}}
-
- return TemplateResponse(request, TEMPLATE_AGENCY, context_dict)
+ return TemplateResponse(request, TEMPLATE_AGENCY, page.context_dict())
@pageview_decorator
| {"golden_diff": "diff --git a/benefits/core/views.py b/benefits/core/views.py\n--- a/benefits/core/views.py\n+++ b/benefits/core/views.py\n@@ -56,19 +56,15 @@\n return redirect(reverse(ROUTE_ELIGIBILITY))\n \n button = viewmodels.Button.primary(text=_(\"core.pages.index.continue\"), url=reverse(ROUTE_ELIGIBILITY))\n- button.label = _(\"core.pages.agency_index.button.label\")\n \n page = viewmodels.Page(\n title=_(\"core.pages.agency_index.title\"),\n- content_title=_(\"core.pages.agency_index.content_title\"),\n+ content_title=_(\"core.pages.agency_index.mst_cc.content_title\"),\n button=button,\n classes=\"home\",\n )\n \n- help_page = reverse(ROUTE_HELP)\n- context_dict = {**page.context_dict(), **{\"info_link\": f\"{help_page}#about\"}}\n-\n- return TemplateResponse(request, TEMPLATE_AGENCY, context_dict)\n+ return TemplateResponse(request, TEMPLATE_AGENCY, page.context_dict())\n \n \n @pageview_decorator\n", "issue": "Make buttons use title-case\n## Acceptance Criteria\r\n- [ ] All buttons are using title case\r\n\r\n## Additional context\r\nThis is according to the design in Figma\n", "before_files": [{"content": "\"\"\"\nThe core application: view definition for the root of the webapp.\n\"\"\"\nfrom django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound, HttpResponseServerError\nfrom django.shortcuts import redirect\nfrom django.template import loader\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse\nfrom django.utils.translation import gettext as _\n\nfrom . import models, session, viewmodels\nfrom .middleware import pageview_decorator\n\nROUTE_INDEX = \"core:index\"\nROUTE_ELIGIBILITY = \"eligibility:index\"\nROUTE_HELP = \"core:help\"\n\nTEMPLATE_PAGE = \"core/page.html\"\nTEMPLATE_AGENCY = \"core/agency_index.html\"\nTEMPLATE_HELP = \"core/help.html\"\n\n\n@pageview_decorator\ndef index(request):\n \"\"\"View handler for the main entry page.\"\"\"\n session.reset(request)\n\n agencies = models.TransitAgency.all_active()\n\n if len(agencies) == 1:\n agency = agencies[0]\n return redirect(agency.index_url)\n\n # generate a button to the landing page for each active agency\n buttons = [viewmodels.Button.outline_primary(text=a.short_name, url=a.index_url) for a in agencies]\n buttons[0].classes.append(\"mt-3\")\n buttons[0].label = _(\"core.pages.index.chooseprovider\")\n\n page = viewmodels.Page(\n title=_(\"core.pages.index.title\"),\n content_title=_(\"core.pages.index.content_title\"),\n buttons=buttons,\n classes=\"home\",\n )\n\n return TemplateResponse(request, TEMPLATE_PAGE, page.context_dict())\n\n\n@pageview_decorator\ndef agency_index(request, agency):\n \"\"\"View handler for an agency entry page.\"\"\"\n session.reset(request)\n session.update(request, agency=agency, origin=agency.index_url)\n\n if len(agency.eligibility_verifiers.all()) == 1:\n return redirect(reverse(ROUTE_ELIGIBILITY))\n\n button = viewmodels.Button.primary(text=_(\"core.pages.index.continue\"), url=reverse(ROUTE_ELIGIBILITY))\n button.label = _(\"core.pages.agency_index.button.label\")\n\n page = viewmodels.Page(\n title=_(\"core.pages.agency_index.title\"),\n content_title=_(\"core.pages.agency_index.content_title\"),\n button=button,\n classes=\"home\",\n )\n\n help_page = reverse(ROUTE_HELP)\n context_dict = {**page.context_dict(), **{\"info_link\": f\"{help_page}#about\"}}\n\n return TemplateResponse(request, TEMPLATE_AGENCY, context_dict)\n\n\n@pageview_decorator\ndef agency_public_key(request, agency):\n \"\"\"View handler returns 
an agency's public key as plain text.\"\"\"\n return HttpResponse(agency.public_key_data, content_type=\"text/plain\")\n\n\n@pageview_decorator\ndef help(request):\n \"\"\"View handler for the help page.\"\"\"\n if session.active_agency(request):\n agency = session.agency(request)\n buttons = viewmodels.Button.agency_contact_links(agency)\n else:\n buttons = [btn for a in models.TransitAgency.all_active() for btn in viewmodels.Button.agency_contact_links(a)]\n\n buttons.append(viewmodels.Button.home(request, _(\"core.buttons.back\")))\n\n page = viewmodels.Page(\n title=_(\"core.buttons.help\"),\n content_title=_(\"core.buttons.help\"),\n buttons=buttons,\n )\n\n return TemplateResponse(request, TEMPLATE_HELP, page.context_dict())\n\n\n@pageview_decorator\ndef bad_request(request, exception, template_name=\"400.html\"):\n \"\"\"View handler for HTTP 400 Bad Request responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.server_error(button=home)\n t = loader.get_template(template_name)\n\n return HttpResponseBadRequest(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef csrf_failure(request, reason):\n \"\"\"\n View handler for CSRF_FAILURE_VIEW with custom data.\n \"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.not_found(button=home, path=request.path)\n t = loader.get_template(\"400.html\")\n\n return HttpResponseNotFound(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef page_not_found(request, exception, template_name=\"404.html\"):\n \"\"\"View handler for HTTP 404 Not Found responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n # show a more user-friendly message instead of not_found\n page = viewmodels.ErrorPage.user_error(button=home, path=request.path)\n t = loader.get_template(template_name)\n\n return HttpResponseNotFound(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef server_error(request, template_name=\"500.html\"):\n \"\"\"View handler for HTTP 500 Server Error responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.server_error(button=home)\n t = loader.get_template(template_name)\n\n return HttpResponseServerError(t.render(page.context_dict()))\n", "path": "benefits/core/views.py"}], "after_files": [{"content": "\"\"\"\nThe core application: view definition for the root of the webapp.\n\"\"\"\nfrom django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound, HttpResponseServerError\nfrom django.shortcuts import redirect\nfrom django.template import loader\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse\nfrom django.utils.translation import gettext as _\n\nfrom . 
import models, session, viewmodels\nfrom .middleware import pageview_decorator\n\nROUTE_INDEX = \"core:index\"\nROUTE_ELIGIBILITY = \"eligibility:index\"\nROUTE_HELP = \"core:help\"\n\nTEMPLATE_PAGE = \"core/page.html\"\nTEMPLATE_AGENCY = \"core/agency_index.html\"\nTEMPLATE_HELP = \"core/help.html\"\n\n\n@pageview_decorator\ndef index(request):\n \"\"\"View handler for the main entry page.\"\"\"\n session.reset(request)\n\n agencies = models.TransitAgency.all_active()\n\n if len(agencies) == 1:\n agency = agencies[0]\n return redirect(agency.index_url)\n\n # generate a button to the landing page for each active agency\n buttons = [viewmodels.Button.outline_primary(text=a.short_name, url=a.index_url) for a in agencies]\n buttons[0].classes.append(\"mt-3\")\n buttons[0].label = _(\"core.pages.index.chooseprovider\")\n\n page = viewmodels.Page(\n title=_(\"core.pages.index.title\"),\n content_title=_(\"core.pages.index.content_title\"),\n buttons=buttons,\n classes=\"home\",\n )\n\n return TemplateResponse(request, TEMPLATE_PAGE, page.context_dict())\n\n\n@pageview_decorator\ndef agency_index(request, agency):\n \"\"\"View handler for an agency entry page.\"\"\"\n session.reset(request)\n session.update(request, agency=agency, origin=agency.index_url)\n\n if len(agency.eligibility_verifiers.all()) == 1:\n return redirect(reverse(ROUTE_ELIGIBILITY))\n\n button = viewmodels.Button.primary(text=_(\"core.pages.index.continue\"), url=reverse(ROUTE_ELIGIBILITY))\n\n page = viewmodels.Page(\n title=_(\"core.pages.agency_index.title\"),\n content_title=_(\"core.pages.agency_index.mst_cc.content_title\"),\n button=button,\n classes=\"home\",\n )\n\n return TemplateResponse(request, TEMPLATE_AGENCY, page.context_dict())\n\n\n@pageview_decorator\ndef agency_public_key(request, agency):\n \"\"\"View handler returns an agency's public key as plain text.\"\"\"\n return HttpResponse(agency.public_key_data, content_type=\"text/plain\")\n\n\n@pageview_decorator\ndef help(request):\n \"\"\"View handler for the help page.\"\"\"\n if session.active_agency(request):\n agency = session.agency(request)\n buttons = viewmodels.Button.agency_contact_links(agency)\n else:\n buttons = [btn for a in models.TransitAgency.all_active() for btn in viewmodels.Button.agency_contact_links(a)]\n\n buttons.append(viewmodels.Button.home(request, _(\"core.buttons.back\")))\n\n page = viewmodels.Page(\n title=_(\"core.buttons.help\"),\n content_title=_(\"core.buttons.help\"),\n buttons=buttons,\n )\n\n return TemplateResponse(request, TEMPLATE_HELP, page.context_dict())\n\n\n@pageview_decorator\ndef bad_request(request, exception, template_name=\"400.html\"):\n \"\"\"View handler for HTTP 400 Bad Request responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.server_error(button=home)\n t = loader.get_template(template_name)\n\n return HttpResponseBadRequest(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef csrf_failure(request, reason):\n \"\"\"\n View handler for CSRF_FAILURE_VIEW with custom data.\n \"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.not_found(button=home, path=request.path)\n t = 
loader.get_template(\"400.html\")\n\n return HttpResponseNotFound(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef page_not_found(request, exception, template_name=\"404.html\"):\n \"\"\"View handler for HTTP 404 Not Found responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n # show a more user-friendly message instead of not_found\n page = viewmodels.ErrorPage.user_error(button=home, path=request.path)\n t = loader.get_template(template_name)\n\n return HttpResponseNotFound(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef server_error(request, template_name=\"500.html\"):\n \"\"\"View handler for HTTP 500 Server Error responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.server_error(button=home)\n t = loader.get_template(template_name)\n\n return HttpResponseServerError(t.render(page.context_dict()))\n", "path": "benefits/core/views.py"}]} | 1,851 | 229 |
gh_patches_debug_1433 | rasdani/github-patches | git_diff | translate__translate-3603 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
po2ts fails with ascii encode error on py2 (should use utf-8)
Test file:
[octave.zip](https://github.com/translate/translate/files/870288/octave.zip)
```
$ po2ts octave.po oct.ts
processing 1 files...
po2ts: WARNING: Error processing: input octave.po, output oct.ts, template None: 'ascii' codec can't encode characters in position 187-188: ordinal not in range(128)
[###########################################] 100%
$ python --version
Python 2.7.12
```
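For context, a minimal reproduction of the failure mode (my sketch; the filename and string are illustrative): on Python 2, writing a `unicode` string containing non-ASCII characters to a byte-oriented file goes through the implicit ASCII codec, so the converter has to encode explicitly.

```python
# Python 2 sketch of the bug and the fix.
output = u"Ver\u00e4nderung"  # unicode containing a non-ASCII character

with open("oct.ts", "wb") as outputfile:
    # outputfile.write(output)             # UnicodeEncodeError: 'ascii' codec ...
    outputfile.write(output.encode("utf-8"))  # explicit UTF-8 encode works
```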
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `translate/convert/po2ts.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright 2004-2006 Zuza Software Foundation
5 #
6 # This file is part of translate.
7 #
8 # translate is free software; you can redistribute it and/or modify
9 # it under the terms of the GNU General Public License as published by
10 # the Free Software Foundation; either version 2 of the License, or
11 # (at your option) any later version.
12 #
13 # translate is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU General Public License for more details.
17 #
18 # You should have received a copy of the GNU General Public License
19 # along with this program; if not, see <http://www.gnu.org/licenses/>.
20
21 """Convert Gettext PO localization files to Qt Linguist (.ts) files.
22
23 See: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/ts2po.html
24 for examples and usage instructions.
25 """
26
27 from translate.storage import po, ts
28
29
30 class po2ts(object):
31
32 def convertstore(self, inputstore, templatefile=None, context=None):
33 """converts a .po file to .ts format (using a template .ts file if given)"""
34 if templatefile is None:
35 tsfile = ts.QtTsParser()
36 else:
37 tsfile = ts.QtTsParser(templatefile)
38 for inputunit in inputstore.units:
39 if inputunit.isheader() or inputunit.isblank():
40 continue
41 source = inputunit.source
42 translation = inputunit.target
43 comment = inputunit.getnotes("translator")
44 transtype = None
45 if not inputunit.istranslated():
46 transtype = "unfinished"
47 elif inputunit.getnotes("developer") == "(obsolete)":
48 transtype = "obsolete"
49 if isinstance(source, bytes):
50 source = source.decode("utf-8")
51 if isinstance(translation, bytes):
52 translation = translation.decode("utf-8")
53 for sourcelocation in inputunit.getlocations():
54 if context is None:
55 if "#" in sourcelocation:
56 contextname = sourcelocation[:sourcelocation.find("#")]
57 else:
58 contextname = sourcelocation
59 else:
60 contextname = context
61 tsfile.addtranslation(contextname, source, translation, comment, transtype, createifmissing=True)
62 return tsfile.getxml()
63
64
65 def convertpo(inputfile, outputfile, templatefile, context):
66 """reads in stdin using fromfileclass, converts using convertorclass, writes to stdout"""
67 inputstore = po.pofile(inputfile)
68 if inputstore.isempty():
69 return 0
70 convertor = po2ts()
71 outputstring = convertor.convertstore(inputstore, templatefile, context)
72 outputfile.write(outputstring)
73 return 1
74
75
76 def main(argv=None):
77 from translate.convert import convert
78 formats = {"po": ("ts", convertpo), ("po", "ts"): ("ts", convertpo)}
79 parser = convert.ConvertOptionParser(formats, usepots=False, usetemplates=True, description=__doc__)
80 parser.add_option("-c", "--context", dest="context", default=None,
81 help="use supplied context instead of the one in the .po file comment")
82 parser.passthrough.append("context")
83 parser.run(argv)
84
85
86 if __name__ == '__main__':
87 main()
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/translate/convert/po2ts.py b/translate/convert/po2ts.py
--- a/translate/convert/po2ts.py
+++ b/translate/convert/po2ts.py
@@ -69,7 +69,7 @@
return 0
convertor = po2ts()
outputstring = convertor.convertstore(inputstore, templatefile, context)
- outputfile.write(outputstring)
+ outputfile.write(outputstring.encode('utf-8'))
return 1
| {"golden_diff": "diff --git a/translate/convert/po2ts.py b/translate/convert/po2ts.py\n--- a/translate/convert/po2ts.py\n+++ b/translate/convert/po2ts.py\n@@ -69,7 +69,7 @@\n return 0\n convertor = po2ts()\n outputstring = convertor.convertstore(inputstore, templatefile, context)\n- outputfile.write(outputstring)\n+ outputfile.write(outputstring.encode('utf-8'))\n return 1\n", "issue": "po2ts fails with ascii encode error on py2 (should use utf-8)\nTest file:\r\n[octave.zip](https://github.com/translate/translate/files/870288/octave.zip)\r\n\r\n```\r\n$ po2ts octave.po oct.ts\r\nprocessing 1 files...\r\npo2ts: WARNING: Error processing: input octave.po, output oct.ts, template None: 'ascii' codec can't encode characters in position 187-188: ordinal not in range(128)\r\n[###########################################] 100%\r\n\r\n$ python --version\r\nPython 2.7.12\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2004-2006 Zuza Software Foundation\n#\n# This file is part of translate.\n#\n# translate is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# translate is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Convert Gettext PO localization files to Qt Linguist (.ts) files.\n\nSee: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/ts2po.html\nfor examples and usage instructions.\n\"\"\"\n\nfrom translate.storage import po, ts\n\n\nclass po2ts(object):\n\n def convertstore(self, inputstore, templatefile=None, context=None):\n \"\"\"converts a .po file to .ts format (using a template .ts file if given)\"\"\"\n if templatefile is None:\n tsfile = ts.QtTsParser()\n else:\n tsfile = ts.QtTsParser(templatefile)\n for inputunit in inputstore.units:\n if inputunit.isheader() or inputunit.isblank():\n continue\n source = inputunit.source\n translation = inputunit.target\n comment = inputunit.getnotes(\"translator\")\n transtype = None\n if not inputunit.istranslated():\n transtype = \"unfinished\"\n elif inputunit.getnotes(\"developer\") == \"(obsolete)\":\n transtype = \"obsolete\"\n if isinstance(source, bytes):\n source = source.decode(\"utf-8\")\n if isinstance(translation, bytes):\n translation = translation.decode(\"utf-8\")\n for sourcelocation in inputunit.getlocations():\n if context is None:\n if \"#\" in sourcelocation:\n contextname = sourcelocation[:sourcelocation.find(\"#\")]\n else:\n contextname = sourcelocation\n else:\n contextname = context\n tsfile.addtranslation(contextname, source, translation, comment, transtype, createifmissing=True)\n return tsfile.getxml()\n\n\ndef convertpo(inputfile, outputfile, templatefile, context):\n \"\"\"reads in stdin using fromfileclass, converts using convertorclass, writes to stdout\"\"\"\n inputstore = po.pofile(inputfile)\n if inputstore.isempty():\n return 0\n convertor = po2ts()\n outputstring = convertor.convertstore(inputstore, templatefile, context)\n outputfile.write(outputstring)\n return 1\n\n\ndef main(argv=None):\n from translate.convert 
import convert\n formats = {\"po\": (\"ts\", convertpo), (\"po\", \"ts\"): (\"ts\", convertpo)}\n parser = convert.ConvertOptionParser(formats, usepots=False, usetemplates=True, description=__doc__)\n parser.add_option(\"-c\", \"--context\", dest=\"context\", default=None,\n help=\"use supplied context instead of the one in the .po file comment\")\n parser.passthrough.append(\"context\")\n parser.run(argv)\n\n\nif __name__ == '__main__':\n main()\n", "path": "translate/convert/po2ts.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2004-2006 Zuza Software Foundation\n#\n# This file is part of translate.\n#\n# translate is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# translate is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Convert Gettext PO localization files to Qt Linguist (.ts) files.\n\nSee: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/ts2po.html\nfor examples and usage instructions.\n\"\"\"\n\nfrom translate.storage import po, ts\n\n\nclass po2ts(object):\n\n def convertstore(self, inputstore, templatefile=None, context=None):\n \"\"\"converts a .po file to .ts format (using a template .ts file if given)\"\"\"\n if templatefile is None:\n tsfile = ts.QtTsParser()\n else:\n tsfile = ts.QtTsParser(templatefile)\n for inputunit in inputstore.units:\n if inputunit.isheader() or inputunit.isblank():\n continue\n source = inputunit.source\n translation = inputunit.target\n comment = inputunit.getnotes(\"translator\")\n transtype = None\n if not inputunit.istranslated():\n transtype = \"unfinished\"\n elif inputunit.getnotes(\"developer\") == \"(obsolete)\":\n transtype = \"obsolete\"\n if isinstance(source, bytes):\n source = source.decode(\"utf-8\")\n if isinstance(translation, bytes):\n translation = translation.decode(\"utf-8\")\n for sourcelocation in inputunit.getlocations():\n if context is None:\n if \"#\" in sourcelocation:\n contextname = sourcelocation[:sourcelocation.find(\"#\")]\n else:\n contextname = sourcelocation\n else:\n contextname = context\n tsfile.addtranslation(contextname, source, translation, comment, transtype, createifmissing=True)\n return tsfile.getxml()\n\n\ndef convertpo(inputfile, outputfile, templatefile, context):\n \"\"\"reads in stdin using fromfileclass, converts using convertorclass, writes to stdout\"\"\"\n inputstore = po.pofile(inputfile)\n if inputstore.isempty():\n return 0\n convertor = po2ts()\n outputstring = convertor.convertstore(inputstore, templatefile, context)\n outputfile.write(outputstring.encode('utf-8'))\n return 1\n\n\ndef main(argv=None):\n from translate.convert import convert\n formats = {\"po\": (\"ts\", convertpo), (\"po\", \"ts\"): (\"ts\", convertpo)}\n parser = convert.ConvertOptionParser(formats, usepots=False, usetemplates=True, description=__doc__)\n parser.add_option(\"-c\", \"--context\", dest=\"context\", default=None,\n help=\"use supplied context instead of the one in the .po file comment\")\n 
parser.passthrough.append(\"context\")\n parser.run(argv)\n\n\nif __name__ == '__main__':\n main()\n", "path": "translate/convert/po2ts.py"}]} | 1,316 | 115 |
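To make the Python 2 pitfall behind the po2ts fix concrete, here is a minimal sketch (the file name is hypothetical; only the final `encode` call mirrors the patch). On Python 2, writing a `unicode` string to a byte-oriented file implicitly encodes it with the ascii codec, which is exactly the `'ascii' codec can't encode` failure the issue reports.

```python
# -*- coding: utf-8 -*-
import io

outputstring = u"caf\xe9"  # non-ASCII text, as a PO file like octave.po can produce

with io.open("oct.ts", "wb") as outputfile:
    # outputfile.write(outputstring) would raise UnicodeEncodeError on
    # Python 2 (implicit ascii encode) and TypeError on Python 3.
    outputfile.write(outputstring.encode("utf-8"))  # explicit utf-8 always succeeds
```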
gh_patches_debug_63302 | rasdani/github-patches | git_diff | scikit-hep__pyhf-915 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
cloudpickle v1.5.0 breaks testing
# Description
With the release of [`cloudpickle` `v1.5.0`](https://pypi.org/project/cloudpickle/1.5.0/) on 2020-07-01 the CI is broken in testing as the following error is raised
```pytb
ImportError while loading conftest '/home/runner/work/pyhf/pyhf/tests/conftest.py'.
tests/conftest.py:83: in <module>
(pyhf.tensor.tensorflow_backend(), None),
src/pyhf/tensor/__init__.py:44: in __getattr__
e,
E pyhf.exceptions.ImportBackendError: ('There was a problem importing TensorFlow. The tensorflow backend cannot be used.', ImportError("cannot import name 'CloudPickler' from 'cloudpickle.cloudpickle' (/opt/hostedtoolcache/Python/3.7.7/x64/lib/python3.7/site-packages/cloudpickle/cloudpickle.py)"))
##[error]Process completed with exit code 4.
```
`cloudpickle` is a required dependency of TensorFlow Probability and in TFP `v0.10.0` it is set to [`cloudpickle >= 1.2.2`](https://github.com/tensorflow/probability/blob/f051e03dd3cc847d31061803c2b31c564562a993/setup.py#L34).
This has been reported in:
- [TensorFlow Probability Issue 991](https://github.com/tensorflow/probability/issues/991)
- [`cloudpickle` Issue 390](https://github.com/cloudpipe/cloudpickle/issues/390)
# Expected Behavior
For no error to be raised
# Actual Behavior
cf. above
# Steps to Reproduce
This was found in CI, but the minimal test case is just to install TensorFlow and TensorFlow Probability and then try to import TFP:
```
$ python -m pip install tensorflow tensorflow-probability
$ python -c "import tensorflow_probability"
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/tensorflow_probability/__init__.py", line 76, in <module>
from tensorflow_probability.python import * # pylint: disable=wildcard-import
File "/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/tensorflow_probability/python/__init__.py", line 23, in <module>
from tensorflow_probability.python import distributions
File "/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/tensorflow_probability/python/distributions/__init__.py", line 88, in <module>
from tensorflow_probability.python.distributions.pixel_cnn import PixelCNN
File "/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/tensorflow_probability/python/distributions/pixel_cnn.py", line 37, in <module>
from tensorflow_probability.python.layers import weight_norm
File "/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/tensorflow_probability/python/layers/__init__.py", line 31, in <module>
from tensorflow_probability.python.layers.distribution_layer import CategoricalMixtureOfOneHotCategorical
File "/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/tensorflow_probability/python/layers/distribution_layer.py", line 28, in <module>
from cloudpickle.cloudpickle import CloudPickler
ImportError: cannot import name 'CloudPickler' from 'cloudpickle.cloudpickle' (/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/cloudpickle/cloudpickle.py)
$ pip list | grep cloudpickle
cloudpickle 1.5.0
```
# Checklist
- [x] Run `git fetch` to get the most up to date version of `master`
- [x] Searched through existing Issues to confirm this is not a duplicate issue
- [x] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import setup
2
3 extras_require = {
4 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],
5 'torch': ['torch~=1.2'],
6 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],
7 'xmlio': ['uproot'],
8 'minuit': ['iminuit'],
9 }
10 extras_require['backends'] = sorted(
11 set(
12 extras_require['tensorflow']
13 + extras_require['torch']
14 + extras_require['jax']
15 + extras_require['minuit']
16 )
17 )
18 extras_require['contrib'] = sorted(set(['matplotlib']))
19 extras_require['lint'] = sorted(set(['pyflakes', 'black']))
20
21 extras_require['test'] = sorted(
22 set(
23 extras_require['backends']
24 + extras_require['xmlio']
25 + extras_require['contrib']
26 + [
27 'pytest~=3.5',
28 'pytest-cov>=2.5.1',
29 'pytest-mock',
30 'pytest-benchmark[histogram]',
31 'pytest-console-scripts',
32 'pytest-mpl',
33 'pydocstyle',
34 'coverage>=4.0', # coveralls
35 'papermill~=2.0',
36 'nteract-scrapbook~=0.2',
37 'jupyter',
38 'uproot~=3.3',
39 'graphviz',
40 'jsonpatch',
41 ]
42 )
43 )
44 extras_require['docs'] = sorted(
45 set(
46 [
47 'sphinx~=3.0.0', # Sphinx v3.1.X regressions break docs
48 'sphinxcontrib-bibtex',
49 'sphinx-click',
50 'sphinx_rtd_theme',
51 'nbsphinx',
52 'ipywidgets',
53 'sphinx-issues',
54 'sphinx-copybutton>0.2.9',
55 ]
56 )
57 )
58 extras_require['develop'] = sorted(
59 set(
60 extras_require['docs']
61 + extras_require['lint']
62 + extras_require['test']
63 + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'check-manifest', 'twine']
64 )
65 )
66 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
67
68
69 setup(
70 extras_require=extras_require,
71 use_scm_version=lambda: {'local_scheme': lambda version: ''},
72 )
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,7 +1,11 @@
from setuptools import setup
extras_require = {
- 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],
+ 'tensorflow': [
+ 'tensorflow~=2.0',
+ 'tensorflow-probability~=0.8',
+ 'cloudpickle!=1.5.0', # TODO: Temp patch until tfp v0.11
+ ],
'torch': ['torch~=1.2'],
'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],
'xmlio': ['uproot'],
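To sanity-check the pin, a short sketch with the `packaging` library (the same library pip uses to evaluate requirement specifiers) confirms that `!=1.5.0` excludes exactly the broken cloudpickle release while earlier and later releases stay installable:

```python
# Check that the '!=1.5.0' specifier excludes only the broken release.
from packaging.specifiers import SpecifierSet

spec = SpecifierSet("!=1.5.0")

assert "1.4.1" in spec      # earlier releases remain allowed
assert "1.5.0" not in spec  # the release that breaks TFP imports is blocked
assert "1.6.0" in spec      # later releases are allowed again
```

The exclusion pin, rather than an upper bound such as `<1.5.0`, is the interesting design choice: it self-heals as soon as a fixed cloudpickle lands, matching the `# TODO: Temp patch until tfp v0.11` comment in the diff.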
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,7 +1,11 @@\n from setuptools import setup\n \n extras_require = {\n- 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],\n+ 'tensorflow': [\n+ 'tensorflow~=2.0',\n+ 'tensorflow-probability~=0.8',\n+ 'cloudpickle!=1.5.0', # TODO: Temp patch until tfp v0.11\n+ ],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot'],\n", "issue": "cloudpickle v1.5.0 breaks testing\n# Description\r\n\r\nWith the release of [`cloudpickle` `v1.5.0`](https://pypi.org/project/cloudpickle/1.5.0/) on 2020-07-01 the CI is broken in testing as the following error is raised\r\n\r\n```pytb\r\nImportError while loading conftest '/home/runner/work/pyhf/pyhf/tests/conftest.py'.\r\ntests/conftest.py:83: in <module>\r\n (pyhf.tensor.tensorflow_backend(), None),\r\nsrc/pyhf/tensor/__init__.py:44: in __getattr__\r\n e,\r\nE pyhf.exceptions.ImportBackendError: ('There was a problem importing TensorFlow. The tensorflow backend cannot be used.', ImportError(\"cannot import name 'CloudPickler' from 'cloudpickle.cloudpickle' (/opt/hostedtoolcache/Python/3.7.7/x64/lib/python3.7/site-packages/cloudpickle/cloudpickle.py)\"))\r\n##[error]Process completed with exit code 4.\r\n```\r\n\r\n`cloudpickle` is a required dependency of TensorFlow Probability and in TFP `v0.10.0` it is set to [`cloudpickle >= 1.2.2`](https://github.com/tensorflow/probability/blob/f051e03dd3cc847d31061803c2b31c564562a993/setup.py#L34).\r\n\r\nThis has been reported in:\r\n- [TensorFlow Probability Issue 991](https://github.com/tensorflow/probability/issues/991)\r\n- [`cloudpickle` Issue 390](https://github.com/cloudpipe/cloudpickle/issues/390)\r\n\r\n# Expected Behavior\r\n\r\nFor no error to be raised\r\n\r\n# Actual Behavior\r\n\r\nc.f. 
above\r\n\r\n# Steps to Reproduce\r\n\r\nThis was found in CI, but the minimal test case is just to install TensorFlow and TensorFlow Probability and then try to import TFP:\r\n\r\n```\r\n$ python -m pip install tensorflow tensorflow-probability\r\n$ python -c \"import tensorflow_probability\"\r\nTraceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\n File \"/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/tensorflow_probability/__init__.py\", line 76, in <module>\r\n from tensorflow_probability.python import * # pylint: disable=wildcard-import\r\n File \"/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/tensorflow_probability/python/__init__.py\", line 23, in <module>\r\n from tensorflow_probability.python import distributions\r\n File \"/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/tensorflow_probability/python/distributions/__init__.py\", line 88, in <module>\r\n from tensorflow_probability.python.distributions.pixel_cnn import PixelCNN\r\n File \"/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/tensorflow_probability/python/distributions/pixel_cnn.py\", line 37, in <module>\r\n from tensorflow_probability.python.layers import weight_norm\r\n File \"/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/tensorflow_probability/python/layers/__init__.py\", line 31, in <module>\r\n from tensorflow_probability.python.layers.distribution_layer import CategoricalMixtureOfOneHotCategorical\r\n File \"/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/tensorflow_probability/python/layers/distribution_layer.py\", line 28, in <module>\r\n from cloudpickle.cloudpickle import CloudPickler\r\nImportError: cannot import name 'CloudPickler' from 'cloudpickle.cloudpickle' (/home/feickert/.venvs/debug-this/lib/python3.7/site-packages/cloudpickle/cloudpickle.py)\r\n$ pip list | grep cloudpickle\r\ncloudpickle 1.5.0\r\n```\r\n\r\n# Checklist\r\n\r\n- [x] Run `git fetch` to get the most up to date version of `master`\r\n- [x] Searched through existing Issues to confirm this is not a duplicate issue\r\n- [x] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue\r\n\n", "before_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted(set(['matplotlib']))\nextras_require['lint'] = sorted(set(['pyflakes', 'black']))\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'uproot~=3.3',\n 'graphviz',\n 'jsonpatch',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n [\n 'sphinx~=3.0.0', # Sphinx v3.1.X regressions break docs\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n 
)\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'check-manifest', 'twine']\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'tensorflow': [\n 'tensorflow~=2.0',\n 'tensorflow-probability~=0.8',\n 'cloudpickle!=1.5.0', # TODO: Temp patch until tfp v0.11\n ],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted(set(['matplotlib']))\nextras_require['lint'] = sorted(set(['pyflakes', 'black']))\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'uproot~=3.3',\n 'graphviz',\n 'jsonpatch',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n [\n 'sphinx~=3.0.0', # Sphinx v3.1.X regressions break docs\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'check-manifest', 'twine']\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]} | 1,860 | 174 |
gh_patches_debug_17889 | rasdani/github-patches | git_diff | akvo__akvo-rsr-1763 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Sector vocabulary saved value not updated
## Test plan
GIVEN the project editor
WHEN the sector vocabulary AND sector code are filled in
THEN the 'saved-value' attribute of the vocabulary should be correctly updated
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rsr/models/sector.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7
8 from django.db import models
9 from django.db.models.signals import post_save
10 from django.dispatch import receiver
11 from django.core.validators import MaxValueValidator, MinValueValidator
12 from django.utils.translation import ugettext_lazy as _
13
14 from ..fields import ValidXMLCharField
15
16 from akvo.codelists import models as codelist_models
17 from akvo.codelists.store.codelists_v201 import SECTOR_VOCABULARY
18 from akvo.utils import codelist_choices, codelist_value
19
20
21 class Sector(models.Model):
22 project = models.ForeignKey('Project', verbose_name=_(u'project'), related_name='sectors')
23 sector_code = ValidXMLCharField(
24 _(u'sector code'), blank=True, max_length=5,
25 help_text=_(u'Enter the sector code of the sectors that the project is working within.<br>'
26 u'See these lists for the DAC-5 and DAC-3 sector codes:<br>'
27 u'- <a href="http://iatistandard.org/201/codelists/Sector/" target="_blank">'
28 u'DAC-5 sector codes</a><br>'
29 u'- <a href="http://iatistandard.org/201/codelists/SectorCategory/" '
30 u'target="_blank">DAC-3 sector codes</a>')
31 )
32 text = ValidXMLCharField(
33 _(u'description'), blank=True, max_length=100, help_text=_(u'(max 100 characters)')
34 )
35 vocabulary = ValidXMLCharField(
36 _(u'vocabulary'), blank=True, max_length=5, choices=codelist_choices(SECTOR_VOCABULARY)
37 )
38 percentage = models.DecimalField(
39 _(u'sector percentage'), blank=True, null=True, max_digits=4, decimal_places=1,
40 validators=[MaxValueValidator(100), MinValueValidator(0)],
41 help_text=_(u'You can set the percentage of the project that is relevant for '
42 u'this sector here.')
43 )
44
45 def __unicode__(self):
46 if self.sector_code:
47 try:
48 sector_unicode = self.iati_sector().name.capitalize()
49 except Exception as e:
50 sector_unicode = u'%s' % _(u'Sector code not found')
51 else:
52 sector_unicode = u'%s' % _(u'No sector code specified')
53
54 if self.percentage:
55 sector_unicode += u' (%s%%)' % str(self.percentage)
56
57 return sector_unicode
58
59
60 def iati_sector_codes(self):
61 if self.sector_code and (self.vocabulary == '1' or self.vocabulary == 'DAC'):
62 return self.sector_code, codelist_value(codelist_models.Sector, self, 'sector_code')
63 elif self.sector_code and (self.vocabulary == '2' or self.vocabulary == 'DAC-3'):
64 return self.sector_code, codelist_value(codelist_models.SectorCategory,
65 self,
66 'sector_code')
67 else:
68 return self.sector_code, self.sector_code
69
70 def iati_sector(self):
71 if self.sector_code and (self.vocabulary == '1' or self.vocabulary == 'DAC'):
72 return codelist_value(codelist_models.Sector, self, 'sector_code')
73 elif self.sector_code and (self.vocabulary == '2' or self.vocabulary == 'DAC-3'):
74 return codelist_value(codelist_models.SectorCategory, self, 'sector_code')
75 else:
76 return self.sector_code
77
78 def iati_vocabulary(self):
79 return codelist_value(codelist_models.SectorVocabulary, self, 'vocabulary')
80
81 class Meta:
82 app_label = 'rsr'
83 verbose_name = _(u'sector')
84 verbose_name_plural = _(u'sectors')
85
86 @receiver(post_save, sender=Sector)
87 def update_vocabulary(sender, **kwargs):
88 "Updates the vocabulary if not specified."
89 sector = kwargs['instance']
90 if not sector.vocabulary and sector.sector_code:
91 if len(sector.sector_code) == 3:
92 sector.vocabulary = '2'
93 elif len(sector.sector_code) == 5:
94 sector.vocabulary = '1'
95 sector.save()
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/akvo/rsr/models/sector.py b/akvo/rsr/models/sector.py
--- a/akvo/rsr/models/sector.py
+++ b/akvo/rsr/models/sector.py
@@ -6,8 +6,6 @@
from django.db import models
-from django.db.models.signals import post_save
-from django.dispatch import receiver
from django.core.validators import MaxValueValidator, MinValueValidator
from django.utils.translation import ugettext_lazy as _
@@ -82,14 +80,3 @@
app_label = 'rsr'
verbose_name = _(u'sector')
verbose_name_plural = _(u'sectors')
-
-@receiver(post_save, sender=Sector)
-def update_vocabulary(sender, **kwargs):
- "Updates the vocabulary if not specified."
- sector = kwargs['instance']
- if not sector.vocabulary and sector.sector_code:
- if len(sector.sector_code) == 3:
- sector.vocabulary = '2'
- elif len(sector.sector_code) == 5:
- sector.vocabulary = '1'
- sector.save()
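The removal makes sense once the failure mode is spelled out: a `post_save` handler that mutates the instance and calls `save()` again can clobber a vocabulary the user just picked, so the editor's 'saved-value' attribute keeps showing the inferred code instead of the submitted one. A pure-function sketch of the inference the deleted signal performed (the function name is my own, not part of RSR) keeps the fallback available without the overwrite-on-save behaviour:

```python
def infer_sector_vocabulary(sector_code):
    """Vocabulary implied by a DAC code: 3 digits -> DAC-3 ('2'),
    5 digits -> DAC-5 ('1'), anything else -> no inference."""
    if len(sector_code) == 3:
        return '2'
    if len(sector_code) == 5:
        return '1'
    return None


# Fall back to inference only when the user left the field blank:
vocabulary = ''  # hypothetical form submission with no vocabulary chosen
vocabulary = vocabulary or infer_sector_vocabulary('11220')
assert vocabulary == '1'
```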
| {"golden_diff": "diff --git a/akvo/rsr/models/sector.py b/akvo/rsr/models/sector.py\n--- a/akvo/rsr/models/sector.py\n+++ b/akvo/rsr/models/sector.py\n@@ -6,8 +6,6 @@\n \n \n from django.db import models\n-from django.db.models.signals import post_save\n-from django.dispatch import receiver\n from django.core.validators import MaxValueValidator, MinValueValidator\n from django.utils.translation import ugettext_lazy as _\n \n@@ -82,14 +80,3 @@\n app_label = 'rsr'\n verbose_name = _(u'sector')\n verbose_name_plural = _(u'sectors')\n-\n-@receiver(post_save, sender=Sector)\n-def update_vocabulary(sender, **kwargs):\n- \"Updates the vocabulary if not specified.\"\n- sector = kwargs['instance']\n- if not sector.vocabulary and sector.sector_code:\n- if len(sector.sector_code) == 3:\n- sector.vocabulary = '2'\n- elif len(sector.sector_code) == 5:\n- sector.vocabulary = '1'\n- sector.save()\n", "issue": "Sector vocabulary saved value not updated\n## Test plan\n\nGIVEN the project editor\nWHEN the sector vocabulary AND sector code are filled in\nTHEN the 'saved-value' attribute of the vocabulary should be correctly updated\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom django.db import models\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.core.validators import MaxValueValidator, MinValueValidator\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom ..fields import ValidXMLCharField\n\nfrom akvo.codelists import models as codelist_models\nfrom akvo.codelists.store.codelists_v201 import SECTOR_VOCABULARY\nfrom akvo.utils import codelist_choices, codelist_value\n\n\nclass Sector(models.Model):\n project = models.ForeignKey('Project', verbose_name=_(u'project'), related_name='sectors')\n sector_code = ValidXMLCharField(\n _(u'sector code'), blank=True, max_length=5,\n help_text=_(u'Enter the sector code of the sectors that the project is working within.<br>'\n u'See these lists for the DAC-5 and DAC-3 sector codes:<br>'\n u'- <a href=\"http://iatistandard.org/201/codelists/Sector/\" target=\"_blank\">'\n u'DAC-5 sector codes</a><br>'\n u'- <a href=\"http://iatistandard.org/201/codelists/SectorCategory/\" '\n u'target=\"_blank\">DAC-3 sector codes</a>')\n )\n text = ValidXMLCharField(\n _(u'description'), blank=True, max_length=100, help_text=_(u'(max 100 characters)')\n )\n vocabulary = ValidXMLCharField(\n _(u'vocabulary'), blank=True, max_length=5, choices=codelist_choices(SECTOR_VOCABULARY)\n )\n percentage = models.DecimalField(\n _(u'sector percentage'), blank=True, null=True, max_digits=4, decimal_places=1,\n validators=[MaxValueValidator(100), MinValueValidator(0)],\n help_text=_(u'You can set the percentage of the project that is relevant for '\n u'this sector here.')\n )\n\n def __unicode__(self):\n if self.sector_code:\n try:\n sector_unicode = self.iati_sector().name.capitalize()\n except Exception as e:\n sector_unicode = u'%s' % _(u'Sector code not found')\n else:\n sector_unicode = u'%s' % _(u'No sector code specified')\n\n if self.percentage:\n sector_unicode += u' (%s%%)' % str(self.percentage)\n\n return sector_unicode\n\n\n def iati_sector_codes(self):\n if self.sector_code and (self.vocabulary == '1' or self.vocabulary == 'DAC'):\n return 
self.sector_code, codelist_value(codelist_models.Sector, self, 'sector_code')\n elif self.sector_code and (self.vocabulary == '2' or self.vocabulary == 'DAC-3'):\n return self.sector_code, codelist_value(codelist_models.SectorCategory,\n self,\n 'sector_code')\n else:\n return self.sector_code, self.sector_code\n\n def iati_sector(self):\n if self.sector_code and (self.vocabulary == '1' or self.vocabulary == 'DAC'):\n return codelist_value(codelist_models.Sector, self, 'sector_code')\n elif self.sector_code and (self.vocabulary == '2' or self.vocabulary == 'DAC-3'):\n return codelist_value(codelist_models.SectorCategory, self, 'sector_code')\n else:\n return self.sector_code\n\n def iati_vocabulary(self):\n return codelist_value(codelist_models.SectorVocabulary, self, 'vocabulary')\n\n class Meta:\n app_label = 'rsr'\n verbose_name = _(u'sector')\n verbose_name_plural = _(u'sectors')\n\n@receiver(post_save, sender=Sector)\ndef update_vocabulary(sender, **kwargs):\n \"Updates the vocabulary if not specified.\"\n sector = kwargs['instance']\n if not sector.vocabulary and sector.sector_code:\n if len(sector.sector_code) == 3:\n sector.vocabulary = '2'\n elif len(sector.sector_code) == 5:\n sector.vocabulary = '1'\n sector.save()\n", "path": "akvo/rsr/models/sector.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom django.db import models\nfrom django.core.validators import MaxValueValidator, MinValueValidator\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom ..fields import ValidXMLCharField\n\nfrom akvo.codelists import models as codelist_models\nfrom akvo.codelists.store.codelists_v201 import SECTOR_VOCABULARY\nfrom akvo.utils import codelist_choices, codelist_value\n\n\nclass Sector(models.Model):\n project = models.ForeignKey('Project', verbose_name=_(u'project'), related_name='sectors')\n sector_code = ValidXMLCharField(\n _(u'sector code'), blank=True, max_length=5,\n help_text=_(u'Enter the sector code of the sectors that the project is working within.<br>'\n u'See these lists for the DAC-5 and DAC-3 sector codes:<br>'\n u'- <a href=\"http://iatistandard.org/201/codelists/Sector/\" target=\"_blank\">'\n u'DAC-5 sector codes</a><br>'\n u'- <a href=\"http://iatistandard.org/201/codelists/SectorCategory/\" '\n u'target=\"_blank\">DAC-3 sector codes</a>')\n )\n text = ValidXMLCharField(\n _(u'description'), blank=True, max_length=100, help_text=_(u'(max 100 characters)')\n )\n vocabulary = ValidXMLCharField(\n _(u'vocabulary'), blank=True, max_length=5, choices=codelist_choices(SECTOR_VOCABULARY)\n )\n percentage = models.DecimalField(\n _(u'sector percentage'), blank=True, null=True, max_digits=4, decimal_places=1,\n validators=[MaxValueValidator(100), MinValueValidator(0)],\n help_text=_(u'You can set the percentage of the project that is relevant for '\n u'this sector here.')\n )\n\n def __unicode__(self):\n if self.sector_code:\n try:\n sector_unicode = self.iati_sector().name.capitalize()\n except Exception as e:\n sector_unicode = u'%s' % _(u'Sector code not found')\n else:\n sector_unicode = u'%s' % _(u'No sector code specified')\n\n if self.percentage:\n sector_unicode += u' (%s%%)' % str(self.percentage)\n\n return sector_unicode\n\n\n def iati_sector_codes(self):\n if self.sector_code and 
(self.vocabulary == '1' or self.vocabulary == 'DAC'):\n return self.sector_code, codelist_value(codelist_models.Sector, self, 'sector_code')\n elif self.sector_code and (self.vocabulary == '2' or self.vocabulary == 'DAC-3'):\n return self.sector_code, codelist_value(codelist_models.SectorCategory,\n self,\n 'sector_code')\n else:\n return self.sector_code, self.sector_code\n\n def iati_sector(self):\n if self.sector_code and (self.vocabulary == '1' or self.vocabulary == 'DAC'):\n return codelist_value(codelist_models.Sector, self, 'sector_code')\n elif self.sector_code and (self.vocabulary == '2' or self.vocabulary == 'DAC-3'):\n return codelist_value(codelist_models.SectorCategory, self, 'sector_code')\n else:\n return self.sector_code\n\n def iati_vocabulary(self):\n return codelist_value(codelist_models.SectorVocabulary, self, 'vocabulary')\n\n class Meta:\n app_label = 'rsr'\n verbose_name = _(u'sector')\n verbose_name_plural = _(u'sectors')\n", "path": "akvo/rsr/models/sector.py"}]} | 1,468 | 246 |
gh_patches_debug_13378 | rasdani/github-patches | git_diff | TheAlgorithms__Python-6467 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Enter the logic for hash table
### Describe your change:
* [ ] Add an algorithm?
* [ ] Fix a bug or typo in an existing algorithm?
* [x] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [ ] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `data_structures/hashing/double_hash.py`
Content:
```
1 #!/usr/bin/env python3
2 from .hash_table import HashTable
3 from .number_theory.prime_numbers import is_prime, next_prime
4
5
6 class DoubleHash(HashTable):
7 """
8 Hash Table example with open addressing and Double Hash
9 """
10
11 def __init__(self, *args, **kwargs):
12 super().__init__(*args, **kwargs)
13
14 def __hash_function_2(self, value, data):
15
16 next_prime_gt = (
17 next_prime(value % self.size_table)
18 if not is_prime(value % self.size_table)
19 else value % self.size_table
20 ) # gt = bigger than
21 return next_prime_gt - (data % next_prime_gt)
22
23 def __hash_double_function(self, key, data, increment):
24 return (increment * self.__hash_function_2(key, data)) % self.size_table
25
26 def _collision_resolution(self, key, data=None):
27 i = 1
28 new_key = self.hash_function(data)
29
30 while self.values[new_key] is not None and self.values[new_key] != key:
31 new_key = (
32 self.__hash_double_function(key, data, i)
33 if self.balanced_factor() >= self.lim_charge
34 else None
35 )
36 if new_key is None:
37 break
38 else:
39 i += 1
40
41 return new_key
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/data_structures/hashing/double_hash.py b/data_structures/hashing/double_hash.py
--- a/data_structures/hashing/double_hash.py
+++ b/data_structures/hashing/double_hash.py
@@ -1,4 +1,16 @@
#!/usr/bin/env python3
+"""
+Double hashing is a collision resolving technique in Open Addressed Hash tables.
+Double hashing uses the idea of applying a second hash function to the key when a collision
+occurs. The advantage of double hashing is that it is one of the best forms of probing,
+producing a uniform distribution of records throughout a hash table. This technique
+does not yield any clusters and is one of the most effective methods for resolving collisions.
+
+Double hashing can be done using: (hash1(key) + i * hash2(key)) % TABLE_SIZE
+Where hash1() and hash2() are hash functions and TABLE_SIZE is the size of the hash table.
+
+Reference: https://en.wikipedia.org/wiki/Double_hashing
+"""
from .hash_table import HashTable
from .number_theory.prime_numbers import is_prime, next_prime
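Separately from the docstring, the probe formula is easy to exercise in isolation. Below is a self-contained sketch; the secondary hash `7 - key % 7` is a common textbook choice, not the prime-based one this repository derives:

```python
def double_hash_probe(key, attempt, table_size):
    """Slot probed on the given attempt: (hash1 + i * hash2) % TABLE_SIZE."""
    hash1 = key % table_size
    hash2 = 7 - (key % 7)  # ranges over 1..7, so it can never be zero
    return (hash1 + attempt * hash2) % table_size


# Two keys that collide on attempt 0 separate on the first retry,
# because their secondary hashes differ -- no clustering around slot 5.
size = 11
assert double_hash_probe(5, 0, size) == double_hash_probe(16, 0, size) == 5
assert double_hash_probe(5, 1, size) != double_hash_probe(16, 1, size)
```

The usual caveat applies: every slot is only guaranteed to be reachable when `table_size` is coprime with every possible `hash2` (a prime table size suffices), which is one reason the repository's implementation leans on `next_prime`.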
| {"golden_diff": "diff --git a/data_structures/hashing/double_hash.py b/data_structures/hashing/double_hash.py\n--- a/data_structures/hashing/double_hash.py\n+++ b/data_structures/hashing/double_hash.py\n@@ -1,4 +1,16 @@\n #!/usr/bin/env python3\n+\"\"\"\n+Double hashing is a collision resolving technique in Open Addressed Hash tables.\n+Double hashing uses the idea of applying a second hash function to key when a collision\n+occurs. The advantage of Double hashing is that it is one of the best form of probing,\n+producing a uniform distribution of records throughout a hash table. This technique\n+does not yield any clusters. It is one of effective method for resolving collisions.\n+\n+Double hashing can be done using: (hash1(key) + i * hash2(key)) % TABLE_SIZE\n+Where hash1() and hash2() are hash functions and TABLE_SIZE is size of hash table.\n+\n+Reference: https://en.wikipedia.org/wiki/Double_hashing\n+\"\"\"\n from .hash_table import HashTable\n from .number_theory.prime_numbers import is_prime, next_prime\n", "issue": "Enter the logic for hash table\n### Describe your change:\r\n\r\n\r\n\r\n* [ ] Add an algorithm?\r\n* [ ] Fix a bug or typo in an existing algorithm?\r\n* [x] Documentation change?\r\n\r\n### Checklist:\r\n* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).\r\n* [x] This pull request is all my own work -- I have not plagiarized.\r\n* [x] I know that pull requests will not be merged if they fail the automated tests.\r\n* [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.\r\n* [ ] All new Python files are placed inside an existing directory.\r\n* [x] All filenames are in all lowercase characters with no spaces or dashes.\r\n* [x] All functions and variable names follow Python naming conventions.\r\n* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).\r\n* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.\r\n* [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.\r\n* [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\nfrom .hash_table import HashTable\nfrom .number_theory.prime_numbers import is_prime, next_prime\n\n\nclass DoubleHash(HashTable):\n \"\"\"\n Hash Table example with open addressing and Double Hash\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def __hash_function_2(self, value, data):\n\n next_prime_gt = (\n next_prime(value % self.size_table)\n if not is_prime(value % self.size_table)\n else value % self.size_table\n ) # gt = bigger than\n return next_prime_gt - (data % next_prime_gt)\n\n def __hash_double_function(self, key, data, increment):\n return (increment * self.__hash_function_2(key, data)) % self.size_table\n\n def _collision_resolution(self, key, data=None):\n i = 1\n new_key = self.hash_function(data)\n\n while self.values[new_key] is not None and self.values[new_key] != key:\n new_key = (\n self.__hash_double_function(key, data, i)\n if self.balanced_factor() >= self.lim_charge\n else None\n )\n if new_key is None:\n break\n else:\n i += 1\n\n return new_key\n", "path": "data_structures/hashing/double_hash.py"}], "after_files": [{"content": "#!/usr/bin/env 
python3\n\"\"\"\nDouble hashing is a collision resolving technique in Open Addressed Hash tables.\nDouble hashing uses the idea of applying a second hash function to key when a collision\noccurs. The advantage of Double hashing is that it is one of the best form of probing,\nproducing a uniform distribution of records throughout a hash table. This technique\ndoes not yield any clusters. It is one of effective method for resolving collisions.\n\nDouble hashing can be done using: (hash1(key) + i * hash2(key)) % TABLE_SIZE\nWhere hash1() and hash2() are hash functions and TABLE_SIZE is size of hash table.\n\nReference: https://en.wikipedia.org/wiki/Double_hashing\n\"\"\"\nfrom .hash_table import HashTable\nfrom .number_theory.prime_numbers import is_prime, next_prime\n\n\nclass DoubleHash(HashTable):\n \"\"\"\n Hash Table example with open addressing and Double Hash\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def __hash_function_2(self, value, data):\n\n next_prime_gt = (\n next_prime(value % self.size_table)\n if not is_prime(value % self.size_table)\n else value % self.size_table\n ) # gt = bigger than\n return next_prime_gt - (data % next_prime_gt)\n\n def __hash_double_function(self, key, data, increment):\n return (increment * self.__hash_function_2(key, data)) % self.size_table\n\n def _collision_resolution(self, key, data=None):\n i = 1\n new_key = self.hash_function(data)\n\n while self.values[new_key] is not None and self.values[new_key] != key:\n new_key = (\n self.__hash_double_function(key, data, i)\n if self.balanced_factor() >= self.lim_charge\n else None\n )\n if new_key is None:\n break\n else:\n i += 1\n\n return new_key\n", "path": "data_structures/hashing/double_hash.py"}]} | 929 | 243 |
gh_patches_debug_29279 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-121 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Type Inference 1: Check column against a type
**Problem**
<!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.-->
Different types in Mathesar will enable different operations; for example, strings could be aggregated by concatenating, but numeric types could be aggregated by summing or multiplying. So far, while we can reflect different types, we have no way to determine the type most appropriate for a column.
**Proposed solution**
<!-- A clear and concise description of your proposed solution or feature. -->
Given a `schema`, `table_name`, `column_name`, and `type`, we need to be able to return a boolean giving whether the column can be cast to that type.
**Additional context**
<!-- Add any other context or screenshots about the feature request here.-->
We may need to take an optional sample size parameter to do this for large data. Performance testing will be necessary.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `db/types/base.py`
Content:
```
1 from sqlalchemy import create_engine
2 from db import constants
3
4 SCHEMA = f"{constants.MATHESAR_PREFIX}types"
5 # Since we want to have our identifiers quoted appropriately for use in
6 # PostgreSQL, we want to use the postgres dialect preparer to set this up.
7 preparer = create_engine("postgresql://").dialect.identifier_preparer
8
9
10 def get_qualified_name(name):
11 return ".".join([preparer.quote_schema(SCHEMA), name])
12
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/db/types/base.py b/db/types/base.py
--- a/db/types/base.py
+++ b/db/types/base.py
@@ -1,5 +1,6 @@
-from sqlalchemy import create_engine
+from sqlalchemy import create_engine, MetaData, Table, DDL
from db import constants
+from db.types import email
SCHEMA = f"{constants.MATHESAR_PREFIX}types"
# Since we want to have our identifiers quoted appropriately for use in
@@ -9,3 +10,41 @@
def get_qualified_name(name):
return ".".join([preparer.quote_schema(SCHEMA), name])
+
+
+def get_supported_alter_column_types(engine):
+ dialect_types = engine.dialect.ischema_names
+ type_map = {
+ # Default Postgres types
+ "boolean": dialect_types.get("boolean"),
+ "interval": dialect_types.get("interval"),
+ "numeric": dialect_types.get("numeric"),
+ "string": dialect_types.get("name"),
+ # Custom Mathesar types
+ "email": dialect_types.get(email.QUALIFIED_EMAIL)
+ }
+ return {k: v for k, v in type_map.items() if v is not None}
+
+
+def alter_column_type(
+ schema, table_name, column_name, target_type_str, engine
+):
+ _preparer = engine.dialect.identifier_preparer
+ supported_types = get_supported_alter_column_types(engine)
+ target_type = supported_types.get(target_type_str.lower())
+ with engine.begin() as conn:
+ metadata = MetaData(bind=engine, schema=schema)
+ table = Table(
+ table_name, metadata, schema=schema, autoload_with=engine
+ )
+ column = table.columns[column_name]
+ prepared_table_name = _preparer.format_table(table)
+ prepared_column_name = _preparer.format_column(column)
+ prepared_type_name = target_type().compile(dialect=engine.dialect)
+ alter_stmt = f"""
+ ALTER TABLE {prepared_table_name}
+ ALTER COLUMN {prepared_column_name}
+ TYPE {prepared_type_name}
+ USING {prepared_column_name}::{prepared_type_name};
+ """
+ conn.execute(DDL(alter_stmt))
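The boolean check the issue asks for can be prototyped on top of the same cast machinery: attempt the `::type` cast on a bounded sample and roll the transaction back. Everything below is a sketch under assumptions — the function name, the rollback approach, and the optional `sample_size` parameter are mine, not Mathesar's API, and `type_str` is assumed to come from the `get_supported_alter_column_types` whitelist above, since it is interpolated into SQL:

```python
from sqlalchemy import text


def column_castable_to(engine, schema, table_name, column_name,
                       type_str, sample_size=100):
    # Hypothetical helper: the cast either succeeds on the sample or raises.
    prep = engine.dialect.identifier_preparer
    query = text(
        f"SELECT {prep.quote(column_name)}::{type_str} "
        f"FROM {prep.quote_schema(schema)}.{prep.quote(table_name)} "
        f"LIMIT :n"
    )
    conn = engine.connect()
    trans = conn.begin()  # keep the probe free of side effects
    try:
        conn.execute(query, {"n": sample_size})
        return True
    except Exception:
        return False
    finally:
        trans.rollback()
        conn.close()
```

Sampling keeps the probe cheap on large tables, at the cost of false positives when the first `sample_size` rows happen to be castable but later rows are not — the performance trade-off the issue's additional context anticipates.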
| {"golden_diff": "diff --git a/db/types/base.py b/db/types/base.py\n--- a/db/types/base.py\n+++ b/db/types/base.py\n@@ -1,5 +1,6 @@\n-from sqlalchemy import create_engine\n+from sqlalchemy import create_engine, MetaData, Table, DDL\n from db import constants\n+from db.types import email\n \n SCHEMA = f\"{constants.MATHESAR_PREFIX}types\"\n # Since we want to have our identifiers quoted appropriately for use in\n@@ -9,3 +10,41 @@\n \n def get_qualified_name(name):\n return \".\".join([preparer.quote_schema(SCHEMA), name])\n+\n+\n+def get_supported_alter_column_types(engine):\n+ dialect_types = engine.dialect.ischema_names\n+ type_map = {\n+ # Default Postgres types\n+ \"boolean\": dialect_types.get(\"boolean\"),\n+ \"interval\": dialect_types.get(\"interval\"),\n+ \"numeric\": dialect_types.get(\"numeric\"),\n+ \"string\": dialect_types.get(\"name\"),\n+ # Custom Mathesar types\n+ \"email\": dialect_types.get(email.QUALIFIED_EMAIL)\n+ }\n+ return {k: v for k, v in type_map.items() if v is not None}\n+\n+\n+def alter_column_type(\n+ schema, table_name, column_name, target_type_str, engine\n+):\n+ _preparer = engine.dialect.identifier_preparer\n+ supported_types = get_supported_alter_column_types(engine)\n+ target_type = supported_types.get(target_type_str.lower())\n+ with engine.begin() as conn:\n+ metadata = MetaData(bind=engine, schema=schema)\n+ table = Table(\n+ table_name, metadata, schema=schema, autoload_with=engine\n+ )\n+ column = table.columns[column_name]\n+ prepared_table_name = _preparer.format_table(table)\n+ prepared_column_name = _preparer.format_column(column)\n+ prepared_type_name = target_type().compile(dialect=engine.dialect)\n+ alter_stmt = f\"\"\"\n+ ALTER TABLE {prepared_table_name}\n+ ALTER COLUMN {prepared_column_name}\n+ TYPE {prepared_type_name}\n+ USING {prepared_column_name}::{prepared_type_name};\n+ \"\"\"\n+ conn.execute(DDL(alter_stmt))\n", "issue": "Type Inference 1: Check column against a type\n**Problem**\r\n<!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.-->\r\n\r\nDifferent types in Mathesar will enable different operations; for example, strings could be aggregated by concatenating, but numeric types could be aggregated by summing or multiplying. So far, while we can reflect different types, we have no way to determine the type most appropriate for a column.\r\n\r\n**Proposed solution**\r\n<!-- A clear and concise description of your proposed solution or feature. -->\r\n\r\nGiven a `schema`, `table_name`, `column_name`, and `type`, we need to be able to return a boolean giving whether the column can be cast to that type.\r\n\r\n**Additional context**\r\n<!-- Add any other context or screenshots about the feature request here.-->\r\n\r\nWe may need to take an optional sample size parameter to do this for large data. 
Performance testing will be necessary.\r\n\n", "before_files": [{"content": "from sqlalchemy import create_engine\nfrom db import constants\n\nSCHEMA = f\"{constants.MATHESAR_PREFIX}types\"\n# Since we want to have our identifiers quoted appropriately for use in\n# PostgreSQL, we want to use the postgres dialect preparer to set this up.\npreparer = create_engine(\"postgresql://\").dialect.identifier_preparer\n\n\ndef get_qualified_name(name):\n return \".\".join([preparer.quote_schema(SCHEMA), name])\n", "path": "db/types/base.py"}], "after_files": [{"content": "from sqlalchemy import create_engine, MetaData, Table, DDL\nfrom db import constants\nfrom db.types import email\n\nSCHEMA = f\"{constants.MATHESAR_PREFIX}types\"\n# Since we want to have our identifiers quoted appropriately for use in\n# PostgreSQL, we want to use the postgres dialect preparer to set this up.\npreparer = create_engine(\"postgresql://\").dialect.identifier_preparer\n\n\ndef get_qualified_name(name):\n return \".\".join([preparer.quote_schema(SCHEMA), name])\n\n\ndef get_supported_alter_column_types(engine):\n dialect_types = engine.dialect.ischema_names\n type_map = {\n # Default Postgres types\n \"boolean\": dialect_types.get(\"boolean\"),\n \"interval\": dialect_types.get(\"interval\"),\n \"numeric\": dialect_types.get(\"numeric\"),\n \"string\": dialect_types.get(\"name\"),\n # Custom Mathesar types\n \"email\": dialect_types.get(email.QUALIFIED_EMAIL)\n }\n return {k: v for k, v in type_map.items() if v is not None}\n\n\ndef alter_column_type(\n schema, table_name, column_name, target_type_str, engine\n):\n _preparer = engine.dialect.identifier_preparer\n supported_types = get_supported_alter_column_types(engine)\n target_type = supported_types.get(target_type_str.lower())\n with engine.begin() as conn:\n metadata = MetaData(bind=engine, schema=schema)\n table = Table(\n table_name, metadata, schema=schema, autoload_with=engine\n )\n column = table.columns[column_name]\n prepared_table_name = _preparer.format_table(table)\n prepared_column_name = _preparer.format_column(column)\n prepared_type_name = target_type().compile(dialect=engine.dialect)\n alter_stmt = f\"\"\"\n ALTER TABLE {prepared_table_name}\n ALTER COLUMN {prepared_column_name}\n TYPE {prepared_type_name}\n USING {prepared_column_name}::{prepared_type_name};\n \"\"\"\n conn.execute(DDL(alter_stmt))\n", "path": "db/types/base.py"}]} | 564 | 487 |
gh_patches_debug_19467 | rasdani/github-patches | git_diff | mlcommons__GaNDLF-675 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Perform penalty calculation after all sanity checks are completed
**Is your feature request related to a problem? Please describe.**
The penalty calculation takes a long time, and there are sanity checks that happen after this, which can be a pain.
**Describe the solution you'd like**
It would be great to have these checks before the penalty calculation for quality-of-life improvements.
**Describe alternatives you've considered**
N.A.
**Additional context**
From Evan C.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `GANDLF/compute/generic.py`
Content:
```
1 from GANDLF.models import get_model
2 from GANDLF.schedulers import get_scheduler
3 from GANDLF.optimizers import get_optimizer
4 from GANDLF.data import (
5 get_train_loader,
6 get_validation_loader,
7 )
8 from GANDLF.utils import (
9 populate_header_in_parameters,
10 parseTrainingCSV,
11 send_model_to_device,
12 get_class_imbalance_weights,
13 )
14
15
16 def create_pytorch_objects(parameters, train_csv=None, val_csv=None, device="cpu"):
17 """
18 This function creates all the PyTorch objects needed for training.
19
20 Args:
21 parameters (dict): The parameters dictionary.
22 train_csv (str): The path to the training CSV file.
23 val_csv (str): The path to the validation CSV file.
24 device (str): The device to perform computations on.
25
26 Returns:
27 model (torch.nn.Module): The model to use for training.
28 optimizer (Optimizer): The optimizer to use for training.
29 train_loader (torch.utils.data.DataLoader): The training data loader.
30 val_loader (torch.utils.data.DataLoader): The validation data loader.
31 scheduler (object): The scheduler to use for training.
32 parameters (dict): The updated parameters dictionary.
33 """
34 # initialize train and val loaders
35 train_loader, val_loader = None, None
36 headers_to_populate_train, headers_to_populate_val = None, None
37
38 if train_csv is not None:
39 # populate the data frames
40 parameters["training_data"], headers_to_populate_train = parseTrainingCSV(
41 train_csv, train=True
42 )
43 parameters = populate_header_in_parameters(
44 parameters, headers_to_populate_train
45 )
46 # get the train loader
47 train_loader = get_train_loader(parameters)
48 parameters["training_samples_size"] = len(train_loader)
49
50 # Calculate the weights here
51 (
52 parameters["weights"],
53 parameters["class_weights"],
54 ) = get_class_imbalance_weights(parameters["training_data"], parameters)
55
56 if val_csv is not None:
57 parameters["validation_data"], headers_to_populate_val = parseTrainingCSV(
58 val_csv, train=False
59 )
60 if headers_to_populate_train is None:
61 parameters = populate_header_in_parameters(
62 parameters, headers_to_populate_val
63 )
64 # get the validation loader
65 val_loader = get_validation_loader(parameters)
66
67 # get the model
68 model = get_model(parameters)
69 parameters["model_parameters"] = model.parameters()
70
71 # get the optimizer
72 optimizer = get_optimizer(parameters)
73 parameters["optimizer_object"] = optimizer
74
75 # send model to correct device
76 (
77 model,
78 parameters["model"]["amp"],
79 parameters["device"],
80 parameters["device_id"],
81 ) = send_model_to_device(
82 model, amp=parameters["model"]["amp"], device=device, optimizer=optimizer
83 )
84
85 # only need to create scheduler if training
86 if train_csv is not None:
87 if not ("step_size" in parameters["scheduler"]):
88 parameters["scheduler"]["step_size"] = (
89 parameters["training_samples_size"] / parameters["learning_rate"]
90 )
91
92 scheduler = get_scheduler(parameters)
93 else:
94 scheduler = None
95
96 # these keys contain generators, and are not needed beyond this point in params
97 generator_keys_to_remove = ["optimizer_object", "model_parameters"]
98 for key in generator_keys_to_remove:
99 parameters.pop(key, None)
100
101 return model, optimizer, train_loader, val_loader, scheduler, parameters
102
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/GANDLF/compute/generic.py b/GANDLF/compute/generic.py
--- a/GANDLF/compute/generic.py
+++ b/GANDLF/compute/generic.py
@@ -47,12 +47,6 @@
train_loader = get_train_loader(parameters)
parameters["training_samples_size"] = len(train_loader)
- # Calculate the weights here
- (
- parameters["weights"],
- parameters["class_weights"],
- ) = get_class_imbalance_weights(parameters["training_data"], parameters)
-
if val_csv is not None:
parameters["validation_data"], headers_to_populate_val = parseTrainingCSV(
val_csv, train=False
@@ -90,6 +84,13 @@
)
scheduler = get_scheduler(parameters)
+
+ # Calculate the weights here
+ (
+ parameters["weights"],
+ parameters["class_weights"],
+ ) = get_class_imbalance_weights(parameters["training_data"], parameters)
+
else:
scheduler = None
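The diff boils down to an ordering principle: run every cheap, fail-fast check before the one expensive pass over the training data. A stripped-down sketch of that shape (the helper names are placeholders, not GaNDLF's API):

```python
def create_objects(parameters, make_loaders, build_model, compute_weights):
    # Cheap steps first: each one can raise on a bad CSV, config, or device
    # long before any heavy computation starts.
    train_loader, val_loader = make_loaders(parameters)
    model = build_model(parameters)
    # The expensive full scan of the training data now runs only after
    # every sanity check above has already passed.
    parameters["weights"], parameters["class_weights"] = compute_weights(parameters)
    return model, train_loader, val_loader
```

Concretely, the patch achieves this by moving `get_class_imbalance_weights` below the validation-loader, model, optimizer, device, and scheduler setup, so a malformed CSV or a bad scheduler configuration now surfaces in seconds rather than after the full weight computation.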
| {"golden_diff": "diff --git a/GANDLF/compute/generic.py b/GANDLF/compute/generic.py\n--- a/GANDLF/compute/generic.py\n+++ b/GANDLF/compute/generic.py\n@@ -47,12 +47,6 @@\n train_loader = get_train_loader(parameters)\n parameters[\"training_samples_size\"] = len(train_loader)\n \n- # Calculate the weights here\n- (\n- parameters[\"weights\"],\n- parameters[\"class_weights\"],\n- ) = get_class_imbalance_weights(parameters[\"training_data\"], parameters)\n-\n if val_csv is not None:\n parameters[\"validation_data\"], headers_to_populate_val = parseTrainingCSV(\n val_csv, train=False\n@@ -90,6 +84,13 @@\n )\n \n scheduler = get_scheduler(parameters)\n+\n+ # Calculate the weights here\n+ (\n+ parameters[\"weights\"],\n+ parameters[\"class_weights\"],\n+ ) = get_class_imbalance_weights(parameters[\"training_data\"], parameters)\n+\n else:\n scheduler = None\n", "issue": "Perform penalty calculation after all sanity checks are completed\n**Is your feature request related to a problem? Please describe.**\r\nThe penalty calculation takes a long time, and there are sanity checks that happen after this, which can be a pain.\r\n\r\n**Describe the solution you'd like**\r\nIt would be great to have these checks before the penalty calculation for quality-of-life improvements.\r\n\r\n**Describe alternatives you've considered**\r\nN.A.\r\n\r\n**Additional context**\r\nFrom Evan C.\n", "before_files": [{"content": "from GANDLF.models import get_model\nfrom GANDLF.schedulers import get_scheduler\nfrom GANDLF.optimizers import get_optimizer\nfrom GANDLF.data import (\n get_train_loader,\n get_validation_loader,\n)\nfrom GANDLF.utils import (\n populate_header_in_parameters,\n parseTrainingCSV,\n send_model_to_device,\n get_class_imbalance_weights,\n)\n\n\ndef create_pytorch_objects(parameters, train_csv=None, val_csv=None, device=\"cpu\"):\n \"\"\"\n This function creates all the PyTorch objects needed for training.\n\n Args:\n parameters (dict): The parameters dictionary.\n train_csv (str): The path to the training CSV file.\n val_csv (str): The path to the validation CSV file.\n device (str): The device to perform computations on.\n\n Returns:\n model (torch.nn.Module): The model to use for training.\n optimizer (Optimizer): The optimizer to use for training.\n train_loader (torch.utils.data.DataLoader): The training data loader.\n val_loader (torch.utils.data.DataLoader): The validation data loader.\n scheduler (object): The scheduler to use for training.\n parameters (dict): The updated parameters dictionary.\n \"\"\"\n # initialize train and val loaders\n train_loader, val_loader = None, None\n headers_to_populate_train, headers_to_populate_val = None, None\n\n if train_csv is not None:\n # populate the data frames\n parameters[\"training_data\"], headers_to_populate_train = parseTrainingCSV(\n train_csv, train=True\n )\n parameters = populate_header_in_parameters(\n parameters, headers_to_populate_train\n )\n # get the train loader\n train_loader = get_train_loader(parameters)\n parameters[\"training_samples_size\"] = len(train_loader)\n\n # Calculate the weights here\n (\n parameters[\"weights\"],\n parameters[\"class_weights\"],\n ) = get_class_imbalance_weights(parameters[\"training_data\"], parameters)\n\n if val_csv is not None:\n parameters[\"validation_data\"], headers_to_populate_val = parseTrainingCSV(\n val_csv, train=False\n )\n if headers_to_populate_train is None:\n parameters = populate_header_in_parameters(\n parameters, headers_to_populate_val\n )\n # get the validation loader\n 
val_loader = get_validation_loader(parameters)\n\n # get the model\n model = get_model(parameters)\n parameters[\"model_parameters\"] = model.parameters()\n\n # get the optimizer\n optimizer = get_optimizer(parameters)\n parameters[\"optimizer_object\"] = optimizer\n\n # send model to correct device\n (\n model,\n parameters[\"model\"][\"amp\"],\n parameters[\"device\"],\n parameters[\"device_id\"],\n ) = send_model_to_device(\n model, amp=parameters[\"model\"][\"amp\"], device=device, optimizer=optimizer\n )\n\n # only need to create scheduler if training\n if train_csv is not None:\n if not (\"step_size\" in parameters[\"scheduler\"]):\n parameters[\"scheduler\"][\"step_size\"] = (\n parameters[\"training_samples_size\"] / parameters[\"learning_rate\"]\n )\n\n scheduler = get_scheduler(parameters)\n else:\n scheduler = None\n\n # these keys contain generators, and are not needed beyond this point in params\n generator_keys_to_remove = [\"optimizer_object\", \"model_parameters\"]\n for key in generator_keys_to_remove:\n parameters.pop(key, None)\n\n return model, optimizer, train_loader, val_loader, scheduler, parameters\n", "path": "GANDLF/compute/generic.py"}], "after_files": [{"content": "from GANDLF.models import get_model\nfrom GANDLF.schedulers import get_scheduler\nfrom GANDLF.optimizers import get_optimizer\nfrom GANDLF.data import (\n get_train_loader,\n get_validation_loader,\n)\nfrom GANDLF.utils import (\n populate_header_in_parameters,\n parseTrainingCSV,\n send_model_to_device,\n get_class_imbalance_weights,\n)\n\n\ndef create_pytorch_objects(parameters, train_csv=None, val_csv=None, device=\"cpu\"):\n \"\"\"\n This function creates all the PyTorch objects needed for training.\n\n Args:\n parameters (dict): The parameters dictionary.\n train_csv (str): The path to the training CSV file.\n val_csv (str): The path to the validation CSV file.\n device (str): The device to perform computations on.\n\n Returns:\n model (torch.nn.Module): The model to use for training.\n optimizer (Optimizer): The optimizer to use for training.\n train_loader (torch.utils.data.DataLoader): The training data loader.\n val_loader (torch.utils.data.DataLoader): The validation data loader.\n scheduler (object): The scheduler to use for training.\n parameters (dict): The updated parameters dictionary.\n \"\"\"\n # initialize train and val loaders\n train_loader, val_loader = None, None\n headers_to_populate_train, headers_to_populate_val = None, None\n\n if train_csv is not None:\n # populate the data frames\n parameters[\"training_data\"], headers_to_populate_train = parseTrainingCSV(\n train_csv, train=True\n )\n parameters = populate_header_in_parameters(\n parameters, headers_to_populate_train\n )\n # get the train loader\n train_loader = get_train_loader(parameters)\n parameters[\"training_samples_size\"] = len(train_loader)\n\n if val_csv is not None:\n parameters[\"validation_data\"], headers_to_populate_val = parseTrainingCSV(\n val_csv, train=False\n )\n if headers_to_populate_train is None:\n parameters = populate_header_in_parameters(\n parameters, headers_to_populate_val\n )\n # get the validation loader\n val_loader = get_validation_loader(parameters)\n\n # get the model\n model = get_model(parameters)\n parameters[\"model_parameters\"] = model.parameters()\n\n # get the optimizer\n optimizer = get_optimizer(parameters)\n parameters[\"optimizer_object\"] = optimizer\n\n # send model to correct device\n (\n model,\n parameters[\"model\"][\"amp\"],\n parameters[\"device\"],\n 
parameters[\"device_id\"],\n ) = send_model_to_device(\n model, amp=parameters[\"model\"][\"amp\"], device=device, optimizer=optimizer\n )\n\n # only need to create scheduler if training\n if train_csv is not None:\n if not (\"step_size\" in parameters[\"scheduler\"]):\n parameters[\"scheduler\"][\"step_size\"] = (\n parameters[\"training_samples_size\"] / parameters[\"learning_rate\"]\n )\n\n scheduler = get_scheduler(parameters)\n\n # Calculate the weights here\n (\n parameters[\"weights\"],\n parameters[\"class_weights\"],\n ) = get_class_imbalance_weights(parameters[\"training_data\"], parameters)\n\n else:\n scheduler = None\n\n # these keys contain generators, and are not needed beyond this point in params\n generator_keys_to_remove = [\"optimizer_object\", \"model_parameters\"]\n for key in generator_keys_to_remove:\n parameters.pop(key, None)\n\n return model, optimizer, train_loader, val_loader, scheduler, parameters\n", "path": "GANDLF/compute/generic.py"}]} | 1,288 | 224 |